Merge tag 'master-2014-07-31' of git://git.kernel.org/pub/scm/linux/kernel/git/linvil...
author      David S. Miller <davem@davemloft.net>
            Tue, 5 Aug 2014 20:18:20 +0000 (13:18 -0700)
committer   David S. Miller <davem@davemloft.net>
            Tue, 5 Aug 2014 20:18:20 +0000 (13:18 -0700)
Conflicts:
net/6lowpan/iphc.c

The minor conflicts in iphc.c came from changes overlapping with some
style cleanups.

John W. Linville says:

====================
Please pull this last(?) batch of wireless changes intended for the
3.17 stream...

For the NFC bits, Samuel says:

"This is a rather quiet one, we have:

- A new driver from ST Microelectronics for their NCI ST21NFCB,
  including device tree support.

- p2p support for the ST21NFCA driver

- A few fixes and enhancements for the NFC digital layer"

For the Atheros bits, Kalle says:

"Michal and Janusz did some important RX aggregation fixes, basically we
were missing RX reordering altogether. The 10.1 firmware doesn't support
Ad-Hoc mode and Michal fixed ath10k so that it doesn't advertise Ad-Hoc
support with that firmware. Also he implemented a workaround for a KVM
issue."

For the Bluetooth bits, Gustavo and Johan say:

"To quote Gustavo from his previous request:

'Some last-minute fixes for -next. We have a fix for a use-after-free in
RFCOMM, another fix for an issue with ADV_DIRECT_IND and one for ADV_IND with
auto-connection handling. Last, we added support for reading the codec and
MWS settings for controllers that support these features.'

Additionally there are fixes to LE scanning, an update to conform to the 4.1
core specification, as well as fixes for tracking the page scan state. All
of these fixes are important for 3.17."

And,

"We've got:

- 6lowpan fixes/cleanups
- A couple of crash fixes: one for the Marvell HCI driver and another in LE SMP.
- Fix for an incorrect connected state check
- Fix for the bondable requirement during pairing (an issue which had
  crept in because "pairable" was used when the actual meaning was
  "bondable"; these have different meanings in Bluetooth)"

Along with those are some late-breaking hardware support patches in
brcmfmac and b43 as well as a stray ath9k patch.
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
2064 files changed:
CREDITS
Documentation/ABI/testing/sysfs-class-net
Documentation/Changes
Documentation/DocBook/gadget.tmpl
Documentation/DocBook/genericirq.tmpl
Documentation/DocBook/kernel-locking.tmpl
Documentation/DocBook/libata.tmpl
Documentation/DocBook/media/Makefile
Documentation/DocBook/media_api.tmpl
Documentation/DocBook/mtdnand.tmpl
Documentation/DocBook/regulator.tmpl
Documentation/DocBook/uio-howto.tmpl
Documentation/DocBook/usb.tmpl
Documentation/DocBook/writing-an-alsa-driver.tmpl
Documentation/accounting/getdelays.c
Documentation/acpi/enumeration.txt
Documentation/cpu-freq/intel-pstate.txt
Documentation/devicetree/bindings/arm/armada-38x.txt
Documentation/devicetree/bindings/arm/exynos/power_domain.txt
Documentation/devicetree/bindings/arm/l2cc.txt
Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
Documentation/devicetree/bindings/i2c/i2c-rk3x.txt [new file with mode: 0644]
Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/amd-xgbe-phy.txt
Documentation/devicetree/bindings/net/amd-xgbe.txt
Documentation/devicetree/bindings/net/broadcom-systemport.txt
Documentation/devicetree/bindings/net/davinci-mdio.txt
Documentation/devicetree/bindings/net/ieee802154/cc2520.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/marvell-pp2.txt [new file with mode: 0644]
Documentation/devicetree/bindings/net/sh_eth.txt
Documentation/devicetree/bindings/net/stmmac.txt
Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
Documentation/devicetree/bindings/vendor-prefixes.txt
Documentation/email-clients.txt
Documentation/hwmon/ntc_thermistor
Documentation/input/event-codes.txt
Documentation/kbuild/makefiles.txt
Documentation/kernel-parameters.txt
Documentation/laptops/00-INDEX
Documentation/laptops/freefall.c [new file with mode: 0644]
Documentation/laptops/hpfall.c [deleted file]
Documentation/memory-hotplug.txt
Documentation/networking/bonding.txt
Documentation/networking/filter.txt
Documentation/networking/i40e.txt
Documentation/networking/ip-sysctl.txt
Documentation/networking/packet_mmap.txt
Documentation/networking/phy.txt
Documentation/networking/pktgen.txt
Documentation/networking/timestamping.txt
Documentation/networking/timestamping/timestamping.c
Documentation/ptp/testptp.c
Documentation/sound/alsa/HD-Audio-Models.txt
Documentation/sysctl/kernel.txt
Documentation/sysctl/vm.txt
Documentation/thermal/nouveau_thermal
Documentation/trace/postprocess/trace-vmscan-postprocess.pl
MAINTAINERS
Makefile
arch/arc/include/asm/cache.h
arch/arc/include/uapi/asm/ptrace.h
arch/arc/kernel/ctx_sw_asm.S
arch/arc/kernel/devtree.c
arch/arc/kernel/head.S
arch/arc/kernel/ptrace.c
arch/arc/kernel/smp.c
arch/arc/kernel/vmlinux.lds.S
arch/arc/mm/cache_arc700.c
arch/arm/Kconfig
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-evm.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am335x-igep0033.dtsi
arch/arm/boot/dts/am43x-epos-evm.dts
arch/arm/boot/dts/armada-375-db.dts
arch/arm/boot/dts/armada-380.dtsi
arch/arm/boot/dts/armada-385-db.dts
arch/arm/boot/dts/armada-385-rd.dts
arch/arm/boot/dts/armada-385.dtsi
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/at91sam9261.dtsi
arch/arm/boot/dts/at91sam9261ek.dts
arch/arm/boot/dts/at91sam9n12.dtsi
arch/arm/boot/dts/at91sam9x5.dtsi
arch/arm/boot/dts/dra7-evm.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/dra7xx-clocks.dtsi
arch/arm/boot/dts/exynos4.dtsi
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/hi3620.dtsi
arch/arm/boot/dts/imx51-babbage.dts
arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
arch/arm/boot/dts/imx53-m53evk.dts
arch/arm/boot/dts/imx6dl-hummingboard.dts
arch/arm/boot/dts/imx6q-gw51xx.dts
arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
arch/arm/boot/dts/imx6qdl-microsom.dtsi
arch/arm/boot/dts/imx6sl.dtsi
arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
arch/arm/boot/dts/omap3-beagle-xm.dts
arch/arm/boot/dts/omap3-evm-common.dtsi
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7791.dtsi
arch/arm/boot/dts/sama5d3_gmac.dtsi
arch/arm/boot/dts/socfpga.dtsi
arch/arm/boot/dts/stih415.dtsi
arch/arm/boot/dts/stih416-b2020-revE.dts [deleted file]
arch/arm/boot/dts/stih416-b2020e.dts [new file with mode: 0644]
arch/arm/boot/dts/stih416.dtsi
arch/arm/common/scoop.c
arch/arm/configs/bcm_defconfig
arch/arm/configs/imx_v6_v7_defconfig
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/mvebu_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/crypto/aesbs-glue.c
arch/arm/include/asm/ftrace.h
arch/arm/include/asm/mach/arch.h
arch/arm/include/asm/mcpm.h
arch/arm/include/asm/thread_info.h
arch/arm/kernel/devtree.c
arch/arm/kernel/kprobes-test-arm.c
arch/arm/kernel/kprobes-test.c
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/probes-arm.c
arch/arm/kernel/ptrace.c
arch/arm/kernel/topology.c
arch/arm/mach-bcm/Kconfig
arch/arm/mach-berlin/Kconfig
arch/arm/mach-cns3xxx/Kconfig
arch/arm/mach-davinci/Kconfig
arch/arm/mach-exynos/Kconfig
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/firmware.c
arch/arm/mach-exynos/hotplug.c
arch/arm/mach-exynos/mcpm-exynos.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-exynos/pm.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-highbank/Kconfig
arch/arm/mach-imx/Kconfig
arch/arm/mach-imx/clk-gate2.c
arch/arm/mach-imx/clk-imx6q.c
arch/arm/mach-imx/clk-imx6sl.c
arch/arm/mach-integrator/Kconfig
arch/arm/mach-integrator/impd1.c
arch/arm/mach-integrator/integrator_ap.c
arch/arm/mach-integrator/integrator_cp.c
arch/arm/mach-keystone/Kconfig
arch/arm/mach-moxart/Kconfig
arch/arm/mach-mvebu/Kconfig
arch/arm/mach-mvebu/Makefile
arch/arm/mach-mvebu/board-v7.c
arch/arm/mach-mvebu/coherency.c
arch/arm/mach-mvebu/headsmp-a9.S
arch/arm/mach-mvebu/pmsu.c
arch/arm/mach-mvebu/pmsu_ll.S [new file with mode: 0644]
arch/arm/mach-nomadik/Kconfig
arch/arm/mach-omap2/Kconfig
arch/arm/mach-omap2/Makefile
arch/arm/mach-omap2/clkt_dpll.c
arch/arm/mach-omap2/cm-regbits-34xx.h
arch/arm/mach-omap2/cm33xx.h
arch/arm/mach-omap2/common.h
arch/arm/mach-omap2/devices.c
arch/arm/mach-omap2/dsp.c
arch/arm/mach-omap2/gpmc-nand.c
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-omap2/id.c
arch/arm/mach-omap2/mux.c
arch/arm/mach-omap2/omap4-common.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_54xx_data.c
arch/arm/mach-omap2/omap_hwmod_7xx_data.c
arch/arm/mach-omap2/prm-regbits-34xx.h
arch/arm/mach-omap2/soc.h
arch/arm/mach-prima2/Kconfig
arch/arm/mach-qcom/Kconfig
arch/arm/mach-s3c24xx/Kconfig
arch/arm/mach-s3c64xx/Kconfig
arch/arm/mach-s5p64x0/Kconfig
arch/arm/mach-s5pc100/Kconfig
arch/arm/mach-s5pv210/Kconfig
arch/arm/mach-sa1100/collie.c
arch/arm/mach-shmobile/Kconfig
arch/arm/mach-spear/Kconfig
arch/arm/mach-sti/Kconfig
arch/arm/mach-sunxi/sunxi.c
arch/arm/mach-tegra/Kconfig
arch/arm/mach-u300/Kconfig
arch/arm/mach-ux500/Kconfig
arch/arm/mach-vexpress/Kconfig
arch/arm/mach-vt8500/Kconfig
arch/arm/mach-zynq/Kconfig
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/nommu.c
arch/arm/mm/proc-arm925.S
arch/arm/net/bpf_jit_32.c
arch/arm/plat-samsung/Kconfig
arch/arm/xen/grant-table.c
arch/arm64/Kconfig
arch/arm64/boot/dts/apm-mustang.dts
arch/arm64/boot/dts/apm-storm.dtsi
arch/arm64/configs/defconfig
arch/arm64/crypto/aes-glue.c
arch/arm64/crypto/ghash-ce-core.S
arch/arm64/crypto/ghash-ce-glue.c
arch/arm64/include/asm/Kbuild
arch/arm64/include/asm/dma-mapping.h
arch/arm64/include/asm/memory.h
arch/arm64/include/asm/pgtable.h
arch/arm64/include/asm/ptrace.h
arch/arm64/include/uapi/asm/posix_types.h [new file with mode: 0644]
arch/arm64/include/uapi/asm/sigcontext.h
arch/arm64/kernel/efi-entry.S
arch/arm64/kernel/efi-stub.c
arch/arm64/kernel/entry-ftrace.S
arch/arm64/kernel/entry.S
arch/arm64/kernel/head.S
arch/arm64/kernel/ptrace.c
arch/arm64/mm/copypage.c
arch/arm64/mm/flush.c
arch/arm64/mm/init.c
arch/blackfin/configs/BF609-EZKIT_defconfig
arch/blackfin/kernel/vmlinux.lds.S
arch/blackfin/mach-bf533/boards/blackstamp.c
arch/blackfin/mach-bf537/boards/cm_bf537e.c
arch/blackfin/mach-bf537/boards/cm_bf537u.c
arch/blackfin/mach-bf537/boards/tcm_bf537.c
arch/blackfin/mach-bf548/boards/ezkit.c
arch/blackfin/mach-bf561/boards/acvilon.c
arch/blackfin/mach-bf561/boards/cm_bf561.c
arch/blackfin/mach-bf561/boards/ezkit.c
arch/blackfin/mach-bf609/boards/ezkit.c
arch/blackfin/mach-bf609/include/mach/pm.h
arch/blackfin/mach-bf609/pm.c
arch/blackfin/mach-common/ints-priority.c
arch/ia64/hp/common/sba_iommu.c
arch/ia64/include/uapi/asm/fcntl.h
arch/m68k/kernel/head.S
arch/m68k/kernel/time.c
arch/mips/Kconfig
arch/mips/include/asm/sigcontext.h
arch/mips/include/asm/uasm.h
arch/mips/include/uapi/asm/inst.h
arch/mips/include/uapi/asm/sigcontext.h
arch/mips/kernel/asm-offsets.c
arch/mips/kernel/irq-msc01.c
arch/mips/kernel/pm-cps.c
arch/mips/kernel/r4k_fpu.S
arch/mips/kernel/signal.c
arch/mips/kernel/signal32.c
arch/mips/kernel/smp-cps.c
arch/mips/kvm/kvm_mips.c
arch/mips/math-emu/ieee754.c
arch/mips/mm/uasm-micromips.c
arch/mips/mm/uasm-mips.c
arch/mips/mm/uasm.c
arch/mips/net/bpf_jit.c
arch/parisc/include/uapi/asm/signal.h
arch/parisc/kernel/hardware.c
arch/parisc/kernel/sys_parisc32.c
arch/parisc/kernel/syscall_table.S
arch/parisc/mm/init.c
arch/powerpc/Kconfig
arch/powerpc/Kconfig.debug
arch/powerpc/include/asm/code-patching.h
arch/powerpc/include/asm/cputable.h
arch/powerpc/include/asm/kvm_book3s_64.h
arch/powerpc/include/asm/mmu-hash64.h
arch/powerpc/include/asm/mmu.h
arch/powerpc/include/asm/opal.h
arch/powerpc/include/asm/perf_event_server.h
arch/powerpc/include/asm/ppc_asm.h
arch/powerpc/include/asm/swab.h
arch/powerpc/kernel/cputable.c
arch/powerpc/kernel/ftrace.c
arch/powerpc/kernel/idle_power7.S
arch/powerpc/kernel/iomap.c
arch/powerpc/kernel/kprobes.c
arch/powerpc/kernel/module_64.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/prom_init.c
arch/powerpc/kernel/prom_init_check.sh
arch/powerpc/kernel/rtas_flash.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/signal_32.c
arch/powerpc/kernel/signal_64.c
arch/powerpc/kernel/smp.c
arch/powerpc/kvm/book3s_64_mmu_hv.c
arch/powerpc/kvm/book3s_hv_interrupts.S
arch/powerpc/kvm/book3s_hv_rm_mmu.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/kvm/book3s_interrupts.S
arch/powerpc/kvm/book3s_rmhandlers.S
arch/powerpc/kvm/book3s_rtas.c
arch/powerpc/kvm/e500_mmu_host.c
arch/powerpc/lib/mem_64.S
arch/powerpc/lib/sstep.c
arch/powerpc/mm/mmu_context_nohash.c
arch/powerpc/net/bpf_jit_comp.c
arch/powerpc/perf/core-book3s.c
arch/powerpc/perf/power8-pmu.c
arch/powerpc/platforms/cell/cbe_thermal.c
arch/powerpc/platforms/cell/spu_syscalls.c
arch/powerpc/platforms/cell/spufs/Makefile
arch/powerpc/platforms/cell/spufs/syscalls.c
arch/powerpc/platforms/powernv/Makefile
arch/powerpc/platforms/powernv/opal-elog.c
arch/powerpc/platforms/powernv/opal-takeover.S [deleted file]
arch/powerpc/platforms/pseries/dlpar.c
arch/powerpc/platforms/pseries/reconfig.c
arch/powerpc/sysdev/dart_iommu.c
arch/s390/configs/default_defconfig
arch/s390/configs/gcov_defconfig
arch/s390/configs/performance_defconfig
arch/s390/configs/zfcpdump_defconfig
arch/s390/defconfig
arch/s390/include/asm/mmu_context.h
arch/s390/include/asm/switch_to.h
arch/s390/include/uapi/asm/Kbuild
arch/s390/include/uapi/asm/sie.h
arch/s390/include/uapi/asm/ucontext.h
arch/s390/kernel/compat_linux.h
arch/s390/kernel/head.S
arch/s390/kernel/ptrace.c
arch/s390/net/bpf_jit_comp.c
arch/s390/pci/pci.c
arch/sh/Makefile
arch/sparc/Kconfig
arch/sparc/crypto/aes_glue.c
arch/sparc/include/asm/atomic_32.h
arch/sparc/include/asm/atomic_64.h
arch/sparc/include/asm/auxio.h
arch/sparc/include/asm/auxio_32.h
arch/sparc/include/asm/auxio_64.h
arch/sparc/include/asm/bitext.h
arch/sparc/include/asm/bitops_32.h
arch/sparc/include/asm/bitops_64.h
arch/sparc/include/asm/btext.h
arch/sparc/include/asm/bug.h
arch/sparc/include/asm/cacheflush_32.h
arch/sparc/include/asm/cacheflush_64.h
arch/sparc/include/asm/checksum_32.h
arch/sparc/include/asm/checksum_64.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/cmpxchg_64.h
arch/sparc/include/asm/cpudata.h
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/delay_32.h
arch/sparc/include/asm/delay_64.h
arch/sparc/include/asm/device.h
arch/sparc/include/asm/dma-mapping.h
arch/sparc/include/asm/ebus_dma.h
arch/sparc/include/asm/floppy_32.h
arch/sparc/include/asm/floppy_64.h
arch/sparc/include/asm/ftrace.h
arch/sparc/include/asm/highmem.h
arch/sparc/include/asm/hvtramp.h
arch/sparc/include/asm/hypervisor.h
arch/sparc/include/asm/idprom.h
arch/sparc/include/asm/io-unit.h
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/iommu_32.h
arch/sparc/include/asm/iommu_64.h
arch/sparc/include/asm/irq_32.h
arch/sparc/include/asm/irq_64.h
arch/sparc/include/asm/irqflags_32.h
arch/sparc/include/asm/kdebug_64.h
arch/sparc/include/asm/kgdb.h
arch/sparc/include/asm/kprobes.h
arch/sparc/include/asm/ldc.h
arch/sparc/include/asm/leon.h
arch/sparc/include/asm/leon_pci.h
arch/sparc/include/asm/mc146818rtc.h
arch/sparc/include/asm/mdesc.h
arch/sparc/include/asm/mmu_64.h
arch/sparc/include/asm/mmu_context_64.h
arch/sparc/include/asm/nmi.h
arch/sparc/include/asm/oplib_32.h
arch/sparc/include/asm/oplib_64.h
arch/sparc/include/asm/page.h
arch/sparc/include/asm/page_64.h
arch/sparc/include/asm/pci_64.h
arch/sparc/include/asm/pcic.h
arch/sparc/include/asm/pcr.h
arch/sparc/include/asm/pgalloc_32.h
arch/sparc/include/asm/pgalloc_64.h
arch/sparc/include/asm/pgtable_32.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/processor_32.h
arch/sparc/include/asm/processor_64.h
arch/sparc/include/asm/prom.h
arch/sparc/include/asm/ptrace.h
arch/sparc/include/asm/setup.h
arch/sparc/include/asm/sfp-machine_32.h
arch/sparc/include/asm/smp_32.h
arch/sparc/include/asm/smp_64.h
arch/sparc/include/asm/spitfire.h
arch/sparc/include/asm/stacktrace.h
arch/sparc/include/asm/starfire.h
arch/sparc/include/asm/string_32.h
arch/sparc/include/asm/string_64.h
arch/sparc/include/asm/switch_to_32.h
arch/sparc/include/asm/switch_to_64.h
arch/sparc/include/asm/syscalls.h
arch/sparc/include/asm/timer_32.h
arch/sparc/include/asm/timer_64.h
arch/sparc/include/asm/tlb_64.h
arch/sparc/include/asm/tlbflush_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/include/asm/trap_block.h
arch/sparc/include/asm/uaccess.h
arch/sparc/include/asm/uaccess_32.h
arch/sparc/include/asm/uaccess_64.h
arch/sparc/include/asm/vio.h
arch/sparc/include/asm/visasm.h
arch/sparc/include/asm/xor_64.h
arch/sparc/include/uapi/asm/unistd.h
arch/sparc/kernel/Makefile
arch/sparc/kernel/audit.c
arch/sparc/kernel/auxio_32.c
arch/sparc/kernel/btext.c
arch/sparc/kernel/compat_audit.c
arch/sparc/kernel/cpu.c
arch/sparc/kernel/cpumap.h
arch/sparc/kernel/devices.c
arch/sparc/kernel/entry.h
arch/sparc/kernel/iommu.c
arch/sparc/kernel/iommu_common.h
arch/sparc/kernel/ioport.c
arch/sparc/kernel/irq.h
arch/sparc/kernel/irq_32.c
arch/sparc/kernel/kernel.h
arch/sparc/kernel/kgdb_64.c
arch/sparc/kernel/kprobes.c
arch/sparc/kernel/leon_kernel.c
arch/sparc/kernel/leon_pci.c
arch/sparc/kernel/leon_pci_grpci1.c
arch/sparc/kernel/leon_pci_grpci2.c
arch/sparc/kernel/leon_pmc.c
arch/sparc/kernel/leon_smp.c
arch/sparc/kernel/of_device_common.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/pci_impl.h
arch/sparc/kernel/pci_sun4v.h
arch/sparc/kernel/pcic.c
arch/sparc/kernel/perf_event.c
arch/sparc/kernel/process_32.c
arch/sparc/kernel/process_64.c
arch/sparc/kernel/prom.h
arch/sparc/kernel/prom_64.c
arch/sparc/kernel/psycho_common.h
arch/sparc/kernel/ptrace_32.c
arch/sparc/kernel/setup_32.c
arch/sparc/kernel/signal32.c
arch/sparc/kernel/signal_32.c
arch/sparc/kernel/signal_64.c
arch/sparc/kernel/smp_32.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/sun4d_irq.c
arch/sparc/kernel/sys32.S
arch/sparc/kernel/sys_sparc32.c
arch/sparc/kernel/sys_sparc_32.c
arch/sparc/kernel/sys_sparc_64.c
arch/sparc/kernel/systbls.h
arch/sparc/kernel/systbls_32.S
arch/sparc/kernel/systbls_64.S
arch/sparc/kernel/tadpole.c [deleted file]
arch/sparc/kernel/time_32.c
arch/sparc/kernel/traps_32.c
arch/sparc/kernel/traps_64.c
arch/sparc/kernel/unaligned_32.c
arch/sparc/kernel/unaligned_64.c
arch/sparc/kernel/windows.c
arch/sparc/lib/Makefile
arch/sparc/math-emu/sfp-util_32.h
arch/sparc/math-emu/sfp-util_64.h
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/init_32.c
arch/sparc/mm/init_64.c
arch/sparc/mm/init_64.h
arch/sparc/mm/io-unit.c
arch/sparc/mm/iommu.c
arch/sparc/mm/leon_mm.c
arch/sparc/mm/mm_32.h [new file with mode: 0644]
arch/sparc/mm/srmmu.c
arch/sparc/mm/srmmu.h [deleted file]
arch/sparc/mm/tsb.c
arch/sparc/net/bpf_jit_comp.c
arch/sparc/prom/misc_64.c
arch/um/kernel/tlb.c
arch/um/kernel/trap.c
arch/um/os-Linux/skas/process.c
arch/unicore32/Kconfig
arch/unicore32/include/asm/io.h
arch/unicore32/include/asm/pgtable.h
arch/unicore32/include/asm/ptrace.h
arch/unicore32/kernel/clock.c
arch/unicore32/kernel/ksyms.c
arch/unicore32/kernel/ksyms.h
arch/unicore32/kernel/module.c
arch/unicore32/kernel/process.c
arch/unicore32/kernel/setup.c
arch/unicore32/mm/alignment.c
arch/unicore32/mm/proc-syms.c
arch/x86/Kconfig
arch/x86/boot/compressed/aslr.c
arch/x86/boot/header.S
arch/x86/boot/tools/build.c
arch/x86/crypto/sha512_ssse3_glue.c
arch/x86/include/asm/irq.h
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/ptrace.h
arch/x86/kernel/apic/hw_nmi.c
arch/x86/kernel/apm_32.c
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/entry_32.S
arch/x86/kernel/espfix_64.c
arch/x86/kernel/kprobes/core.c
arch/x86/kernel/signal.c
arch/x86/kernel/traps.c
arch/x86/kernel/tsc.c
arch/x86/kvm/svm.c
arch/x86/kvm/x86.c
arch/x86/net/bpf_jit_comp.c
arch/x86/vdso/Makefile
arch/x86/vdso/vclock_gettime.c
arch/x86/vdso/vdso-fakesections.c
arch/x86/vdso/vdso-layout.lds.S
arch/x86/vdso/vdso.lds.S
arch/x86/vdso/vdso2c.c
arch/x86/vdso/vdso2c.h
arch/x86/vdso/vdso32/vdso-fakesections.c [new file with mode: 0644]
arch/x86/vdso/vdsox32.lds.S
arch/x86/vdso/vma.c
arch/x86/xen/enlighten.c
arch/x86/xen/grant-table.c
arch/x86/xen/setup.c
arch/x86/xen/xen-ops.h
arch/xtensa/kernel/vectors.S
arch/xtensa/kernel/vmlinux.lds.S
arch/xtensa/mm/init.c
block/bio.c
block/blk-cgroup.c
block/blk-cgroup.h
block/blk-core.c
block/blk-flush.c
block/blk-merge.c
block/blk-mq-tag.c
block/blk-mq-tag.h
block/blk-mq.c
block/blk-tag.c
block/blk.h
block/compat_ioctl.c
block/elevator.c
drivers/acpi/ac.c
drivers/acpi/acpi_lpss.c
drivers/acpi/acpi_pnp.c
drivers/acpi/battery.c
drivers/acpi/ec.c
drivers/acpi/osl.c
drivers/acpi/resource.c
drivers/acpi/tables.c
drivers/acpi/video.c
drivers/acpi/video_detect.c
drivers/ata/ahci.c
drivers/ata/ahci.h
drivers/ata/ahci_imx.c
drivers/ata/ahci_platform.c
drivers/ata/ahci_xgene.c
drivers/ata/libahci.c
drivers/ata/libahci_platform.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/pata_ep93xx.c
drivers/atm/eni.c
drivers/base/dma-contiguous.c
drivers/base/platform.c
drivers/block/drbd/drbd_nl.c
drivers/block/drbd/drbd_receiver.c
drivers/block/floppy.c
drivers/block/null_blk.c
drivers/block/rbd.c
drivers/block/zram/zram_drv.c
drivers/bus/Kconfig
drivers/char/hw_random/core.c
drivers/char/hw_random/virtio-rng.c
drivers/char/i8k.c
drivers/char/random.c
drivers/clk/clk-s2mps11.c
drivers/clk/qcom/mmcc-msm8960.c
drivers/clk/samsung/clk-exynos4.c
drivers/clk/samsung/clk-exynos5250.c
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-s3c2410.c
drivers/clk/samsung/clk-s3c64xx.c
drivers/clk/spear/spear3xx_clock.c
drivers/clk/sunxi/clk-sun6i-apb0-gates.c
drivers/clk/ti/apll.c
drivers/clk/ti/dpll.c
drivers/clk/ti/mux.c
drivers/clocksource/exynos_mct.c
drivers/cpufreq/Kconfig
drivers/cpufreq/Kconfig.arm
drivers/cpufreq/Makefile
drivers/cpufreq/cpufreq-cpu0.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/sa1110-cpufreq.c
drivers/cpuidle/cpuidle-armada-370-xp.c
drivers/crypto/caam/jr.c
drivers/dma/cppi41.c
drivers/dma/imx-sdma.c
drivers/firewire/Kconfig
drivers/firewire/net.c
drivers/firewire/ohci.c
drivers/firmware/efi/efi-pstore.c
drivers/firmware/efi/efi.c
drivers/firmware/efi/fdt.c
drivers/gpio/gpio-mcp23s08.c
drivers/gpio/gpio-rcar.c
drivers/gpu/drm/drm_drv.c
drivers/gpu/drm/drm_modeset_lock.c
drivers/gpu/drm/exynos/exynos_drm_dpi.c
drivers/gpu/drm/exynos/exynos_drm_drv.c
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_hdmi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/exynos/regs-mixer.h
drivers/gpu/drm/i2c/tda998x_drv.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_dma.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_context.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_render_state.c
drivers/gpu/drm/i915/i915_gem_stolen.c
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_reg.h
drivers/gpu/drm/i915/intel_bios.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_drv.h
drivers/gpu/drm/i915/intel_dsi.c
drivers/gpu/drm/i915/intel_dsi_cmd.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_opregion.c
drivers/gpu/drm/i915/intel_panel.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.h
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/i915/intel_sprite.c
drivers/gpu/drm/i915/intel_uncore.c
drivers/gpu/drm/msm/hdmi/hdmi.c
drivers/gpu/drm/msm/hdmi/hdmi.h
drivers/gpu/drm/msm/hdmi/hdmi_connector.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fbdev.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_mmu.h
drivers/gpu/drm/nouveau/Makefile
drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
drivers/gpu/drm/nouveau/core/engine/disp/base.c
drivers/gpu/drm/nouveau/core/engine/disp/dport.c
drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
drivers/gpu/drm/nouveau/core/engine/graph/fuc/gpc.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hub.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubgm107.fuc5.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnv108.fuc5.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvc0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvd7.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnve0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/hubnvf0.fuc.h
drivers/gpu/drm/nouveau/core/engine/graph/fuc/macros.fuc
drivers/gpu/drm/nouveau/core/engine/graph/fuc/os.h
drivers/gpu/drm/nouveau/core/engine/graph/nv50.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.c
drivers/gpu/drm/nouveau/core/engine/graph/nvc0.h
drivers/gpu/drm/nouveau/core/include/subdev/i2c.h
drivers/gpu/drm/nouveau/core/subdev/clock/nve0.c
drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c [new file with mode: 0644]
drivers/gpu/drm/nouveau/core/subdev/ibus/nve0.c
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/host.fuc
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nv108.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nva3.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvc0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/pwr/fuc/nvd0.fuc.h
drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
drivers/gpu/drm/nouveau/nouveau_display.c
drivers/gpu/drm/nouveau/nouveau_drm.c
drivers/gpu/drm/nouveau/nouveau_fbcon.c
drivers/gpu/drm/nouveau/nouveau_fbcon.h
drivers/gpu/drm/nouveau/nv50_display.c
drivers/gpu/drm/qxl/qxl_irq.c
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/atombios_encoders.c
drivers/gpu/drm/radeon/ci_dpm.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/cikd.h
drivers/gpu/drm/radeon/cypress_dpm.c
drivers/gpu/drm/radeon/evergreen.c
drivers/gpu/drm/radeon/evergreen_reg.h
drivers/gpu/drm/radeon/kv_dpm.c
drivers/gpu/drm/radeon/ni_dpm.c
drivers/gpu/drm/radeon/r500_reg.h
drivers/gpu/drm/radeon/r600.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_atombios.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_cs.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_display.c
drivers/gpu/drm/radeon/radeon_drv.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mode.h
drivers/gpu/drm/radeon/radeon_pm.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv515.c
drivers/gpu/drm/radeon/rv770_dpm.c
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/trinity_dpm.c
drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
drivers/hid/Kconfig
drivers/hid/hid-ids.h
drivers/hid/hid-rmi.c
drivers/hid/hid-sensor-hub.c
drivers/hid/usbhid/hid-quirks.c
drivers/hsi/clients/ssi_protocol.c
drivers/hv/connection.c
drivers/hv/hv_fcopy.c
drivers/hv/hv_kvp.c
drivers/hv/hv_util.c
drivers/hwmon/Kconfig
drivers/hwmon/adc128d818.c
drivers/hwmon/adm1021.c
drivers/hwmon/adm1029.c
drivers/hwmon/adm1031.c
drivers/hwmon/adt7470.c
drivers/hwmon/amc6821.c
drivers/hwmon/da9052-hwmon.c
drivers/hwmon/da9055-hwmon.c
drivers/hwmon/emc2103.c
drivers/hwmon/gpio-fan.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/smsc47m192.c
drivers/hwmon/w83l786ng.c
drivers/i2c/busses/Kconfig
drivers/i2c/busses/Makefile
drivers/i2c/busses/i2c-rk3x.c [new file with mode: 0644]
drivers/i2c/busses/i2c-sun6i-p2wi.c [new file with mode: 0644]
drivers/i2c/muxes/Kconfig
drivers/ide/Kconfig
drivers/ide/ide-probe.c
drivers/iio/accel/hid-sensor-accel-3d.c
drivers/iio/accel/mma8452.c
drivers/iio/adc/ad799x.c
drivers/iio/adc/at91_adc.c
drivers/iio/adc/men_z188_adc.c
drivers/iio/adc/ti_am335x_adc.c
drivers/iio/adc/twl4030-madc.c
drivers/iio/common/hid-sensors/hid-sensor-trigger.c
drivers/iio/gyro/hid-sensor-gyro-3d.c
drivers/iio/industrialio-event.c
drivers/iio/inkern.c
drivers/iio/light/hid-sensor-als.c
drivers/iio/light/hid-sensor-prox.c
drivers/iio/light/tcs3472.c
drivers/iio/magnetometer/ak8975.c
drivers/iio/magnetometer/hid-sensor-magn-3d.c
drivers/iio/pressure/hid-sensor-press.c
drivers/iio/pressure/mpl3115.c
drivers/infiniband/hw/amso1100/c2_provider.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/ev.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/provider.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/infiniband/hw/mlx5/cq.c
drivers/infiniband/hw/mlx5/mad.c
drivers/infiniband/hw/mlx5/main.c
drivers/infiniband/hw/mlx5/mem.c
drivers/infiniband/hw/mlx5/mlx5_ib.h
drivers/infiniband/hw/mlx5/mr.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/mlx5/srq.c
drivers/infiniband/ulp/ipoib/ipoib_main.c
drivers/input/input.c
drivers/input/keyboard/st-keyscan.c
drivers/input/misc/sirfsoc-onkey.c
drivers/input/mouse/synaptics.c
drivers/input/serio/i8042-x86ia64io.h
drivers/input/tablet/wacom_wac.c
drivers/input/touchscreen/ti_am335x_tsc.c
drivers/iommu/amd_iommu_v2.c
drivers/iommu/fsl_pamu.c
drivers/iommu/fsl_pamu_domain.c
drivers/iommu/intel-iommu.c
drivers/irqchip/irq-armada-370-xp.c
drivers/irqchip/irq-brcmstb-l2.c
drivers/irqchip/irq-gic.c
drivers/irqchip/spear-shirq.c
drivers/isdn/capi/capi.c
drivers/isdn/gigaset/bas-gigaset.c
drivers/isdn/hisax/Kconfig
drivers/isdn/hisax/l3ni1.c
drivers/isdn/i4l/isdn_net.c
drivers/isdn/i4l/isdn_ppp.c
drivers/isdn/pcbit/drv.c
drivers/macintosh/smu.c
drivers/md/dm-cache-metadata.c
drivers/md/dm-crypt.c
drivers/md/dm-io.c
drivers/md/dm-mpath.c
drivers/md/dm-thin-metadata.c
drivers/md/dm-zero.c
drivers/md/dm.c
drivers/md/md.c
drivers/media/dvb-core/dvb_net.c
drivers/media/dvb-frontends/si2168.c
drivers/media/dvb-frontends/si2168_priv.h
drivers/media/dvb-frontends/tda10071.c
drivers/media/dvb-frontends/tda10071_priv.h
drivers/media/pci/saa7134/saa7134-empress.c
drivers/media/platform/davinci/vpif_capture.c
drivers/media/platform/davinci/vpif_display.c
drivers/media/tuners/si2157.c
drivers/media/usb/dvb-usb-v2/af9035.c
drivers/media/usb/gspca/pac7302.c
drivers/media/usb/hdpvr/hdpvr-video.c
drivers/media/v4l2-core/v4l2-dv-timings.c
drivers/memstick/host/rtsx_pci_ms.c
drivers/mfd/Kconfig
drivers/mfd/ab8500-core.c
drivers/misc/Kconfig
drivers/misc/sgi-xp/xpnet.c
drivers/misc/vexpress-syscfg.c
drivers/misc/vmw_balloon.c
drivers/mtd/chips/cfi_cmdset_0001.c
drivers/mtd/devices/elm.c
drivers/mtd/nand/nand_base.c
drivers/mtd/ubi/fastmap.c
drivers/net/Kconfig
drivers/net/Makefile
drivers/net/arcnet/arcnet.c
drivers/net/bonding/bond_3ad.c
drivers/net/bonding/bond_alb.c
drivers/net/bonding/bond_alb.h
drivers/net/bonding/bond_debugfs.c
drivers/net/bonding/bond_main.c
drivers/net/bonding/bond_netlink.c
drivers/net/bonding/bond_options.c
drivers/net/bonding/bond_procfs.c
drivers/net/bonding/bond_sysfs.c
drivers/net/bonding/bonding.h
drivers/net/caif/caif_serial.c
drivers/net/caif/caif_spi.c
drivers/net/caif/caif_virtio.c
drivers/net/can/c_can/c_can_platform.c
drivers/net/can/dev.c
drivers/net/can/sja1000/sja1000.c
drivers/net/can/slcan.c
drivers/net/cris/eth_v10.c
drivers/net/dummy.c
drivers/net/eql.c
drivers/net/ethernet/8390/lib8390.c
drivers/net/ethernet/8390/mac8390.c
drivers/net/ethernet/adi/Kconfig
drivers/net/ethernet/adi/bfin_mac.c
drivers/net/ethernet/adi/bfin_mac.h
drivers/net/ethernet/allwinner/sun4i-emac.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/amd8111e.c
drivers/net/ethernet/amd/declance.c
drivers/net/ethernet/amd/xgbe/Makefile
drivers/net/ethernet/amd/xgbe/xgbe-common.h
drivers/net/ethernet/amd/xgbe/xgbe-dcb.c [new file with mode: 0644]
drivers/net/ethernet/amd/xgbe/xgbe-debugfs.c
drivers/net/ethernet/amd/xgbe/xgbe-desc.c
drivers/net/ethernet/amd/xgbe/xgbe-dev.c
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/amd/xgbe/xgbe-ethtool.c
drivers/net/ethernet/amd/xgbe/xgbe-main.c
drivers/net/ethernet/amd/xgbe/xgbe-mdio.c
drivers/net/ethernet/amd/xgbe/xgbe-ptp.c [new file with mode: 0644]
drivers/net/ethernet/amd/xgbe/xgbe.h
drivers/net/ethernet/arc/emac.h
drivers/net/ethernet/arc/emac_main.c
drivers/net/ethernet/atheros/atl1c/atl1c_hw.c
drivers/net/ethernet/atheros/atl1e/atl1e_hw.c
drivers/net/ethernet/atheros/atlx/atl1.c
drivers/net/ethernet/atheros/atlx/atl2.c
drivers/net/ethernet/broadcom/Kconfig
drivers/net/ethernet/broadcom/bcmsysport.c
drivers/net/ethernet/broadcom/bcmsysport.h
drivers/net/ethernet/broadcom/bnx2.c
drivers/net/ethernet/broadcom/bnx2.h
drivers/net/ethernet/broadcom/bnx2_fw.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_dcb.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_sriov.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_vfpf.h
drivers/net/ethernet/broadcom/cnic.c
drivers/net/ethernet/broadcom/cnic.h
drivers/net/ethernet/broadcom/cnic_defs.h
drivers/net/ethernet/broadcom/cnic_if.h
drivers/net/ethernet/broadcom/genet/Makefile
drivers/net/ethernet/broadcom/genet/bcmgenet.c
drivers/net/ethernet/broadcom/genet/bcmgenet.h
drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c [new file with mode: 0644]
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/broadcom/tg3.c
drivers/net/ethernet/brocade/bna/cna_fwimg.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/chelsio/Kconfig
drivers/net/ethernet/chelsio/cxgb4/Makefile
drivers/net/ethernet/chelsio/cxgb4/cxgb4.h
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h [new file with mode: 0644]
drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
drivers/net/ethernet/chelsio/cxgb4/cxgb4_uld.h
drivers/net/ethernet/chelsio/cxgb4/l2t.c
drivers/net/ethernet/chelsio/cxgb4/l2t.h
drivers/net/ethernet/chelsio/cxgb4/sge.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
drivers/net/ethernet/chelsio/cxgb4/t4_hw.h
drivers/net/ethernet/chelsio/cxgb4/t4_msg.h
drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
drivers/net/ethernet/chelsio/cxgb4/t4fw_api.h
drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
drivers/net/ethernet/cirrus/cs89x0.c
drivers/net/ethernet/cisco/enic/Makefile
drivers/net/ethernet/cisco/enic/enic.h
drivers/net/ethernet/cisco/enic/enic_api.c
drivers/net/ethernet/cisco/enic/enic_clsf.c [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_clsf.h [new file with mode: 0644]
drivers/net/ethernet/cisco/enic/enic_dev.c
drivers/net/ethernet/cisco/enic/enic_dev.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/enic_res.c
drivers/net/ethernet/cisco/enic/vnic_dev.c
drivers/net/ethernet/cisco/enic/vnic_dev.h
drivers/net/ethernet/cisco/enic/vnic_devcmd.h
drivers/net/ethernet/cisco/enic/vnic_enet.h
drivers/net/ethernet/cisco/enic/vnic_rq.h
drivers/net/ethernet/davicom/dm9000.c
drivers/net/ethernet/dec/tulip/de4x5.c
drivers/net/ethernet/dec/tulip/timer.c
drivers/net/ethernet/dnet.c
drivers/net/ethernet/emulex/benet/be.h
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_cmds.h
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/freescale/fec.h
drivers/net/ethernet/freescale/fec_main.c
drivers/net/ethernet/freescale/ucc_geth.c
drivers/net/ethernet/freescale/xgmac_mdio.c
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
drivers/net/ethernet/intel/e1000/e1000_hw.c
drivers/net/ethernet/intel/e1000e/82571.c
drivers/net/ethernet/intel/e1000e/defines.h
drivers/net/ethernet/intel/e1000e/ethtool.c
drivers/net/ethernet/intel/e1000e/ich8lan.c
drivers/net/ethernet/intel/e1000e/ich8lan.h
drivers/net/ethernet/intel/e1000e/mac.c
drivers/net/ethernet/intel/e1000e/manage.c
drivers/net/ethernet/intel/e1000e/netdev.c
drivers/net/ethernet/intel/e1000e/nvm.c
drivers/net/ethernet/intel/i40e/Makefile
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_adminq.c
drivers/net/ethernet/intel/i40e/i40e_adminq.h
drivers/net/ethernet/intel/i40e/i40e_common.c
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_ethtool.c
drivers/net/ethernet/intel/i40e/i40e_fcoe.c [new file with mode: 0644]
drivers/net/ethernet/intel/i40e/i40e_fcoe.h [new file with mode: 0644]
drivers/net/ethernet/intel/i40e/i40e_hmc.h
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.c
drivers/net/ethernet/intel/i40e/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_nvm.c
drivers/net/ethernet/intel/i40e/i40e_osdep.h
drivers/net/ethernet/intel/i40e/i40e_prototype.h
drivers/net/ethernet/intel/i40e/i40e_ptp.c
drivers/net/ethernet/intel/i40e/i40e_register.h
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_txrx.h
drivers/net/ethernet/intel/i40e/i40e_type.h
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.c
drivers/net/ethernet/intel/i40evf/i40e_adminq.h
drivers/net/ethernet/intel/i40evf/i40e_common.c
drivers/net/ethernet/intel/i40evf/i40e_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_lan_hmc.h
drivers/net/ethernet/intel/i40evf/i40e_register.h
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.h
drivers/net/ethernet/intel/i40evf/i40e_type.h
drivers/net/ethernet/intel/i40evf/i40evf_ethtool.c
drivers/net/ethernet/intel/i40evf/i40evf_main.c
drivers/net/ethernet/intel/i40evf/i40evf_virtchnl.c
drivers/net/ethernet/intel/igb/e1000_82575.c
drivers/net/ethernet/intel/igb/e1000_defines.h
drivers/net/ethernet/intel/igb/e1000_hw.h
drivers/net/ethernet/intel/igb/e1000_i210.c
drivers/net/ethernet/intel/igb/e1000_i210.h
drivers/net/ethernet/intel/igb/e1000_regs.h
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c
drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c
drivers/net/ethernet/intel/ixgbe/ixgbe_common.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c
drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c
drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c
drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c
drivers/net/ethernet/intel/ixgbe/ixgbe_type.h
drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c
drivers/net/ethernet/intel/ixgbevf/vf.h
drivers/net/ethernet/marvell/Kconfig
drivers/net/ethernet/marvell/Makefile
drivers/net/ethernet/marvell/mvneta.c
drivers/net/ethernet/marvell/mvpp2.c [new file with mode: 0644]
drivers/net/ethernet/marvell/skge.c
drivers/net/ethernet/mellanox/mlx4/cq.c
drivers/net/ethernet/mellanox/mlx4/en_cq.c
drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
drivers/net/ethernet/mellanox/mlx4/en_main.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_rx.c
drivers/net/ethernet/mellanox/mlx4/en_selftest.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/eq.c
drivers/net/ethernet/mellanox/mlx4/main.c
drivers/net/ethernet/mellanox/mlx4/mcg.c
drivers/net/ethernet/mellanox/mlx4/mlx4.h
drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
drivers/net/ethernet/mellanox/mlx4/port.c
drivers/net/ethernet/mellanox/mlx5/core/alloc.c
drivers/net/ethernet/mellanox/mlx5/core/cmd.c
drivers/net/ethernet/mellanox/mlx5/core/eq.c
drivers/net/ethernet/mellanox/mlx5/core/mad.c
drivers/net/ethernet/mellanox/mlx5/core/main.c
drivers/net/ethernet/mellanox/mlx5/core/mr.c
drivers/net/ethernet/mellanox/mlx5/core/pagealloc.c
drivers/net/ethernet/mellanox/mlx5/core/port.c
drivers/net/ethernet/micrel/ks8851_mll.c
drivers/net/ethernet/neterion/vxge/vxge-main.c
drivers/net/ethernet/octeon/octeon_mgmt.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
drivers/net/ethernet/qlogic/qlcnic/qlcnic_dcb.c
drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
drivers/net/ethernet/realtek/8139cp.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/renesas/sh_eth.c
drivers/net/ethernet/sfc/ef10.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/efx.h
drivers/net/ethernet/sfc/ethtool.c
drivers/net/ethernet/sfc/falcon.c
drivers/net/ethernet/sfc/mcdi_port.c
drivers/net/ethernet/sfc/net_driver.h
drivers/net/ethernet/sfc/nic.h
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/sfc/selftest.c
drivers/net/ethernet/sfc/siena.c
drivers/net/ethernet/sfc/tx.c
drivers/net/ethernet/sis/sis900.c
drivers/net/ethernet/smsc/smc911x.c
drivers/net/ethernet/stmicro/stmmac/common.h
drivers/net/ethernet/stmicro/stmmac/dwmac-socfpga.c
drivers/net/ethernet/stmicro/stmmac/dwmac1000.h
drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
drivers/net/ethernet/stmicro/stmmac/dwmac100_core.c
drivers/net/ethernet/stmicro/stmmac/enh_desc.c
drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/sun/niu.c
drivers/net/ethernet/sun/sunvnet.c
drivers/net/ethernet/ti/Kconfig
drivers/net/ethernet/ti/cpmac.c
drivers/net/ethernet/ti/cpsw.c
drivers/net/ethernet/ti/cpsw_ale.c
drivers/net/ethernet/ti/cpsw_ale.h
drivers/net/ethernet/ti/cpts.c
drivers/net/ethernet/ti/davinci_mdio.c
drivers/net/ethernet/ti/tlan.c
drivers/net/ethernet/ti/tlan.h
drivers/net/ethernet/tile/tilegx.c
drivers/net/ethernet/tile/tilepro.c
drivers/net/ethernet/toshiba/ps3_gelic_wireless.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/ethernet/xilinx/xilinx_axienet_mdio.c
drivers/net/ethernet/xilinx/xilinx_emaclite.c
drivers/net/fddi/defxx.c
drivers/net/fddi/defxx.h
drivers/net/hamradio/6pack.c
drivers/net/hamradio/baycom_epp.c
drivers/net/hamradio/bpqether.c
drivers/net/hamradio/dmascc.c
drivers/net/hamradio/hdlcdrv.c
drivers/net/hamradio/mkiss.c
drivers/net/hamradio/scc.c
drivers/net/hamradio/yam.c
drivers/net/hyperv/hyperv_net.h
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/netvsc_drv.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/Kconfig
drivers/net/ieee802154/Makefile
drivers/net/ieee802154/at86rf230.c
drivers/net/ieee802154/cc2520.c [new file with mode: 0644]
drivers/net/ieee802154/fakehard.c
drivers/net/ieee802154/mrf24j40.c
drivers/net/ifb.c
drivers/net/irda/kingsun-sir.c
drivers/net/loopback.c
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/at803x.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio_bus.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/phy/phy_device.c
drivers/net/phy/spi_ks8995.c
drivers/net/ppp/ppp_generic.c
drivers/net/ppp/pppoe.c
drivers/net/slip/slhc.c
drivers/net/slip/slip.c
drivers/net/slip/slip.h
drivers/net/team/team_mode_loadbalance.c
drivers/net/tun.c
drivers/net/usb/ax88179_178a.c
drivers/net/usb/cdc-phonet.c
drivers/net/usb/cdc_ether.c
drivers/net/usb/hso.c
drivers/net/usb/huawei_cdc_ncm.c
drivers/net/usb/qmi_wwan.c
drivers/net/usb/r8152.c
drivers/net/usb/smsc95xx.c
drivers/net/veth.c
drivers/net/virtio_net.c
drivers/net/vmxnet3/vmxnet3_drv.c
drivers/net/vmxnet3/vmxnet3_ethtool.c
drivers/net/vmxnet3/vmxnet3_int.h
drivers/net/vxlan.c
drivers/net/wan/dlci.c
drivers/net/wan/farsync.c
drivers/net/wan/hdlc.c
drivers/net/wan/hdlc_fr.c
drivers/net/wan/lapbether.c
drivers/net/wan/sbni.c
drivers/net/wan/sdla.c
drivers/net/wan/x25_asy.c
drivers/net/wimax/i2400m/usb.c
drivers/net/wireless/airo.c
drivers/net/wireless/ath/ath6kl/cfg80211.c
drivers/net/wireless/ath/ath6kl/init.c
drivers/net/wireless/ath/carl9170/phy.c
drivers/net/wireless/ath/wcn36xx/main.c
drivers/net/wireless/ath/wil6210/netdev.c
drivers/net/wireless/brcm80211/brcmfmac/dhd_linux.c
drivers/net/wireless/brcm80211/brcmfmac/p2p.c
drivers/net/wireless/brcm80211/brcmsmac/phy/phy_n.c
drivers/net/wireless/cw1200/fwio.c
drivers/net/wireless/libertas/main.c
drivers/net/wireless/libertas/mesh.c
drivers/net/wireless/mac80211_hwsim.c
drivers/net/wireless/mwifiex/cfg80211.c
drivers/net/wireless/mwl8k.c
drivers/net/wireless/orinoco/orinoco_usb.c
drivers/net/wireless/prism54/oid_mgt.c
drivers/net/wireless/rtlwifi/core.c
drivers/net/wireless/rtlwifi/rtl8188ee/pwrseqcmd.c
drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
drivers/net/wireless/rtlwifi/rtl8192se/fw.c
drivers/net/wireless/rtlwifi/rtl8192se/hw.c
drivers/net/wireless/rtlwifi/rtl8723ae/hw.c
drivers/net/wireless/rtlwifi/rtl8723be/pwrseqcmd.c
drivers/net/xen-netback/common.h
drivers/net/xen-netback/interface.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/of/base.c
drivers/of/fdt.c
drivers/of/of_mdio.c
drivers/of/platform.c
drivers/parport/Kconfig
drivers/pci/pci.c
drivers/phy/Kconfig
drivers/phy/phy-core.c
drivers/phy/phy-omap-usb2.c
drivers/phy/phy-samsung-usb2.c
drivers/pinctrl/berlin/berlin.c
drivers/pinctrl/pinctrl-st.c
drivers/pinctrl/sunxi/pinctrl-sunxi.c
drivers/ptp/Kconfig
drivers/ptp/ptp_chardev.c
drivers/regulator/as3722-regulator.c
drivers/regulator/bcm590xx-regulator.c
drivers/regulator/ltc3589.c
drivers/regulator/palmas-regulator.c
drivers/regulator/tps65218-regulator.c
drivers/remoteproc/Kconfig
drivers/rtc/rtc-puv3.c
drivers/s390/block/dcssblk.c
drivers/s390/char/Makefile
drivers/s390/char/raw3270.c
drivers/s390/char/sclp_vt220.c
drivers/s390/char/vmlogrdr.c
drivers/s390/char/vmwatchdog.c [deleted file]
drivers/s390/cio/airq.c
drivers/s390/cio/ccwgroup.c
drivers/s390/cio/cio.c
drivers/s390/cio/device.c
drivers/s390/cio/qdio_debug.c
drivers/s390/cio/qdio_debug.h
drivers/s390/cio/qdio_main.c
drivers/s390/crypto/ap_bus.c
drivers/s390/crypto/zcrypt_api.c
drivers/s390/net/claw.c
drivers/s390/net/ctcm_main.c
drivers/s390/net/netiucv.c
drivers/s390/net/qeth_core.h
drivers/s390/net/qeth_core_main.c
drivers/s390/net/qeth_core_mpc.h
drivers/s390/net/qeth_core_sys.c
drivers/s390/net/qeth_l2_main.c
drivers/s390/net/qeth_l3_main.c
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/bnx2fc/bnx2fc_fcoe.c
drivers/scsi/bnx2fc/bnx2fc_io.c
drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
drivers/scsi/cxgbi/libcxgbi.c
drivers/scsi/cxgbi/libcxgbi.h
drivers/scsi/ibmvscsi/ibmvscsi.c
drivers/scsi/mvsas/mv_94xx.c
drivers/scsi/mvsas/mv_94xx.h
drivers/scsi/pm8001/pm8001_init.c
drivers/scsi/qla2xxx/qla_target.c
drivers/scsi/qla2xxx/qla_target.h
drivers/scsi/scsi_error.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd.c
drivers/scsi/virtio_scsi.c
drivers/spi/spi-pxa2xx-dma.c
drivers/spi/spi-pxa2xx.c
drivers/spi/spi-qup.c
drivers/spi/spi-sh-sci.c
drivers/staging/android/timed_output.c
drivers/staging/comedi/Kconfig
drivers/staging/cxt1e1/linux.c
drivers/staging/gdm724x/gdm_lte.c
drivers/staging/gdm72xx/gdm_wimax.c
drivers/staging/iio/Kconfig
drivers/staging/iio/adc/ad7291.c
drivers/staging/iio/adc/mxs-lradc.c
drivers/staging/iio/light/tsl2x7x_core.c
drivers/staging/imx-drm/parallel-display.c
drivers/staging/media/omap4iss/Kconfig
drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
drivers/staging/rtl8723au/os_dep/os_intfs.c
drivers/staging/tidspbridge/core/tiomap3430.c
drivers/staging/vt6655/wpactl.c
drivers/staging/wlan-ng/p80211netdev.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_auth.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_util.c
drivers/target/loopback/tcm_loop.c
drivers/target/target_core_device.c
drivers/tc/tc.c
drivers/thermal/imx_thermal.c
drivers/thermal/of-thermal.c
drivers/thermal/thermal_hwmon.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/tty/n_gsm.c
drivers/tty/n_tty.c
drivers/tty/serial/8250/8250_core.c
drivers/tty/serial/8250/8250_early.c
drivers/tty/serial/altera_uart.c
drivers/tty/serial/amba-pl010.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/arc_uart.c
drivers/tty/serial/atmel_serial.c
drivers/tty/serial/bcm63xx_uart.c
drivers/tty/serial/bfin_uart.c
drivers/tty/serial/dz.c
drivers/tty/serial/earlycon.c
drivers/tty/serial/efm32-uart.c
drivers/tty/serial/fsl_lpuart.c
drivers/tty/serial/imx.c
drivers/tty/serial/ip22zilog.c
drivers/tty/serial/m32r_sio.c
drivers/tty/serial/max310x.c
drivers/tty/serial/mcf.c
drivers/tty/serial/mfd.c
drivers/tty/serial/mpsc.c
drivers/tty/serial/msm_serial.c
drivers/tty/serial/mxs-auart.c
drivers/tty/serial/netx-serial.c
drivers/tty/serial/pmac_zilog.c
drivers/tty/serial/pnx8xxx_uart.c
drivers/tty/serial/pxa.c
drivers/tty/serial/samsung.c
drivers/tty/serial/sb1250-duart.c
drivers/tty/serial/sccnxp.c
drivers/tty/serial/serial_ks8695.c
drivers/tty/serial/serial_txx9.c
drivers/tty/serial/sirfsoc_uart.c
drivers/tty/serial/st-asc.c
drivers/tty/serial/sunsab.c
drivers/tty/serial/sunsu.c
drivers/tty/serial/sunzilog.c
drivers/tty/serial/ucc_uart.c
drivers/tty/serial/vr41xx_siu.c
drivers/tty/serial/zs.c
drivers/tty/vt/vt.c
drivers/uio/uio.c
drivers/usb/chipidea/udc.c
drivers/usb/core/hub.c
drivers/usb/core/hub.h
drivers/usb/core/port.c
drivers/usb/dwc3/Kconfig
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/dwc3/gadget.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/configfs.h
drivers/usb/gadget/f_fs.c
drivers/usb/gadget/f_phonet.c
drivers/usb/gadget/f_rndis.c
drivers/usb/gadget/gr_udc.c
drivers/usb/gadget/inode.c
drivers/usb/gadget/u_ether.c
drivers/usb/host/Kconfig
drivers/usb/host/pci-quirks.c
drivers/usb/host/xhci-hub.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/misc/usbtest.c
drivers/usb/musb/musb_am335x.c
drivers/usb/musb/musb_core.c
drivers/usb/musb/musb_cppi41.c
drivers/usb/musb/musb_dsps.c
drivers/usb/musb/ux500.c
drivers/usb/phy/phy-msm-usb.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/option.c
drivers/usb/storage/scsiglue.c
drivers/usb/storage/unusual_devs.h
drivers/vhost/net.c
drivers/vhost/scsi.c
drivers/video/console/dummycon.c
drivers/video/console/vgacon.c
drivers/video/fbdev/atmel_lcdfb.c
drivers/video/fbdev/bfin_adv7393fb.c
drivers/video/fbdev/offb.c
drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
drivers/video/fbdev/vt8500lcdfb.c
drivers/w1/masters/mxc_w1.c
drivers/watchdog/Kconfig
drivers/watchdog/Makefile
drivers/watchdog/diag288_wdt.c [new file with mode: 0644]
drivers/xen/balloon.c
drivers/xen/grant-table.c
drivers/xen/manage.c
firmware/Makefile
fs/afs/main.c
fs/aio.c
fs/autofs4/inode.c
fs/btrfs/compression.c
fs/btrfs/ctree.h
fs/btrfs/dev-replace.c
fs/btrfs/disk-io.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.h
fs/btrfs/extent_map.c
fs/btrfs/extent_map.h
fs/btrfs/free-space-cache.c
fs/btrfs/inode.c
fs/btrfs/ioctl.c
fs/btrfs/locking.c
fs/btrfs/ordered-data.c
fs/btrfs/print-tree.c
fs/btrfs/raid56.c
fs/btrfs/scrub.c
fs/btrfs/super.c
fs/btrfs/sysfs.c
fs/btrfs/sysfs.h
fs/btrfs/transaction.c
fs/btrfs/volumes.c
fs/btrfs/volumes.h
fs/btrfs/zlib.c
fs/cifs/cifs_unicode.c
fs/cifs/cifsfs.c
fs/cifs/link.c
fs/coredump.c
fs/direct-io.c
fs/eventpoll.c
fs/ext4/balloc.c
fs/ext4/extents_status.c
fs/ext4/ialloc.c
fs/ext4/indirect.c
fs/ext4/mballoc.c
fs/ext4/super.c
fs/f2fs/data.c
fs/f2fs/dir.c
fs/f2fs/f2fs.h
fs/f2fs/file.c
fs/f2fs/inode.c
fs/f2fs/namei.c
fs/f2fs/node.c
fs/f2fs/segment.c
fs/f2fs/super.c
fs/fuse/dev.c
fs/fuse/dir.c
fs/fuse/file.c
fs/fuse/inode.c
fs/gfs2/file.c
fs/gfs2/glock.c
fs/gfs2/glops.c
fs/gfs2/lock_dlm.c
fs/gfs2/rgrp.c
fs/jbd2/transaction.c
fs/kernfs/file.c
fs/kernfs/mount.c
fs/locks.c
fs/mbcache.c
fs/namei.c
fs/nfs/direct.c
fs/nfs/inode.c
fs/nfs/internal.h
fs/nfs/nfs3acl.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4_fs.h
fs/nfs/nfs4namespace.c
fs/nfs/nfs4proc.c
fs/nfs/pagelist.c
fs/nfs/write.c
fs/nfsd/nfs4proc.c
fs/nfsd/nfs4state.c
fs/nfsd/nfs4xdr.c
fs/ocfs2/dlm/dlmcommon.h
fs/ocfs2/dlm/dlmmaster.c
fs/ocfs2/dlm/dlmrecovery.c
fs/ocfs2/dlm/dlmthread.c
fs/ocfs2/dlm/dlmunlock.c
fs/ocfs2/namei.c
fs/ocfs2/ocfs2_trace.h
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/proc/stat.c
fs/quota/dquot.c
fs/seq_file.c
fs/xattr.c
fs/xfs/xfs_bmap.c
fs/xfs/xfs_bmap.h
fs/xfs/xfs_bmap_util.c
fs/xfs/xfs_bmap_util.h
fs/xfs/xfs_btree.c
fs/xfs/xfs_iomap.c
fs/xfs/xfs_sb.c
include/acpi/processor.h
include/acpi/video.h
include/asm-generic/vmlinux.lds.h
include/drm/i915_pciids.h
include/drm/i915_powerwell.h
include/dt-bindings/clock/exynos5420.h
include/dt-bindings/clock/imx6sl-clock.h
include/dt-bindings/clock/stih415-clks.h
include/dt-bindings/clock/stih416-clks.h
include/dt-bindings/pinctrl/dra.h
include/linux/arcdevice.h
include/linux/bio.h
include/linux/blk-mq.h
include/linux/blkdev.h
include/linux/cpufreq.h
include/linux/crc32.h
include/linux/elevator.h
include/linux/filter.h
include/linux/fs.h
include/linux/if_bridge.h
include/linux/ipv6.h
include/linux/isdn_ppp.h
include/linux/kernel.h
include/linux/kernfs.h
include/linux/libata.h
include/linux/mlx4/device.h
include/linux/mlx5/device.h
include/linux/mlx5/driver.h
include/linux/mutex.h
include/linux/netdev_features.h
include/linux/netdevice.h
include/linux/nmi.h
include/linux/of_fdt.h
include/linux/of_mdio.h
include/linux/osq_lock.h [new file with mode: 0644]
include/linux/page-flags.h
include/linux/pagemap.h
include/linux/percpu-defs.h
include/linux/phy.h
include/linux/profile.h
include/linux/ptp_classify.h
include/linux/ptrace.h
include/linux/rcupdate.h
include/linux/regulator/consumer.h
include/linux/rhashtable.h [new file with mode: 0644]
include/linux/rndis.h
include/linux/rtnetlink.h
include/linux/rwsem-spinlock.h
include/linux/rwsem.h
include/linux/sched.h
include/linux/skbuff.h
include/linux/socket.h
include/linux/spi/cc2520.h [new file with mode: 0644]
include/linux/stmmac.h
include/linux/suspend.h
include/linux/tcp.h
include/linux/uio.h
include/linux/usb_usual.h
include/net/dcbnl.h
include/net/flow_keys.h
include/net/if_inet6.h
include/net/inet_frag.h
include/net/inet_sock.h
include/net/inet_timewait_sock.h
include/net/ip.h
include/net/ipv6.h
include/net/mac802154.h
include/net/neighbour.h
include/net/netfilter/nf_conntrack_ecache.h
include/net/netfilter/nf_log.h
include/net/netfilter/nf_tables.h
include/net/netfilter/xt_log.h [deleted file]
include/net/netlink.h
include/net/netns/conntrack.h
include/net/netns/ieee802154_6lowpan.h
include/net/netns/ipv6.h
include/net/netns/nftables.h
include/net/netns/x_tables.h
include/net/rtnetlink.h
include/net/sch_generic.h
include/net/sctp/command.h
include/net/sctp/constants.h
include/net/sctp/sctp.h
include/net/sctp/structs.h
include/net/sctp/ulpevent.h
include/net/sock.h
include/net/tcp.h
include/net/udp.h
include/net/udp_tunnel.h [new file with mode: 0644]
include/net/vxlan.h
include/scsi/scsi_cmnd.h
include/scsi/scsi_device.h
include/sound/core.h
include/trace/ftrace.h
include/trace/syscall.h
include/uapi/linux/btrfs.h
include/uapi/linux/can/netlink.h
include/uapi/linux/dcbnl.h
include/uapi/linux/fuse.h
include/uapi/linux/if_link.h
include/uapi/linux/if_packet.h
include/uapi/linux/in6.h
include/uapi/linux/ipv6.h
include/uapi/linux/netdevice.h
include/uapi/linux/netfilter/nf_tables.h
include/uapi/linux/netfilter/xt_bpf.h
include/uapi/linux/netfilter_bridge/Kbuild
include/uapi/linux/netfilter_bridge/ebt_ulog.h [deleted file]
include/uapi/linux/netfilter_ipv4/Kbuild
include/uapi/linux/netfilter_ipv4/ipt_ULOG.h [deleted file]
include/uapi/linux/openvswitch.h
include/uapi/linux/perf_event.h
include/uapi/linux/sctp.h
include/uapi/linux/sysctl.h
include/uapi/linux/tipc_config.h
include/uapi/linux/usb/functionfs.h
include/uapi/sound/compress_offload.h
include/uapi/sound/compress_params.h
include/xen/grant_table.h
kernel/Kconfig.locks
kernel/Makefile
kernel/bpf/Makefile [new file with mode: 0644]
kernel/bpf/core.c [new file with mode: 0644]
kernel/cgroup.c
kernel/context_tracking.c
kernel/cpuset.c
kernel/events/core.c
kernel/events/uprobes.c
kernel/fork.c
kernel/irq/irqdesc.c
kernel/kexec.c
kernel/kprobes.c
kernel/locking/mcs_spinlock.c
kernel/locking/mcs_spinlock.h
kernel/locking/mutex.c
kernel/locking/rtmutex-debug.h
kernel/locking/rtmutex.c
kernel/locking/rtmutex.h
kernel/locking/rwsem-spinlock.c
kernel/locking/rwsem-xadd.c
kernel/locking/rwsem.c
kernel/power/hibernate.c
kernel/power/main.c
kernel/power/process.c
kernel/power/suspend.c
kernel/power/user.c
kernel/printk/printk.c
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/update.c
kernel/sched/core.c
kernel/sched/debug.c
kernel/seccomp.c
kernel/smp.c
kernel/sysctl.c
kernel/sysctl_binary.c
kernel/time/alarmtimer.c
kernel/trace/ftrace.c
kernel/trace/ring_buffer.c
kernel/trace/trace.c
kernel/trace/trace_clock.c
kernel/trace/trace_events.c
kernel/trace/trace_uprobe.c
kernel/tracepoint.c
kernel/watchdog.c
kernel/workqueue.c
lib/Kconfig.debug
lib/Makefile
lib/cpumask.c
lib/crc32.c
lib/dynamic_debug.c
lib/iovec.c
lib/lz4/lz4_decompress.c
lib/lzo/lzo1x_decompress_safe.c
lib/net_utils.c
lib/random32.c
lib/rhashtable.c [new file with mode: 0644]
lib/swiotlb.c
lib/test_bpf.c
mm/huge_memory.c
mm/hugetlb.c
mm/ksm.c
mm/memory-failure.c
mm/memory.c
mm/mempolicy.c
mm/migrate.c
mm/msync.c
mm/nommu.c
mm/page_alloc.c
mm/rmap.c
mm/shmem.c
mm/slab.c
mm/slab_common.c
mm/slub.c
mm/truncate.c
net/6lowpan/iphc.c
net/802/fc.c
net/802/fddi.c
net/802/hippi.c
net/8021q/vlan.c
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlanproc.c
net/9p/client.c
net/appletalk/ddp.c
net/appletalk/dev.c
net/atm/br2684.c
net/atm/clip.c
net/batman-adv/bridge_loop_avoidance.c
net/batman-adv/soft-interface.c
net/batman-adv/sysfs.c
net/batman-adv/translation-table.c
net/batman-adv/types.h
net/bluetooth/6lowpan.c
net/bluetooth/bnep/core.c
net/bridge/br_fdb.c
net/bridge/br_if.c
net/bridge/br_multicast.c
net/bridge/br_netlink.c
net/bridge/br_private.h
net/bridge/netfilter/Kconfig
net/bridge/netfilter/Makefile
net/bridge/netfilter/ebt_log.c
net/bridge/netfilter/ebt_ulog.c [deleted file]
net/bridge/netfilter/nf_log_bridge.c [new file with mode: 0644]
net/bridge/netfilter/nft_reject_bridge.c [new file with mode: 0644]
net/caif/caif_socket.c
net/caif/cfctrl.c
net/compat.c
net/core/dev.c
net/core/drop_monitor.c
net/core/dst.c
net/core/filter.c
net/core/flow_dissector.c
net/core/iovec.c
net/core/neighbour.c
net/core/net-sysfs.c
net/core/netpoll.c
net/core/pktgen.c
net/core/ptp_classifier.c
net/core/request_sock.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/core/sock_diag.c
net/core/timestamping.c
net/dcb/dcbnl.c
net/dccp/ipv6.c
net/dccp/minisocks.c
net/dns_resolver/dns_query.c
net/dsa/dsa.c
net/dsa/slave.c
net/ethernet/eth.c
net/hsr/Makefile
net/hsr/hsr_device.c
net/hsr/hsr_device.h
net/hsr/hsr_forward.c [new file with mode: 0644]
net/hsr/hsr_forward.h [new file with mode: 0644]
net/hsr/hsr_framereg.c
net/hsr/hsr_framereg.h
net/hsr/hsr_main.c
net/hsr/hsr_main.h
net/hsr/hsr_netlink.c
net/hsr/hsr_netlink.h
net/hsr/hsr_slave.c [new file with mode: 0644]
net/hsr/hsr_slave.h [new file with mode: 0644]
net/ieee802154/6lowpan_rtnl.c
net/ieee802154/af_ieee802154.c
net/ieee802154/dgram.c
net/ieee802154/ieee802154.h
net/ieee802154/netlink.c
net/ieee802154/nl-mac.c
net/ieee802154/nl-phy.c
net/ieee802154/raw.c
net/ieee802154/reassembly.c
net/ieee802154/wpan-class.c
net/ipv4/Kconfig
net/ipv4/Makefile
net/ipv4/af_inet.c
net/ipv4/datagram.c
net/ipv4/devinet.c
net/ipv4/gre_demux.c
net/ipv4/gre_offload.c
net/ipv4/icmp.c
net/ipv4/igmp.c
net/ipv4/inet_fragment.c
net/ipv4/ip_fragment.c
net/ipv4/ip_options.c
net/ipv4/ip_output.c
net/ipv4/ip_sockglue.c
net/ipv4/ip_tunnel.c
net/ipv4/ip_vti.c
net/ipv4/ipconfig.c
net/ipv4/ipmr.c
net/ipv4/netfilter/Kconfig
net/ipv4/netfilter/Makefile
net/ipv4/netfilter/ipt_ULOG.c [deleted file]
net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
net/ipv4/netfilter/nf_conntrack_proto_icmp.c
net/ipv4/netfilter/nf_defrag_ipv4.c
net/ipv4/netfilter/nf_log_arp.c [new file with mode: 0644]
net/ipv4/netfilter/nf_log_ipv4.c [new file with mode: 0644]
net/ipv4/netfilter/nf_nat_l3proto_ipv4.c
net/ipv4/netfilter/nf_nat_proto_gre.c
net/ipv4/netfilter/nf_nat_proto_icmp.c
net/ipv4/ping.c
net/ipv4/proc.c
net/ipv4/raw.c
net/ipv4/route.c
net/ipv4/syncookies.c
net/ipv4/tcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_input.c
net/ipv4/tcp_ipv4.c
net/ipv4/tcp_metrics.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_offload.c
net/ipv4/tcp_output.c
net/ipv4/udp.c
net/ipv4/udp_offload.c
net/ipv4/udp_tunnel.c [new file with mode: 0644]
net/ipv4/xfrm4_protocol.c
net/ipv6/addrconf.c
net/ipv6/af_inet6.c
net/ipv6/datagram.c
net/ipv6/icmp.c
net/ipv6/ip6_gre.c
net/ipv6/ip6_output.c
net/ipv6/ip6_tunnel.c
net/ipv6/ip6_vti.c
net/ipv6/ip6mr.c
net/ipv6/ipv6_sockglue.c
net/ipv6/mcast.c
net/ipv6/ndisc.c
net/ipv6/netfilter/Kconfig
net/ipv6/netfilter/Makefile
net/ipv6/netfilter/ip6t_ipv6header.c
net/ipv6/netfilter/nf_conntrack_reasm.c
net/ipv6/netfilter/nf_log_ipv6.c [new file with mode: 0644]
net/ipv6/netfilter/nf_nat_l3proto_ipv6.c
net/ipv6/proc.c
net/ipv6/raw.c
net/ipv6/reassembly.c
net/ipv6/sit.c
net/ipv6/syncookies.c
net/ipv6/sysctl_net_ipv6.c
net/ipv6/tcp_ipv6.c
net/ipv6/tcpv6_offload.c
net/ipv6/udp.c
net/irda/af_irda.c
net/irda/irda_device.c
net/irda/irlan/irlan_common.c
net/irda/irlan/irlan_eth.c
net/irda/irlmp.c
net/iucv/af_iucv.c
net/key/af_key.c
net/l2tp/Kconfig
net/l2tp/l2tp_core.c
net/l2tp/l2tp_eth.c
net/l2tp/l2tp_ip6.c
net/l2tp/l2tp_ppp.c
net/mac80211/iface.c
net/mac80211/mesh_hwmp.c
net/mac802154/ieee802154_dev.c
net/mac802154/llsec.c
net/mac802154/mib.c
net/mac802154/tx.c
net/netfilter/Kconfig
net/netfilter/Makefile
net/netfilter/ipvs/ip_vs_conn.c
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/ipvs/ip_vs_sync.c
net/netfilter/nf_conntrack_core.c
net/netfilter/nf_conntrack_ecache.c
net/netfilter/nf_conntrack_netlink.c
net/netfilter/nf_log.c
net/netfilter/nf_log_common.c [new file with mode: 0644]
net/netfilter/nf_nat_core.c
net/netfilter/nf_nat_proto_common.c
net/netfilter/nf_nat_proto_dccp.c
net/netfilter/nf_nat_proto_sctp.c
net/netfilter/nf_nat_proto_tcp.c
net/netfilter/nf_nat_proto_udp.c
net/netfilter/nf_nat_proto_udplite.c
net/netfilter/nf_tables_api.c
net/netfilter/nf_tables_core.c
net/netfilter/nfnetlink_acct.c
net/netfilter/nfnetlink_log.c
net/netfilter/nft_compat.c
net/netfilter/nft_hash.c
net/netfilter/nft_log.c
net/netfilter/nft_nat.c
net/netfilter/x_tables.c
net/netfilter/xt_LED.c
net/netfilter/xt_LOG.c
net/netfilter/xt_bpf.c
net/netfilter/xt_hashlimit.c
net/netlabel/netlabel_kapi.c
net/netlink/af_netlink.c
net/netlink/af_netlink.h
net/netlink/diag.c
net/netrom/af_netrom.c
net/nfc/digital_dep.c
net/openvswitch/actions.c
net/openvswitch/datapath.c
net/openvswitch/datapath.h
net/openvswitch/flow.c
net/openvswitch/flow.h
net/openvswitch/flow_table.c
net/openvswitch/flow_table.h
net/openvswitch/vport-gre.c
net/openvswitch/vport-internal_dev.c
net/openvswitch/vport-internal_dev.h
net/openvswitch/vport-vxlan.c
net/openvswitch/vport.c
net/openvswitch/vport.h
net/packet/af_packet.c
net/phonet/pep-gprs.c
net/rose/af_rose.c
net/rxrpc/ar-key.c
net/sched/act_mirred.c
net/sched/cls_api.c
net/sched/cls_bpf.c
net/sched/cls_tcindex.c
net/sched/cls_u32.c
net/sched/em_canid.c
net/sched/sch_generic.c
net/sched/sch_teql.c
net/sctp/Makefile
net/sctp/associola.c
net/sctp/command.c [deleted file]
net/sctp/input.c
net/sctp/ipv6.c
net/sctp/output.c
net/sctp/outqueue.c
net/sctp/protocol.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/sysctl.c
net/sctp/transport.c
net/sctp/ulpevent.c
net/socket.c
net/sunrpc/auth.c
net/tipc/bcast.c
net/tipc/bcast.h
net/tipc/link.c
net/tipc/link.h
net/tipc/msg.c
net/tipc/msg.h
net/tipc/name_distr.c
net/tipc/name_distr.h
net/tipc/net.c
net/tipc/net.h
net/tipc/node.c
net/tipc/node.h
net/tipc/node_subscr.c
net/tipc/port.c
net/tipc/port.h
net/tipc/socket.c
net/tipc/socket.h
net/xfrm/xfrm_policy.c
net/xfrm/xfrm_user.c
samples/trace_events/trace-events-sample.h
scripts/checkpatch.pl
scripts/kernel-doc
scripts/package/builddeb
scripts/package/buildtar
scripts/recordmcount.h
sound/core/control.c
sound/core/init.c
sound/firewire/bebob/bebob_maudio.c
sound/pci/hda/hda_auto_parser.c
sound/pci/hda/hda_controller.c
sound/pci/hda/hda_i915.c
sound/pci/hda/hda_i915.h
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/hda_priv.h
sound/pci/hda/hda_tegra.c
sound/pci/hda/patch_hdmi.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/soc/codecs/Kconfig
sound/soc/codecs/Makefile
sound/soc/codecs/sigmadsp-i2c.c [new file with mode: 0644]
sound/soc/codecs/sigmadsp-regmap.c [new file with mode: 0644]
sound/soc/codecs/sigmadsp.c
sound/soc/codecs/sigmadsp.h
sound/soc/fsl/fsl_dma.c
sound/soc/fsl/fsl_spdif.c
sound/soc/fsl/imx-pcm-dma.c
sound/soc/pxa/Kconfig
sound/soc/sh/rcar/core.c
sound/soc/soc-dapm.c
sound/usb/card.c
sound/usb/endpoint.c
sound/usb/endpoint.h
tools/lib/lockdep/include/liblockdep/mutex.h
tools/lib/lockdep/include/liblockdep/rwlock.h
tools/lib/lockdep/preload.c
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/event-plugin.c
tools/lib/traceevent/plugin_function.c
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-timechart.txt
tools/perf/Makefile.perf
tools/perf/builtin-inject.c
tools/perf/builtin-probe.c
tools/perf/config/Makefile
tools/perf/perf.c
tools/perf/tests/builtin-test.c
tools/perf/tests/dso-data.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/make
tools/perf/tests/tests.h
tools/perf/ui/browsers/hists.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evsel.c
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/machine.c
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/perf_regs.c
tools/perf/util/perf_regs.h
tools/perf/util/probe-event.c
tools/perf/util/probe-finder.c
tools/perf/util/scripting-engines/trace-event-perl.c
tools/perf/util/scripting-engines/trace-event-python.c
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/unwind-libunwind.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/testing/selftests/cpu-hotplug/Makefile
tools/testing/selftests/ipc/msgque.c
tools/testing/selftests/memory-hotplug/Makefile
tools/testing/selftests/powerpc/tm/Makefile
tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
tools/thermal/tmon/Makefile
tools/thermal/tmon/tmon.c
tools/usb/ffs-test.c
virt/kvm/arm/vgic.c

diff --git a/CREDITS b/CREDITS
index c322dcf..28ee151 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -9,6 +9,10 @@
                        Linus
 ----------
 
+M: Matt Mackal
+E: mpm@selenic.com
+D: SLOB slab allocator
+
 N: Matti Aarnio
 E: mea@nic.funet.fi
 D: Alpha systems hacking, IPv6 and other network related stuff
index 416c5d5..d322b05 100644 (file)
@@ -1,3 +1,14 @@
+What:          /sys/class/net/<iface>/name_assign_type
+Date:          July 2014
+KernelVersion: 3.17
+Contact:       netdev@vger.kernel.org
+Description:
+               Indicates the name assignment type. Possible values are:
+               1: enumerated by the kernel, possibly in an unpredictable way
+               2: predictably named by the kernel
+               3: named by userspace
+               4: renamed
+
 What:          /sys/class/net/<iface>/addr_assign_type
 Date:          July 2010
 KernelVersion: 3.2
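
The new name_assign_type attribute is read as a single decimal value. A minimal
userspace sketch of a reader follows; the interface name "eth0" and the error
handling are illustrative assumptions, not part of the patch:

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/class/net/eth0/name_assign_type", "r");
                int type;

                if (!f || fscanf(f, "%d", &type) != 1) {
                        perror("name_assign_type"); /* may fail if the type was never set */
                        return 1;
                }
                fclose(f);
                printf("name_assign_type = %d\n", type); /* 1..4, see the table above */
                return 0;
        }
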
index 2254db0..227bec8 100644 (file)
@@ -280,12 +280,9 @@ that is possible.
 mcelog
 ------
 
-In Linux 2.6.31+ the i386 kernel needs to run the mcelog utility
-as a regular cronjob similar to the x86-64 kernel to process and log
-machine check events when CONFIG_X86_NEW_MCE is enabled. Machine check
-events are errors reported by the CPU. Processing them is strongly encouraged.
-All x86-64 kernels since 2.6.4 require the mcelog utility to
-process machine checks.
+On x86 kernels the mcelog utility is needed to process and log machine check
+events when CONFIG_X86_MCE is enabled. Machine check events are errors reported
+by the CPU. Processing them is strongly encouraged.
 
 Getting updated software
 ========================
index 4017f14..2c425d7 100644 (file)
@@ -708,7 +708,7 @@ hardware level details could be very different.
 
 <para>Systems need specialized hardware support to implement OTG,
 notably including a special <emphasis>Mini-AB</emphasis> jack
-and associated transciever to support <emphasis>Dual-Role</emphasis>
+and associated transceiver to support <emphasis>Dual-Role</emphasis>
 operation:
 they can act either as a host, using the standard
 Linux-USB host side driver stack,
index 46347f6..59fb5c0 100644 (file)
        <para>
        Each interrupt is described by an interrupt descriptor structure
        irq_desc. The interrupt is referenced by an 'unsigned int' numeric
-       value which selects the corresponding interrupt decription structure
+       value which selects the corresponding interrupt description structure
        in the descriptor structures array.
        The descriptor structure contains status information and pointers
        to the interrupt flow method and the interrupt chip structure
@@ -470,7 +470,7 @@ if (desc->irq_data.chip->irq_eoi)
      <para>
        To avoid copies of identical implementations of IRQ chips the
        core provides a configurable generic interrupt chip
-       implementation. Developers should check carefuly whether the
+       implementation. Developers should check carefully whether the
        generic chip fits their needs before implementing the same
        functionality slightly differently themselves.
      </para>
index 19f2a5a..e584ee1 100644 (file)
@@ -1760,7 +1760,7 @@ as it would be on UP.
 </para>
 
 <para>
-There is a furthur optimization possible here: remember our original
+There is a further optimization possible here: remember our original
 cache code, where there were no reference counts and the caller simply
 held the lock whenever using the object?  This is still possible: if
 you hold the lock, no one can delete the object, so you don't need to
index deb71ba..d7fcdc5 100644 (file)
@@ -677,7 +677,7 @@ and other resources, etc.
 
        <listitem>
        <para>
-       ATA_QCFLAG_ACTIVE is clared from qc->flags.
+       ATA_QCFLAG_ACTIVE is cleared from qc->flags.
        </para>
        </listitem>
 
@@ -708,7 +708,7 @@ and other resources, etc.
 
           <listitem>
           <para>
-          qc->waiting is claread &amp; completed (in that order).
+          qc->waiting is cleared &amp; completed (in that order).
           </para>
           </listitem>
 
@@ -1163,7 +1163,7 @@ and other resources, etc.
 
        <para>
        Once sense data is acquired, this type of errors can be
-       handled similary to other SCSI errors.  Note that sense data
+       handled similarly to other SCSI errors.  Note that sense data
        may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
        &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR).  In such
        cases, the error should be considered as an ATA bus error and
index 1d27f0a..639e748 100644 (file)
@@ -202,8 +202,8 @@ $(MEDIA_OBJ_DIR)/%: $(MEDIA_SRC_DIR)/%.b64
 
 $(MEDIA_OBJ_DIR)/v4l2.xml: $(OBJIMGFILES)
        @$($(quiet)gen_xml)
-       @(ln -sf $(MEDIA_SRC_DIR)/v4l/*xml $(MEDIA_OBJ_DIR)/)
-       @(ln -sf $(MEDIA_SRC_DIR)/dvb/*xml $(MEDIA_OBJ_DIR)/)
+       @(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/v4l/*xml $(MEDIA_OBJ_DIR)/)
+       @(ln -sf `cd $(MEDIA_SRC_DIR) && /bin/pwd`/dvb/*xml $(MEDIA_OBJ_DIR)/)
 
 $(MEDIA_OBJ_DIR)/videodev2.h.xml: $(srctree)/include/uapi/linux/videodev2.h $(MEDIA_OBJ_DIR)/v4l2.xml
        @$($(quiet)gen_xml)
index 4decb46..03f9a1f 100644 (file)
@@ -68,7 +68,7 @@
                several digital tv standards. While it is called as DVB API,
                in fact it covers several different video standards including
                DVB-T, DVB-S, DVB-C and ATSC. The API is currently being updated
-               to documment support also for DVB-S2, ISDB-T and ISDB-S.</para>
+               to document support also for DVB-S2, ISDB-T and ISDB-S.</para>
        <para>The third part covers the Remote Controller API.</para>
        <para>The fourth part covers the Media Controller API.</para>
        <para>For additional information and for the latest development code,
index cd11926..7da8f04 100644 (file)
@@ -91,7 +91,7 @@
                <listitem><para>
                [MTD Interface]</para><para>
                These functions provide the interface to the MTD kernel API. 
-               They are not replacable and provide functionality
+               They are not replaceable and provide functionality
                which is complete hardware independent.
                </para></listitem>
                <listitem><para>
                </para></listitem>
                <listitem><para>
                [GENERIC]</para><para>
-               Generic functions are not replacable and provide functionality
+               Generic functions are not replaceable and provide functionality
                which is complete hardware independent.
                </para></listitem>
                <listitem><para>
                [DEFAULT]</para><para>
                Default functions provide hardware related functionality which is suitable
                for most of the implementations. These functions can be replaced by the
-               board driver if neccecary. Those functions are called via pointers in the
+               board driver if necessary. Those functions are called via pointers in the
                NAND chip description structure. The board driver can set the functions which
                should be replaced by board dependent functions before calling nand_scan().
                If the function pointer is NULL on entry to nand_scan() then the pointer
@@ -264,7 +264,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
                        is set up nand_scan() is called. This function tries to
                        detect and identify then chip. If a chip is found all the
                        internal data fields are initialized accordingly.
-                       The structure(s) have to be zeroed out first and then filled with the neccecary 
+                       The structure(s) have to be zeroed out first and then filled with the necessary
                        information about the device.
                </para>
                <programlisting>
@@ -327,7 +327,7 @@ module_init(board_init);
        <sect1 id="Exit_function">
                <title>Exit function</title>
                <para>
-                       The exit function is only neccecary if the driver is
+                       The exit function is only necessary if the driver is
                        compiled as a module. It releases all resources which
                        are held by the chip driver and unregisters the partitions
                        in the MTD layer.
@@ -494,7 +494,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                in this case. See rts_from4.c and diskonchip.c for 
                                implementation reference. In those cases we must also
                                use bad block tables on FLASH, because the ECC layout is
-                               interferring with the bad block marker positions.
+                               interfering with the bad block marker positions.
                                See bad block table support for details.
                        </para>
                </sect2>
@@ -542,7 +542,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                <para>  
                        nand_scan() calls the function nand_default_bbt(). 
                        nand_default_bbt() selects appropriate default
-                       bad block table desriptors depending on the chip information
+                       bad block table descriptors depending on the chip information
                        which was retrieved by nand_scan().
                </para>
                <para>
@@ -554,7 +554,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                <sect2 id="Flash_based_tables">
                        <title>Flash based tables</title>
                        <para>
-                               It may be desired or neccecary to keep a bad block table in FLASH. 
+                               It may be desired or necessary to keep a bad block table in FLASH.
                                For AG-AND chips this is mandatory, as they have no factory marked
                                bad blocks. They have factory marked good blocks. The marker pattern
                                is erased when the block is erased to be reused. So in case of
@@ -565,10 +565,10 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                of the blocks.
                        </para>
                        <para>
-                               The blocks in which the tables are stored are procteted against
+                               The blocks in which the tables are stored are protected against
                                accidental access by marking them bad in the memory bad block
                                table. The bad block table management functions are allowed
-                               to circumvernt this protection.
+                               to circumvent this protection.
                        </para>
                        <para>
                                The simplest way to activate the FLASH based bad block table support 
@@ -592,7 +592,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                User defined tables are created by filling out a 
                                nand_bbt_descr structure and storing the pointer in the
                                nand_chip structure member bbt_td before calling nand_scan(). 
-                               If a mirror table is neccecary a second structure must be
+                               If a mirror table is necessary a second structure must be
                                created and a pointer to this structure must be stored
                                in bbt_md inside the nand_chip structure. If the bbt_md 
                                member is set to NULL then only the main table is used
@@ -666,7 +666,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
                                <para>
                                For automatic placement some blocks must be reserved for
                                bad block table storage. The number of reserved blocks is defined 
-                               in the maxblocks member of the babd block table description structure.
+                               in the maxblocks member of the bad block table description structure.
                                Reserving 4 blocks for mirrored tables should be a reasonable number. 
                                This also limits the number of blocks which are scanned for the bad
                                block table ident pattern.
@@ -1068,11 +1068,11 @@ in this page</entry>
   <chapter id="filesystems">
        <title>Filesystem support</title>
        <para>
-               The NAND driver provides all neccecary functions for a
+               The NAND driver provides all necessary functions for a
                filesystem via the MTD interface.
        </para>
        <para>
-               Filesystems must be aware of the NAND pecularities and
+               Filesystems must be aware of the NAND peculiarities and
                restrictions. One major restrictions of NAND Flash is, that you cannot 
                write as often as you want to a page. The consecutive writes to a page, 
                before erasing it again, are restricted to 1-3 writes, depending on the 
@@ -1222,7 +1222,7 @@ in this page</entry>
 #define NAND_BBT_VERSION       0x00000100
 /* Create a bbt if none axists */
 #define NAND_BBT_CREATE                0x00000200
-/* Write bbt if neccecary */
+/* Write bbt if necessary */
 #define NAND_BBT_WRITE         0x00001000
 /* Read and write back block contents when writing bbt */
 #define NAND_BBT_SAVECONTENT   0x00002000
index 346e552..3b08a08 100644 (file)
        release regulators.  Functions are
        provided to <link linkend='API-regulator-enable'>enable</link>
        and <link linkend='API-regulator-disable'>disable</link> the
-       reguator and to get and set the runtime parameters of the
+       regulator and to get and set the runtime parameters of the
        regulator.
      </para>
      <para>
index 9561815..bbe9c1f 100644 (file)
@@ -766,10 +766,10 @@ framework to set up sysfs files for this region. Simply leave it alone.
        <para>
        The dynamic memory regions will be allocated when the UIO device file,
        <varname>/dev/uioX</varname> is opened.
-       Simiar to static memory resources, the memory region information for
+       Similar to static memory resources, the memory region information for
        dynamic regions is then visible via sysfs at
        <varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
-       The dynmaic memory regions will be freed when the UIO device file is
+       The dynamic memory regions will be freed when the UIO device file is
        closed. When no processes are holding the device file open, the address
        returned to userspace is ~0.
        </para>
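
As a rough illustration of that lifetime rule for dynamic regions, a userspace
sketch that holds the UIO device open while sampling the map address; the
device number and map index are assumptions for the example:

        #include <fcntl.h>
        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                char buf[32];
                int uio = open("/dev/uio0", O_RDONLY); /* triggers dynamic allocation */
                FILE *f = fopen("/sys/class/uio/uio0/maps/map0/addr", "r");

                if (uio < 0 || !f || !fgets(buf, sizeof(buf), f))
                        return 1;
                printf("map0 addr while open: %s", buf); /* reads ~0 once uio0 is closed everywhere */
                fclose(f);
                close(uio);
                return 0;
        }
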
index 8d57c18..85fc0e2 100644 (file)
 
        <listitem><para>The Linux USB API supports synchronous calls for
        control and bulk messages.
-       It also supports asynchnous calls for all kinds of data transfer,
+       It also supports asynchronous calls for all kinds of data transfer,
        using request structures called "URBs" (USB Request Blocks).
        </para></listitem>
 
index d0056a4..6f639d9 100644 (file)
@@ -5696,7 +5696,7 @@ struct _snd_pcm_runtime {
        suspending the PCM operations via
        <function>snd_pcm_suspend_all()</function> or
        <function>snd_pcm_suspend()</function>.  It means that the PCM
-       streams are already stoppped when the register snapshot is
+       streams are already stopped when the register snapshot is
        taken.  But, remember that you don't have to restart the PCM
        stream in the resume callback. It'll be restarted via 
        trigger call with <constant>SNDRV_PCM_TRIGGER_RESUME</constant>
index c6a06b7..f405780 100644 (file)
@@ -314,6 +314,7 @@ int main(int argc, char *argv[])
                        break;
                case 'm':
                        strncpy(cpumask, optarg, sizeof(cpumask));
+                       cpumask[sizeof(cpumask) - 1] = '\0';
                        maskset = 1;
                        printf("cpumask %s maskset %d\n", cpumask, maskset);
                        break;
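
The added assignment matters because strncpy() does not NUL-terminate the
destination when the source string is at least as long as the buffer. A
self-contained sketch of the failure mode and the idiom; the buffer size and
contents are invented for illustration:

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                char dst[4];

                strncpy(dst, "abcdef", sizeof(dst)); /* copies 'a'..'d', no terminator */
                dst[sizeof(dst) - 1] = '\0';         /* the guard the patch adds */
                printf("%s\n", dst);                 /* prints "abc" */
                return 0;
        }
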
index fd786ea..e182be5 100644 (file)
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
 configuring GPIOs it can get its ACPI handle and extract this information
 from ACPI tables.
 
-Currently the kernel is not able to automatically determine from which ACPI
-device it should make the corresponding platform device so we need to add
-the ACPI device explicitly to acpi_platform_device_ids list defined in
-drivers/acpi/acpi_platform.c. This limitation is only for the platform
-devices, SPI and I2C devices are created automatically as described below.
-
 DMA support
 ~~~~~~~~~~~
 DMA controllers enumerated via ACPI should be registered in the system to
index e742d21..a69ffe1 100644 (file)
@@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to
 /sys/devices/system/cpu/intel_pstate/
 
       max_perf_pct: limits the maximum P state that will be requested by
-      the driver stated as a percentage of the available performance.
+      the driver stated as a percentage of the available performance. The
+      available (P states) performance may be reduced by the no_turbo
+      setting described below.
 
       min_perf_pct: limits the minimum P state that will be  requested by
-      the driver stated as a percentage of the available performance.
+      the driver stated as a percentage of the max (non-turbo)
+      performance level.
 
       no_turbo: limits the driver to selecting P states below the turbo
       frequency range.
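
Both knobs are plain decimal percentages under sysfs. A minimal sketch of
capping performance from userspace, assuming intel_pstate is the active driver
and sufficient privileges; the value 80 is arbitrary:

        #include <stdio.h>

        int main(void)
        {
                FILE *f = fopen("/sys/devices/system/cpu/intel_pstate/max_perf_pct", "w");

                if (!f) {
                        perror("max_perf_pct");
                        return 1;
                }
                fprintf(f, "80\n"); /* request at most 80% of available performance */
                return fclose(f) ? 1 : 0;
        }
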
index 11f2330..ad9f8ed 100644 (file)
@@ -6,5 +6,15 @@ following property:
 
 Required root node property:
 
- - compatible: must contain either "marvell,armada380" or
-   "marvell,armada385" depending on the variant of the SoC being used.
+ - compatible: must contain "marvell,armada380"
+
+In addition, boards using the Marvell Armada 385 SoC shall have the
+following property before the previous one:
+
+Required root node property:
+
+compatible: must contain "marvell,armada385"
+
+Example:
+
+compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
index 5216b41..8b4f7b7 100644 (file)
@@ -9,6 +9,18 @@ Required Properties:
 - reg: physical base address of the controller and length of memory mapped
     region.
 
+Optional Properties:
+- clocks: List of clock handles. The parent clocks of the input clocks to the
+       devices in this power domain are set to oscclk before power gating
+       and restored back after powering on a domain. This is required for
+       all domains which are powered on and off and not required for unused
+       domains.
+- clock-names: The following clocks can be specified:
+       - oscclk: Oscillator clock.
+       - pclkN, clkN: Pairs of parent of input clock and input clock to the
+               devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
+               are supported currently.
+
 Node of a device using power domains must have a samsung,power-domain property
 defined with a phandle to respective power domain.
 
@@ -19,6 +31,14 @@ Example:
                reg = <0x10023C00 0x10>;
        };
 
+       mfc_pd: power-domain@10044060 {
+               compatible = "samsung,exynos4210-pd";
+               reg = <0x10044060 0x20>;
+               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+                       <&clock CLK_MOUT_USER_ACLK333>;
+               clock-names = "oscclk", "pclk0", "clk0";
+       };
+
 Example of the node using power domain:
 
        node {
index b513cb8..af527ee 100644 (file)
@@ -40,6 +40,9 @@ Optional properties:
 - arm,filter-ranges : <start length> Starting address and length of window to
   filter. Addresses in the filter window are directed to the M1 port. Other
   addresses will go to the M0 port.
+- arm,io-coherent : indicates that the system is operating in an hardware
+  I/O coherent mode. Valid only when the arm,pl310-cache compatible
+  string is used.
 - interrupts : 1 combined interrupt.
 - cache-id-part: cache id part number to be used if it is not present
   on hardware
index 5d49f2b..832fe8c 100644 (file)
@@ -48,7 +48,7 @@ adc@12D10000 {
 
        /* NTC thermistor is a hwmon device */
        ncp15wb473@0 {
-               compatible = "ntc,ncp15wb473";
+               compatible = "murata,ncp15wb473";
                pullup-uv = <1800000>;
                pullup-ohm = <47000>;
                pulldown-ohm = <0>;
index f055515..366690c 100644 (file)
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
 under node /cpus/cpu@0.
 
 Required properties:
-- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt
-  for details
+- None
 
 Optional properties:
+- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
+  details. OPPs *must* be supplied either via DT, i.e. this property, or
+  populated at runtime.
 - clock-latency: Specify the possible maximum transition latency for clock,
   in unit of nanoseconds.
 - voltage-tolerance: Specify the CPU voltage tolerance in percentage.
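
When the operating-points property is omitted, platform code is expected to
register OPPs itself. A kernel-side sketch using the generic OPP library;
the frequencies and voltages are invented for illustration:

        #include <linux/pm_opp.h>

        static int board_register_cpu0_opps(struct device *cpu_dev)
        {
                int ret;

                /* 1.0 GHz @ 1.10 V and 500 MHz @ 1.00 V -- illustrative values */
                ret = dev_pm_opp_add(cpu_dev, 1000000000, 1100000);
                if (ret)
                        return ret;
                return dev_pm_opp_add(cpu_dev, 500000000, 1000000);
        }
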
index c6f6667..b117b2e 100644 (file)
@@ -3,11 +3,19 @@ NTC Thermistor hwmon sensors
 
 Requires node properties:
 - "compatible" value : one of
-       "ntc,ncp15wb473"
-       "ntc,ncp18wb473"
-       "ntc,ncp21wb473"
-       "ntc,ncp03wb473"
-       "ntc,ncp15wl333"
+       "murata,ncp15wb473"
+       "murata,ncp18wb473"
+       "murata,ncp21wb473"
+       "murata,ncp03wb473"
+       "murata,ncp15wl333"
+
+/* Usage of vendor name "ntc" is deprecated */
+<DEPRECATED>   "ntc,ncp15wb473"
+<DEPRECATED>   "ntc,ncp18wb473"
+<DEPRECATED>   "ntc,ncp21wb473"
+<DEPRECATED>   "ntc,ncp03wb473"
+<DEPRECATED>   "ntc,ncp15wl333"
+
 - "pullup-uv"  Pull up voltage in micro volts
 - "pullup-ohm" Pull up resistor value in ohms
 - "pulldown-ohm" Pull down resistor value in ohms
@@ -21,7 +29,7 @@ Read more about iio bindings at
 
 Example:
        ncp15wb473@0 {
-               compatible = "ntc,ncp15wb473";
+               compatible = "murata,ncp15wb473";
                pullup-uv = <1800000>;
                pullup-ohm = <47000>;
                pulldown-ohm = <0>;
diff --git a/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt b/Documentation/devicetree/bindings/i2c/i2c-rk3x.txt
new file mode 100644 (file)
index 0000000..dde6c22
--- /dev/null
@@ -0,0 +1,42 @@
+* Rockchip RK3xxx I2C controller
+
+This driver interfaces with the native I2C controller present in Rockchip
+RK3xxx SoCs.
+
+Required properties :
+
+ - reg : Offset and length of the register set for the device
+ - compatible : should be "rockchip,rk3066-i2c", "rockchip,rk3188-i2c" or
+               "rockchip,rk3288-i2c".
+ - interrupts : interrupt number
+ - clocks : parent clock
+
+Required on RK3066, RK3188 :
+
+ - rockchip,grf : the phandle of the syscon node for the general register
+                 file (GRF)
+ - on those SoCs an alias with the correct I2C bus ID (bit offset in the GRF)
+   is also required.
+
+Optional properties :
+
+ - clock-frequency : SCL frequency to use (in Hz). If omitted, 100kHz is used.
+
+Example:
+
+aliases {
+       i2c0 = &i2c0;
+}
+
+i2c0: i2c@2002d000 {
+       compatible = "rockchip,rk3188-i2c";
+       reg = <0x2002d000 0x1000>;
+       interrupts = <GIC_SPI 40 IRQ_TYPE_LEVEL_HIGH>;
+       #address-cells = <1>;
+       #size-cells = <0>;
+
+       rockchip,grf = <&grf>;
+
+       clock-names = "i2c";
+       clocks = <&cru PCLK_I2C0>;
+};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt b/Documentation/devicetree/bindings/i2c/i2c-sunxi-p2wi.txt
new file mode 100644 (file)
index 0000000..6b76548
--- /dev/null
@@ -0,0 +1,41 @@
+
+* Allwinner P2WI (Push/Pull 2 Wire Interface) controller
+
+Required properties :
+
+ - reg             : Offset and length of the register set for the device.
+ - compatible      : Should one of the following:
+                     - "allwinner,sun6i-a31-p2wi"
+ - interrupts      : The interrupt line connected to the P2WI peripheral.
+ - clocks          : The gate clk connected to the P2WI peripheral.
+ - resets          : The reset line connected to the P2WI peripheral.
+
+Optional properties :
+
+ - clock-frequency : Desired P2WI bus clock frequency in Hz. If not set the
+default frequency is 100kHz
+
+A P2WI may contain one child node encoding a P2WI slave device.
+
+Slave device properties:
+  Required properties:
+   - reg           : the I2C slave address used during the initialization
+                     process to switch from I2C to P2WI mode
+
+Example:
+
+       p2wi@01f03400 {
+               compatible = "allwinner,sun6i-a31-p2wi";
+               reg = <0x01f03400 0x400>;
+               interrupts = <0 39 4>;
+               clocks = <&apb0_gates 3>;
+               clock-frequency = <6000000>;
+               resets = <&apb0_rst 3>;
+
+               axp221: pmic@68 {
+                       compatible = "x-powers,axp221";
+                       reg = <0x68>;
+
+                       /* ... */
+               };
+       };
index d01ed63..42409bf 100644 (file)
@@ -8,10 +8,16 @@ Required properties:
    - SerDes integration registers (1/2)
    - SerDes integration registers (2/2)
 
+Optional properties:
+- amd,speed-set: Speed capabilities of the device
+    0 - 1GbE and 10GbE (default)
+    1 - 2.5GbE and 10GbE
+
 Example:
        xgbe_phy@e1240800 {
                compatible = "amd,xgbe-phy-seattle-v1a", "ethernet-phy-ieee802.3-c45";
                reg = <0 0xe1240800 0 0x00400>,
                      <0 0xe1250000 0 0x00060>,
                      <0 0xe1250080 0 0x00004>;
+               amd,speed-set = <0>;
        };
index ea0c790..41354f7 100644 (file)
@@ -8,16 +8,21 @@ Required properties:
 - interrupt-parent: Should be the phandle for the interrupt controller
   that services interrupts for this device
 - interrupts: Should contain the amd-xgbe interrupt
-- clocks: Should be the DMA clock for the amd-xgbe device (used for
-  calculating the correct Rx interrupt watchdog timer value on a DMA
-  channel for coalescing)
-- clock-names: Should be the name of the DMA clock, "dma_clk"
+- clocks:
+   - DMA clock for the amd-xgbe device (used for calculating the
+     correct Rx interrupt watchdog timer value on a DMA channel
+     for coalescing)
+   - PTP clock for the amd-xgbe device
+- clock-names: Should be the names of the clocks
+   - "dma_clk" for the DMA clock
+   - "ptp_clk" for the PTP clock
 - phy-handle: See ethernet.txt file in the same directory
 - phy-mode: See ethernet.txt file in the same directory
 
 Optional properties:
 - mac-address: mac address to be assigned to the device. Can be overridden
   by UEFI.
+- dma-coherent: Present if dma operations are coherent
 
 Example:
        xgbe@e0700000 {
@@ -26,8 +31,8 @@ Example:
                      <0 0xe0780000 0 0x80000>;
                interrupt-parent = <&gic>;
                interrupts = <0 325 4>;
-               clocks = <&xgbe_clk>;
-               clock-names = "dma_clk";
+               clocks = <&xgbe_dma_clk>, <&xgbe_ptp_clk>;
+               clock-names = "dma_clk", "ptp_clk";
                phy-handle = <&phy>;
                phy-mode = "xgmii";
                mac-address = [ 02 a1 a2 a3 a4 a5 ];
index c183ea9..aa7ad62 100644 (file)
@@ -4,7 +4,8 @@ Required properties:
 - compatible: should be one of "brcm,systemport-v1.00" or "brcm,systemport"
 - reg: address and length of the register set for the device.
 - interrupts: interrupts for the device, first cell must be for the the rx
-  interrupts, and the second cell should be for the transmit queues
+  interrupts, and the second cell should be for the transmit queues. An
+  optional third interrupt cell for Wake-on-LAN can be specified
 - local-mac-address: Ethernet MAC address (48 bits) of this adapter
 - phy-mode: Should be a string describing the PHY interface to the
   Ethernet switch/PHY, see Documentation/devicetree/bindings/net/ethernet.txt
index 72efaaf..0369e25 100644 (file)
@@ -1,8 +1,8 @@
-TI SoC Davinci MDIO Controller Device Tree Bindings
+TI SoC Davinci/Keystone2 MDIO Controller Device Tree Bindings
 ---------------------------------------------------
 
 Required properties:
-- compatible           : Should be "ti,davinci_mdio"
+- compatible           : Should be "ti,davinci_mdio" or "ti,keystone_mdio"
 - reg                  : physical base address and size of the davinci mdio
                          registers map
 - bus_freq             : Mdio Bus frequency
@@ -19,7 +19,7 @@ file.
 Examples:
 
        mdio: davinci_mdio@4A101000 {
-               compatible = "ti,cpsw";
+               compatible = "ti,davinci_mdio";
                reg = <0x4A101000 0x1000>;
                bus_freq = <1000000>;
        };
@@ -27,7 +27,7 @@ Examples:
 (or)
 
        mdio: davinci_mdio@4A101000 {
-               compatible = "ti,cpsw";
+               compatible = "ti,davinci_mdio";
                ti,hwmods = "davinci_mdio";
                bus_freq = <1000000>;
        };
diff --git a/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt b/Documentation/devicetree/bindings/net/ieee802154/cc2520.txt
new file mode 100644 (file)
index 0000000..0071883
--- /dev/null
@@ -0,0 +1,29 @@
+*CC2520 IEEE 802.15.4 Compatible Radio*
+
+Required properties:
+       - compatible:           should be "ti,cc2520"
+       - spi-max-frequency:    maximal bus speed (8000000), should be set to 4000000 depends
+                               sync or async operation mode
+       - reg:                  the chipselect index
+       - pinctrl-0:            pin control group to be used for this controller.
+       - pinctrl-names:        must contain a "default" entry.
+       - fifo-gpio:            GPIO spec for the FIFO pin
+       - fifop-gpio:           GPIO spec for the FIFOP pin
+       - sfd-gpio:             GPIO spec for the SFD pin
+       - cca-gpio:             GPIO spec for the CCA pin
+       - vreg-gpio:            GPIO spec for the VREG pin
+       - reset-gpio:           GPIO spec for the RESET pin
+Example:
+       cc2520@0 {
+               compatible = "ti,cc2520";
+               reg = <0>;
+               spi-max-frequency = <4000000>;
+               pinctrl-names = "default";
+               pinctrl-0 = <&cc2520_cape_pins>;
+               fifo-gpio = <&gpio1 18 0>;
+               fifop-gpio = <&gpio1 19 0>;
+               sfd-gpio = <&gpio1 13 0>;
+               cca-gpio = <&gpio1 16 0>;
+               vreg-gpio = <&gpio0 31 0>;
+               reset-gpio = <&gpio1 12 0>;
+       };
diff --git a/Documentation/devicetree/bindings/net/marvell-pp2.txt b/Documentation/devicetree/bindings/net/marvell-pp2.txt
new file mode 100644 (file)
index 0000000..aa4f423
--- /dev/null
@@ -0,0 +1,61 @@
+* Marvell Armada 375 Ethernet Controller (PPv2)
+
+Required properties:
+
+- compatible: should be "marvell,armada-375-pp2"
+- reg: addresses and length of the register sets for the device.
+  Must contain the following register sets:
+       - common controller registers
+       - LMS registers
+  In addition, at least one port register set is required.
+- clocks: a pointer to the reference clocks for this device, consequently:
+       - main controller clock
+       - GOP clock
+- clock-names: names of used clocks, must be "pp_clk" and "gop_clk".
+
+The ethernet ports are represented by subnodes. At least one port is
+required.
+
+Required properties (port):
+
+- interrupts: interrupt for the port
+- port-id: should be '0' or '1' for ethernet ports, and '2' for the
+           loopback port
+- phy-mode: See ethernet.txt file in the same directory
+
+Optional properties (port):
+
+- marvell,loopback: port is loopback mode
+- phy: a phandle to a phy node defining the PHY address (as the reg
+  property, a single integer). Note: if this property isn't present,
+  then fixed link is assumed, and the 'fixed-link' property is
+  mandatory.
+
+Example:
+
+ethernet@f0000 {
+       compatible = "marvell,armada-375-pp2";
+       reg = <0xf0000 0xa000>,
+             <0xc0000 0x3060>,
+             <0xc4000 0x100>,
+             <0xc5000 0x100>;
+       clocks = <&gateclk 3>, <&gateclk 19>;
+       clock-names = "pp_clk", "gop_clk";
+       status = "okay";
+
+       eth0: eth0@c4000 {
+               interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <0>;
+               status = "okay";
+               phy = <&phy0>;
+               phy-mode = "gmii";
+       };
+
+       eth1: eth1@c5000 {
+               interrupts = <GIC_SPI 41 IRQ_TYPE_LEVEL_HIGH>;
+               port-id = <1>;
+               status = "okay";
+               phy = <&phy3>;
+               phy-mode = "gmii";
+       };
+};
index e7106b5..34d4db1 100644 (file)
@@ -9,6 +9,7 @@ Required properties:
              "renesas,ether-r8a7779"  if the device is a part of R8A7779 SoC.
              "renesas,ether-r8a7790"  if the device is a part of R8A7790 SoC.
              "renesas,ether-r8a7791"  if the device is a part of R8A7791 SoC.
+             "renesas,ether-r8a7794"  if the device is a part of R8A7794 SoC.
              "renesas,ether-r7s72100" if the device is a part of R7S72100 SoC.
 - reg: offset and length of (1) the E-DMAC/feLic register block (required),
        (2) the TSU register block (optional).
index a2acd2b..9b03c57 100644 (file)
@@ -25,6 +25,10 @@ Required properties:
 - snps,force_sf_dma_mode       Force DMA to use the Store and Forward
                                mode for both tx and rx. This flag is
                                ignored if force_thresh_dma_mode is set.
+- snps,multicast-filter-bins:  Number of multicast filter hash bins
+                               supported by this device instance
+- snps,perfect-filter-entries: Number of perfect filter entries supported
+                               by this device instance
 
 Optional properties:
 - resets: Should contain a phandle to the STMMAC reset signal, if any
@@ -47,6 +51,8 @@ Examples:
                mac-address = [000000000000]; /* Filled in by U-Boot */
                max-frame-size = <3800>;
                phy-mode = "gmii";
+               snps,multicast-filter-bins = <256>;
+               snps,perfect-filter-entries = <128>;
                clocks = <&clock>;
                clock-names = "stmmaceth">;
        };
index 64fd7de..b355660 100644 (file)
@@ -4,6 +4,13 @@ Required properties:
 
   - compatible: Must contain one of the following:
 
+    - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
+    - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
+    - "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
+    - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
+    - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
+    - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
+    - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
     - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
     - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
     - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
index b82a268..bee6ff2 100644 (file)
@@ -23,6 +23,12 @@ Optional properties:
 - spi-max-frequency: Specifies maximum SPI clock frequency,
                      Units - Hz. Definition as per
                      Documentation/devicetree/bindings/spi/spi-bus.txt
+- num-cs:      total number of chipselects
+- cs-gpios:    should specify GPIOs used for chipselects.
+               The gpios will be referred to as reg = <index> in the SPI child
+               nodes.  If unspecified, a single SPI device without a chip
+               select can be used.
+
 
 SPI slave nodes must be children of the SPI master node and can contain
 properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
index 4d7f375..46a311e 100644 (file)
@@ -83,6 +83,7 @@ mosaixtech    Mosaix Technologies, Inc.
 moxa   Moxa
 mpl    MPL AG
 mundoreader    Mundo Reader S.L.
+murata Murata Manufacturing Co., Ltd.
 mxicy  Macronix International Co., Ltd.
 national       National Semiconductor
 neonode                Neonode Inc.
index 4e30eba..9af538b 100644 (file)
@@ -1,6 +1,17 @@
 Email clients info for Linux
 ======================================================================
 
+Git
+----------------------------------------------------------------------
+These days most developers use `git send-email` instead of regular
+email clients.  The man page for this is quite good.  On the receiving
+end, maintainers use `git am` to apply the patches.
+
+If you are new to git then send your first patch to yourself.  Save it
+as raw text including all the headers.  Run `git am raw_email.txt` and
+then review the changelog with `git log`.  When that works then send
+the patch to the appropriate mailing list(s).
+
 General Preferences
 ----------------------------------------------------------------------
 Patches for the Linux kernel are submitted via email, preferably as
index 3bfda94..057b770 100644 (file)
@@ -1,7 +1,7 @@
 Kernel driver ntc_thermistor
 =================
 
-Supported thermistors:
+Supported thermistors from Murata:
 * Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333
   Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473', 'ncp15wl333'
   Datasheet: Publicly available at Murata
@@ -15,9 +15,9 @@ Authors:
 Description
 -----------
 
-The NTC thermistor is a simple thermistor that requires users to provide the
-resistance and lookup the corresponding compensation table to get the
-temperature input.
+The NTC (Negative Temperature Coefficient) thermistor is a simple thermistor
+that requires users to provide the resistance and lookup the corresponding
+compensation table to get the temperature input.
 
 The NTC driver provides lookup tables with a linear approximation function
 and four circuit models with an option not to use any of the four models.
index f1ea2c6..c587a96 100644 (file)
@@ -281,6 +281,19 @@ gestures can normally be extracted from it.
 If INPUT_PROP_SEMI_MT is not set, the device is assumed to be a true MT
 device.
 
+INPUT_PROP_TOPBUTTONPAD:
+-----------------------
+Some laptops, most notably the Lenovo *40 series provide a trackstick
+device but do not have physical buttons associated with the trackstick
+device. Instead, the top area of the touchpad is marked to show
+visual/haptic areas for left, middle, right buttons intended to be used
+with the trackstick.
+
+If INPUT_PROP_TOPBUTTONPAD is set, userspace should emulate buttons
+accordingly. This property does not affect kernel behavior.
+The kernel does not provide button emulation for such devices but treats
+them as any other INPUT_PROP_BUTTONPAD device.
+
 Guidelines:
 ==========
 The guidelines below ensure proper single-touch and multi-finger functionality.
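
Userspace can test the INPUT_PROP_TOPBUTTONPAD bit documented above with the
EVIOCGPROP ioctl before deciding whether to emulate buttons. A minimal sketch;
the default event-node path is an assumption:

        #include <fcntl.h>
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/input.h>

        int main(int argc, char **argv)
        {
                unsigned char props[INPUT_PROP_MAX / 8 + 1] = { 0 };
                int fd = open(argc > 1 ? argv[1] : "/dev/input/event0", O_RDONLY);

                if (fd < 0 || ioctl(fd, EVIOCGPROP(sizeof(props)), props) < 0) {
                        perror("EVIOCGPROP");
                        return 1;
                }
                if (props[INPUT_PROP_TOPBUTTONPAD / 8] &
                    (1 << (INPUT_PROP_TOPBUTTONPAD % 8)))
                        printf("top-button emulation wanted\n");
                return 0;
        }
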
index d567a7c..c600e2f 100644 (file)
@@ -1171,7 +1171,7 @@ When kbuild executes, the following steps are followed (roughly):
              obvious reason.
 
     dtc
-       Create flattend device tree blob object suitable for linking
+       Create flattened device tree blob object suitable for linking
        into vmlinux. Device tree blobs linked into vmlinux are placed
        in an init section in the image. Platform code *must* copy the
        blob to non-init memory prior to calling unflatten_device_tree().
index 6eaa9cd..b7fa2f5 100644 (file)
@@ -1474,6 +1474,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        js=             [HW,JOY] Analog joystick
                        See Documentation/input/joystick.txt.
 
+       kaslr/nokaslr   [X86]
+                       Enable/disable kernel and module base offset ASLR
+                       (Address Space Layout Randomization) if built into
+                       the kernel. When CONFIG_HIBERNATION is selected,
+                       kASLR is disabled by default. When kASLR is enabled,
+                       hibernation will be disabled.
+
        keepinitrd      [HW,ARM]
 
        kernelcore=nn[KMG]      [KNL,X86,IA-64,PPC] This parameter
@@ -2110,10 +2117,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        noapic          [SMP,APIC] Tells the kernel to not make use of any
                        IOAPICs that may be present in the system.
 
-       nokaslr         [X86]
-                       Disable kernel and module base offset ASLR (Address
-                       Space Layout Randomization) if built into the kernel.
-
        noautogroup     Disable scheduler automatic task group creation.
 
        nobats          [PPC] Do not use BATs for mapping kernel lowmem
@@ -2184,6 +2187,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        in certain environments such as networked servers or
                        real-time systems.
 
+       nohibernate     [HIBERNATION] Disable hibernation and resume.
+
        nohz=           [KNL] Boottime enable/disable dynamic ticks
                        Valid arguments: on, off
                        Default: on
@@ -2785,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        leaf rcu_node structure.  Useful for very large
                        systems.
 
+       rcutree.jiffies_till_sched_qs= [KNL]
+                       Set required age in jiffies for a
+                       given grace period before RCU starts
+                       soliciting quiescent-state help from
+                       rcu_note_context_switch().
+
        rcutree.jiffies_till_first_fqs= [KNL]
                        Set delay from grace-period initialization to
                        first attempt to force quiescent states.
@@ -2980,6 +2991,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                noresume        Don't check if there's a hibernation image
                                present during boot.
                nocompress      Don't compress/decompress hibernation images.
+               no              Disable hibernation and resume.
 
        retain_initrd   [RAM] Keep initrd memory after extraction
 
@@ -3124,6 +3136,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        [KNL] Should the soft-lockup detector generate panics.
                        Format: <integer>
 
+       softlockup_all_cpu_backtrace=
+                       [KNL] Should the soft-lockup detector generate
+                       backtraces on all cpus.
+                       Format: <integer>
+
        sonypi.*=       [HW] Sony Programmable I/O Control Device driver
                        See Documentation/laptops/sonypi.txt
 
@@ -3515,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        the allocated input device; If set to 0, video driver
                        will only send out the event without touching backlight
                        brightness level.
-                       default: 0
+                       default: 1
 
        virtio_mmio.device=
                        [VMMIO] Memory mapped virtio (platform) device.
index d13b9a9..d399ae1 100644 (file)
@@ -8,8 +8,8 @@ disk-shock-protection.txt
        - information on hard disk shock protection.
 dslm.c
        - Simple Disk Sleep Monitor program
-hpfall.c
-       - (HP) laptop accelerometer program for disk protection.
+freefall.c
+       - (HP/DELL) laptop accelerometer program for disk protection.
 laptop-mode.txt
        - how to conserve battery power using laptop-mode.
 sony-laptop.txt
diff --git a/Documentation/laptops/freefall.c b/Documentation/laptops/freefall.c
new file mode 100644 (file)
index 0000000..aab2ff0
--- /dev/null
@@ -0,0 +1,177 @@
+/* Disk protection for HP/DELL machines.
+ *
+ * Copyright 2008 Eric Piel
+ * Copyright 2009 Pavel Machek <pavel@ucw.cz>
+ * Copyright 2012 Sonal Santan
+ * Copyright 2014 Pali Rohár <pali.rohar@gmail.com>
+ *
+ * GPLv2.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <string.h>
+#include <stdint.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <syslog.h>
+
+static int noled;
+static char unload_heads_path[64];
+static char device_path[32];
+static const char app_name[] = "FREE FALL";
+
+static int set_unload_heads_path(char *device)
+{
+       char devname[64];
+
+       if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
+               return -EINVAL;
+       strncpy(devname, device + 5, sizeof(devname) - 1);
+       strncpy(device_path, device, sizeof(device_path) - 1);
+
+       snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
+                               "/sys/block/%s/device/unload_heads", devname);
+       return 0;
+}
+
+static int valid_disk(void)
+{
+       int fd = open(unload_heads_path, O_RDONLY);
+
+       if (fd < 0) {
+               perror(unload_heads_path);
+               return 0;
+       }
+
+       close(fd);
+       return 1;
+}
+
+static void write_int(char *path, int i)
+{
+       char buf[1024];
+       int fd = open(path, O_RDWR);
+
+       if (fd < 0) {
+               perror("open");
+               exit(1);
+       }
+
+       sprintf(buf, "%d", i);
+
+       if (write(fd, buf, strlen(buf)) != strlen(buf)) {
+               perror("write");
+               exit(1);
+       }
+
+       close(fd);
+}
+
+static void set_led(int on)
+{
+       if (noled)
+               return;
+       write_int("/sys/class/leds/hp::hddprotect/brightness", on);
+}
+
+static void protect(int seconds)
+{
+       const char *str = (seconds == 0) ? "Unparked" : "Parked";
+
+       write_int(unload_heads_path, seconds*1000);
+       syslog(LOG_INFO, "%s %s disk head\n", str, device_path);
+}
+
+static int on_ac(void)
+{
+       /* /sys/class/power_supply/AC0/online */
+       return 1;
+}
+
+static int lid_open(void)
+{
+       /* /proc/acpi/button/lid/LID/state */
+       return 1;
+}
+
+static void ignore_me(int signum)
+{
+       protect(0);
+       set_led(0);
+}
+
+int main(int argc, char **argv)
+{
+       int fd, ret;
+       struct stat st;
+       struct sched_param param;
+
+       if (argc == 1)
+               ret = set_unload_heads_path("/dev/sda");
+       else if (argc == 2)
+               ret = set_unload_heads_path(argv[1]);
+       else
+               ret = -EINVAL;
+
+       if (ret || !valid_disk()) {
+               fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
+                               argv[0]);
+               exit(1);
+       }
+
+       fd = open("/dev/freefall", O_RDONLY);
+       if (fd < 0) {
+               perror("/dev/freefall");
+               return EXIT_FAILURE;
+       }
+
+       if (stat("/sys/class/leds/hp::hddprotect/brightness", &st))
+               noled = 1;
+
+       if (daemon(0, 0) != 0) {
+               perror("daemon");
+               return EXIT_FAILURE;
+       }
+
+       openlog(app_name, LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);
+
+       param.sched_priority = sched_get_priority_max(SCHED_FIFO);
+       sched_setscheduler(0, SCHED_FIFO, &param);
+       mlockall(MCL_CURRENT|MCL_FUTURE);
+
+       signal(SIGALRM, ignore_me);
+
+       for (;;) {
+               unsigned char count;
+
+               ret = read(fd, &count, sizeof(count));
+               alarm(0);
+               if ((ret == -1) && (errno == EINTR)) {
+                       /* Alarm expired, time to unpark the heads */
+                       continue;
+               }
+
+               if (ret != sizeof(count)) {
+                       perror("read");
+                       break;
+               }
+
+               protect(21);
+               set_led(1);
+               if (1 || on_ac() || lid_open()) /* always true: stubs */
+                       alarm(2);
+               else
+                       alarm(20);
+       }
+
+       closelog();
+       close(fd);
+       return EXIT_SUCCESS;
+}
diff --git a/Documentation/laptops/hpfall.c b/Documentation/laptops/hpfall.c
deleted file mode 100644 (file)
index b85dbba..0000000
+++ /dev/null
@@ -1,146 +0,0 @@
-/* Disk protection for HP machines.
- *
- * Copyright 2008 Eric Piel
- * Copyright 2009 Pavel Machek <pavel@ucw.cz>
- *
- * GPLv2.
- */
-
-#include <stdio.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/stat.h>
-#include <sys/types.h>
-#include <string.h>
-#include <stdint.h>
-#include <errno.h>
-#include <signal.h>
-#include <sys/mman.h>
-#include <sched.h>
-
-char unload_heads_path[64];
-
-int set_unload_heads_path(char *device)
-{
-       char devname[64];
-
-       if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
-               return -EINVAL;
-       strncpy(devname, device + 5, sizeof(devname));
-
-       snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
-                               "/sys/block/%s/device/unload_heads", devname);
-       return 0;
-}
-int valid_disk(void)
-{
-       int fd = open(unload_heads_path, O_RDONLY);
-       if (fd < 0) {
-               perror(unload_heads_path);
-               return 0;
-       }
-
-       close(fd);
-       return 1;
-}
-
-void write_int(char *path, int i)
-{
-       char buf[1024];
-       int fd = open(path, O_RDWR);
-       if (fd < 0) {
-               perror("open");
-               exit(1);
-       }
-       sprintf(buf, "%d", i);
-       if (write(fd, buf, strlen(buf)) != strlen(buf)) {
-               perror("write");
-               exit(1);
-       }
-       close(fd);
-}
-
-void set_led(int on)
-{
-       write_int("/sys/class/leds/hp::hddprotect/brightness", on);
-}
-
-void protect(int seconds)
-{
-       write_int(unload_heads_path, seconds*1000);
-}
-
-int on_ac(void)
-{
-//     /sys/class/power_supply/AC0/online
-}
-
-int lid_open(void)
-{
-//     /proc/acpi/button/lid/LID/state
-}
-
-void ignore_me(void)
-{
-       protect(0);
-       set_led(0);
-}
-
-int main(int argc, char **argv)
-{
-       int fd, ret;
-       struct sched_param param;
-
-       if (argc == 1)
-               ret = set_unload_heads_path("/dev/sda");
-       else if (argc == 2)
-               ret = set_unload_heads_path(argv[1]);
-       else
-               ret = -EINVAL;
-
-       if (ret || !valid_disk()) {
-               fprintf(stderr, "usage: %s <device> (default: /dev/sda)\n",
-                               argv[0]);
-               exit(1);
-       }
-
-       fd = open("/dev/freefall", O_RDONLY);
-       if (fd < 0) {
-               perror("/dev/freefall");
-               return EXIT_FAILURE;
-       }
-
-       daemon(0, 0);
-       param.sched_priority = sched_get_priority_max(SCHED_FIFO);
-       sched_setscheduler(0, SCHED_FIFO, &param);
-       mlockall(MCL_CURRENT|MCL_FUTURE);
-
-       signal(SIGALRM, ignore_me);
-
-       for (;;) {
-               unsigned char count;
-
-               ret = read(fd, &count, sizeof(count));
-               alarm(0);
-               if ((ret == -1) && (errno == EINTR)) {
-                       /* Alarm expired, time to unpark the heads */
-                       continue;
-               }
-
-               if (ret != sizeof(count)) {
-                       perror("read");
-                       break;
-               }
-
-               protect(21);
-               set_led(1);
-               if (1 || on_ac() || lid_open())
-                       alarm(2);
-               else
-                       alarm(20);
-       }
-
-       close(fd);
-       return EXIT_SUCCESS;
-}
index f304edb..45134dc 100644 (file)
@@ -209,15 +209,12 @@ If memory device is found, memory hotplug code will be called.
 
 4.2 Notify memory hot-add event by hand
 ------------
-On powerpc, the firmware does not notify a memory hotplug event to the kernel.
-Therefore, "probe" interface is supported to notify the event to the kernel.
-This interface depends on CONFIG_ARCH_MEMORY_PROBE.
-
-CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config
-option is disabled by default since ACPI notifies a memory hotplug event to
-the kernel, which performs its hotplug operation as the result. Please
-enable this option if you need the "probe" interface for testing purposes
-on x86.
+On some architectures, the firmware may not notify the kernel of a memory
+hotplug event.  Therefore, the memory "probe" interface is supported to
+explicitly notify the kernel.  This interface depends on
+CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh, and x86
+if hotplug is supported, although for x86 this should be handled by ACPI
+notification.
 
 Probe interface is located at
 /sys/devices/system/memory/probe
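+
+As a sketch (the address below is hypothetical; it must be the physical
+start address of the new memory, aligned to the architecture's memory
+block size):
+
+  # echo 0x100000000 > /sys/devices/system/memory/probe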
index 9c723ec..eeb5b2e 100644 (file)
@@ -542,10 +542,10 @@ mode
 
                XOR policy: Transmit based on the selected transmit
                hash policy.  The default policy is a simple [(source
-               MAC address XOR'd with destination MAC address) modulo
-               slave count].  Alternate transmit policies may be
-               selected via the xmit_hash_policy option, described
-               below.
+               MAC address XOR'd with destination MAC address XOR
+               packet type ID) modulo slave count].  Alternate transmit
+               policies may be selected via the xmit_hash_policy option,
+               described below.
 
                This mode provides load balancing and fault tolerance.
 
@@ -801,10 +801,11 @@ xmit_hash_policy
 
        layer2
 
-               Uses XOR of hardware MAC addresses to generate the
-               hash.  The formula is
+               Uses XOR of hardware MAC addresses and packet type ID
+               field to generate the hash. The formula is
 
-               (source MAC XOR destination MAC) modulo slave count
+               hash = source MAC XOR destination MAC XOR packet type ID
+               slave number = hash modulo slave count
 
                This algorithm will place all traffic to a particular
                network peer on the same slave.
@@ -819,7 +820,7 @@ xmit_hash_policy
                Uses XOR of hardware MAC addresses and IP addresses to
                generate the hash.  The formula is
 
-               hash = source MAC XOR destination MAC
+               hash = source MAC XOR destination MAC XOR packet type ID
                hash = hash XOR source IP XOR destination IP
                hash = hash XOR (hash RSHIFT 16)
                hash = hash XOR (hash RSHIFT 8)
@@ -2301,13 +2302,13 @@ broadcast: Like active-backup, there is not much advantage to this
        bandwidth.  
 
        Additionally, the linux bonding 802.3ad implementation
-       distributes traffic by peer (using an XOR of MAC addresses),
-       so in a "gatewayed" configuration, all outgoing traffic will
-       generally use the same device.  Incoming traffic may also end
-       up on a single device, but that is dependent upon the
-       balancing policy of the peer's 8023.ad implementation.  In a
-       "local" configuration, traffic will be distributed across the
-       devices in the bond.
+       distributes traffic by peer (using an XOR of MAC addresses
+       and packet type ID), so in a "gatewayed" configuration, all
+       outgoing traffic will generally use the same device.  Incoming
+       traffic may also end up on a single device, but that is
+       dependent upon the balancing policy of the peer's 802.3ad
+       implementation.  In a "local" configuration, traffic will be
+       distributed across the devices in the bond.
 
        Finally, the 802.3ad mode mandates the use of the MII monitor,
        therefore, the ARP monitor is not available in this mode.
index ee78eba..c48a970 100644 (file)
@@ -586,12 +586,12 @@ team driver's classifier for its load-balancing mode, netfilter's xt_bpf
 extension, PTP dissector/classifier, and much more. They are all internally
 converted by the kernel into the new instruction set representation and run
 in the eBPF interpreter. For in-kernel handlers, this all works transparently
-by using sk_unattached_filter_create() for setting up the filter, resp.
-sk_unattached_filter_destroy() for destroying it. The macro
-SK_RUN_FILTER(filter, ctx) transparently invokes eBPF interpreter or JITed
-code to run the filter. 'filter' is a pointer to struct sk_filter that we
-got from sk_unattached_filter_create(), and 'ctx' the given context (e.g.
-skb pointer). All constraints and restrictions from sk_chk_filter() apply
+by using bpf_prog_create() for setting up the filter, resp.
+bpf_prog_destroy() for destroying it. The macro
+BPF_PROG_RUN(filter, ctx) transparently invokes eBPF interpreter or JITed
+code to run the filter. 'filter' is a pointer to struct bpf_prog that we
+got from bpf_prog_create(), and 'ctx' the given context (e.g.
+skb pointer). All constraints and restrictions from bpf_check_classic() apply
 before a conversion to the new layout is being done behind the scenes!
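+
+As a quick orientation, a minimal in-kernel user of these entry points
+could look like the sketch below (hypothetical module code; the
+one-instruction "accept all" classic BPF program is illustrative only):
+
+    #include <linux/kernel.h>
+    #include <linux/filter.h>
+    #include <linux/skbuff.h>
+
+    static struct bpf_prog *prog;
+
+    static int example_setup(void)
+    {
+            /* classic BPF "ret 0xffff": accept every packet */
+            static struct sock_filter insns[] = {
+                    BPF_STMT(BPF_RET | BPF_K, 0xffff),
+            };
+            struct sock_fprog_kern fprog = {
+                    .len    = ARRAY_SIZE(insns),
+                    .filter = insns,
+            };
+
+            /* converted to the new representation behind the scenes */
+            return bpf_prog_create(&prog, &fprog);
+    }
+
+    static unsigned int example_run(struct sk_buff *skb)
+    {
+            /* invokes the eBPF interpreter or JITed code */
+            return BPF_PROG_RUN(prog, skb);
+    }
+
+    static void example_teardown(void)
+    {
+            bpf_prog_destroy(prog);
+    }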
 
 Currently, the classic BPF format is being used for JITing on most of the
index f737273..a251bf4 100644 (file)
@@ -69,8 +69,11 @@ Additional Configurations
 
   FCoE
   ----
-  Fiber Channel over Ethernet (FCoE) hardware offload is not currently
-  supported.
+  The driver supports Fibre Channel over Ethernet (FCoE) and Data Center
+  Bridging (DCB) functionality. Configuring DCB and FCoE is outside the scope
+  of this driver doc. Refer to http://www.open-fcoe.org/ for FCoE project
+  information, and to http://www.open-lldp.org/ or the
+  e1000-eedc@lists.sourceforge.net mailing list for DCB information.
 
   MAC and VLAN anti-spoofing feature
   ----------------------------------
index ab42c95..29a9351 100644 (file)
@@ -101,19 +101,17 @@ ipfrag_high_thresh - INTEGER
        Maximum memory used to reassemble IP fragments. When
        ipfrag_high_thresh bytes of memory is allocated for this purpose,
        the fragment handler will toss packets until ipfrag_low_thresh
-       is reached.
+       is reached. This also serves as the maximum limit for namespaces
+       other than the initial one.
 
 ipfrag_low_thresh - INTEGER
-       See ipfrag_high_thresh
+       Maximum memory used to reassemble IP fragments before the kernel
+       begins to remove incomplete fragment queues to free up resources.
+       The kernel still accepts new fragments for defragmentation.
 
 ipfrag_time - INTEGER
        Time in seconds to keep an IP fragment in memory.
 
-ipfrag_secret_interval - INTEGER
-       Regeneration interval (in seconds) of the hash secret (or lifetime
-       for the hash secret) for IP fragments.
-       Default: 600
-
 ipfrag_max_dist - INTEGER
        ipfrag_max_dist is a non-negative integer value which defines the
        maximum "disorder" which is allowed among fragments which share a
@@ -1132,6 +1130,15 @@ flowlabel_consistency - BOOLEAN
        FALSE: disabled
        Default: TRUE
 
+auto_flowlabels - BOOLEAN
+       Automatically generate flow labels based on a flow hash
+       of the packet. This allows intermediate devices, such as routers,
+       to identify packet flows for mechanisms like Equal Cost Multipath
+       Routing (see RFC 6438).
+       TRUE: enabled
+       FALSE: disabled
+       Default: FALSE
+
 anycast_src_echo_reply - BOOLEAN
        Controls the use of anycast addresses as source addresses for ICMPv6
        echo reply
@@ -1153,11 +1160,6 @@ ip6frag_low_thresh - INTEGER
 ip6frag_time - INTEGER
        Time in seconds to keep an IPv6 fragment in memory.
 
-ip6frag_secret_interval - INTEGER
-       Regeneration interval (in seconds) of the hash secret (or lifetime
-       for the hash secret) for IPv6 fragments.
-       Default: 600
-
 conf/default/*:
        Change the interface-specific default settings.
 
@@ -1210,6 +1212,18 @@ accept_ra_defrtr - BOOLEAN
        Functional default: enabled if accept_ra is enabled.
                            disabled if accept_ra is disabled.
 
+accept_ra_from_local - BOOLEAN
+       Accept RAs whose source address is found on the local machine,
+       if the RA is otherwise valid and able to be accepted.
+       The default is to NOT accept these, as they may indicate an
+       unintended network loop.
+
+       Functional default:
+           enabled if accept_ra_from_local is enabled
+               on a specific interface.
+           disabled if accept_ra_from_local is disabled
+               on a specific interface.
+
 accept_ra_pinfo - BOOLEAN
        Learn Prefix Information in Router Advertisement.
 
index 38112d5..a6d7cb9 100644 (file)
@@ -1008,14 +1008,9 @@ hardware timestamps to be used. Note: you may need to enable the generation
 of hardware timestamps with SIOCSHWTSTAMP (see related information from
 Documentation/networking/timestamping.txt).
 
-PACKET_TIMESTAMP accepts the same integer bit field as
-SO_TIMESTAMPING.  However, only the SOF_TIMESTAMPING_SYS_HARDWARE
-and SOF_TIMESTAMPING_RAW_HARDWARE values are recognized by
-PACKET_TIMESTAMP.  SOF_TIMESTAMPING_SYS_HARDWARE takes precedence over
-SOF_TIMESTAMPING_RAW_HARDWARE if both bits are set.
-
-    int req = 0;
-    req |= SOF_TIMESTAMPING_SYS_HARDWARE;
+PACKET_TIMESTAMP accepts the same integer bit field as SO_TIMESTAMPING:
+
+    int req = SOF_TIMESTAMPING_RAW_HARDWARE;
     setsockopt(fd, SOL_PACKET, PACKET_TIMESTAMP, (void *) &req, sizeof(req))
 
 For the mmap(2)ed ring buffers, such timestamps are stored in the
@@ -1023,14 +1018,13 @@ tpacket{,2,3}_hdr structure's tp_sec and tp_{n,u}sec members. To determine
 what kind of timestamp has been reported, the tp_status field is binary |'ed
 with the following possible bits ...
 
-    TP_STATUS_TS_SYS_HARDWARE
     TP_STATUS_TS_RAW_HARDWARE
     TP_STATUS_TS_SOFTWARE
 
 ... that are equivalent to its SOF_TIMESTAMPING_* counterparts. For the
-RX_RING, if none of those 3 are set (i.e. PACKET_TIMESTAMP is not set),
-then this means that a software fallback was invoked *within* PF_PACKET's
-processing code (less precise).
+RX_RING, if neither is set (i.e. PACKET_TIMESTAMP is not set), then a
+software fallback was invoked *within* PF_PACKET's processing code (less
+precise).
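+
+For instance, when walking the RX_RING an application might classify a
+frame's timestamp source like this (a sketch; 'hdr' stands for the
+current slot's tpacket2_hdr, constants come from <linux/if_packet.h>):
+
+    if (hdr->tp_status & TP_STATUS_TS_RAW_HARDWARE)
+            ; /* tp_sec/tp_nsec hold a raw NIC hardware timestamp */
+    else if (hdr->tp_status & TP_STATUS_TS_SOFTWARE)
+            ; /* tp_sec/tp_nsec hold a kernel software timestamp */
+    else
+            ; /* PF_PACKET's internal software fallback (less precise) */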
 
 Getting timestamps for the TX_RING works as follows: i) fill the ring frames,
 ii) call sendto() e.g. in blocking mode, iii) wait for status of relevant
index 3544c98..e839e7e 100644 (file)
@@ -272,6 +272,8 @@ Writing a PHY driver
    txtsamp: Requests a transmit timestamp at the PHY level for a 'skb'
    set_wol: Enable Wake-on-LAN at the PHY level
    get_wol: Get the Wake-on-LAN status at the PHY level
+   read_mmd_indirect: Read PHY MMD indirect register
+   write_mmd_indirect: Write PHY MMD indirect register
 
  Of these, only config_aneg and read_status are required to be
  assigned by the driver code.  The rest are optional.  Also, it is
@@ -284,7 +286,21 @@ Writing a PHY driver
 
  Feel free to look at the Marvell, Cicada, and Davicom drivers in
  drivers/net/phy/ for examples (the lxt and qsemi drivers have
- not been tested as of this writing)
+ not been tested as of this writing).
+
+ The PHY's MMD register accesses are handled by the PAL framework
+ by default, but can be overridden by a specific PHY driver if
+ required. This could be the case if a PHY was released for
+ manufacturing before the MMD PHY register definitions were
+ standardized by the IEEE. Most modern PHYs will be able to use
+ the generic PAL framework for accessing the PHY's MMD registers.
+ An example of such usage is for Energy Efficient Ethernet support,
+ implemented in the PAL. This support uses the PAL to access MMD
+ registers for EEE query and configuration if the PHY supports
+ the IEEE standard access mechanisms, or can use the PHY's specific
+ access interfaces if overridden by the specific PHY driver. See
+ the Micrel driver in drivers/net/phy/ for an example of how this
+ can be implemented.
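+
+ A driver override boils down to filling in the two callbacks in
+ struct phy_driver. A hedged sketch, modeled loosely on the Micrel
+ driver mentioned above (names and register logic are placeholders;
+ double-check the callback signatures against include/linux/phy.h):
+
+   static int my_read_mmd_indirect(struct phy_device *phydev,
+                                   int ptrad, int devnum, int regnum)
+   {
+           /* device-specific MMD access; return <0 if unsupported */
+           return -1;
+   }
+
+   static void my_write_mmd_indirect(struct phy_device *phydev,
+                                     int ptrad, int devnum,
+                                     int regnum, u32 val)
+   {
+           /* device-specific MMD access */
+   }
+
+   static struct phy_driver my_phy_driver = {
+           /* ... other members ... */
+           .read_mmd_indirect  = my_read_mmd_indirect,
+           .write_mmd_indirect = my_write_mmd_indirect,
+   };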
 
 Board Fixups
 
index 0e30c78..0dffc6e 100644 (file)
@@ -24,6 +24,34 @@ For monitoring and control pktgen creates:
         /proc/net/pktgen/ethX
 
 
+Tuning NIC for max performance
+==============================
+
+The default NIC settings are (likely) not tuned for pktgen's artificial
+overload type of benchmarking, as such tuning could hurt the normal
+use-case.
+
+Specifically, increase the TX ring buffer in the NIC:
+ # ethtool -G ethX tx 1024
+
+A larger TX ring can improve pktgen's performance, while it can hurt
+in the general case: 1) because the TX ring buffer might get larger
+than the CPU's L1/L2 cache, and 2) because it allows more queueing in
+the NIC HW layer (which is bad for bufferbloat).
+
+One should be careful about concluding that packets/descriptors in the
+HW TX ring cause delay.  Drivers usually delay cleaning up the ring
+buffers (for various performance reasons); thus, packets stalling in
+the TX ring might just be waiting for cleanup.
+
+This cleanup issue is specifically the case for the ixgbe driver
+(Intel 82599 chip), which combines TX+RX ring cleanups; the cleanup
+interval is affected by the ethtool --coalesce setting of the
+"rx-usecs" parameter.
+
+For ixgbe, use e.g. "30", resulting in approx. 33K interrupts/sec (10^6/30):
+ # ethtool -C ethX rx-usecs 30
+
+
 Viewing threads
 ===============
 /proc/net/pktgen/kpktgend_0 
index bc35541..897f942 100644 (file)
@@ -40,7 +40,7 @@ the set bits correspond to data that is available, then the control
 message will not be generated:
 
 SOF_TIMESTAMPING_SOFTWARE:     report systime if available
-SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available
+SOF_TIMESTAMPING_SYS_HARDWARE: report hwtimetrans if available (deprecated)
 SOF_TIMESTAMPING_RAW_HARDWARE: report hwtimeraw if available
 
 It is worth noting that timestamps may be collected for reasons other
@@ -88,13 +88,12 @@ hwtimeraw is the original hardware time stamp. Filled in if
 SOF_TIMESTAMPING_RAW_HARDWARE is set. No assumptions about its
 relation to system time should be made.
 
-hwtimetrans is the hardware time stamp transformed so that it
-corresponds as good as possible to system time. This correlation is
-not perfect; as a consequence, sorting packets received via different
-NICs by their hwtimetrans may differ from the order in which they were
-received. hwtimetrans may be non-monotonic even for the same NIC.
-Filled in if SOF_TIMESTAMPING_SYS_HARDWARE is set. Requires support
-by the network device and will be empty without that support.
+hwtimetrans is always zero. This field is deprecated. It used to hold
+hw timestamps converted to system time. Instead, expose the hardware
+clock device on the NIC directly as a HW PTP clock source, to allow
+time conversion in userspace and optionally synchronize system time
+with a userspace PTP stack such as linuxptp. For the PTP clock API,
+see Documentation/ptp/ptp.txt.
 
 
 SIOCSHWTSTAMP, SIOCGHWTSTAMP:
@@ -185,7 +184,6 @@ struct skb_shared_hwtstamps {
         * since arbitrary point in time
         */
        ktime_t hwtstamp;
-       ktime_t syststamp; /* hwtstamp transformed to system time base */
 };
 
 Time stamps for outgoing packets are to be generated as follows:
index 8ba82bf..5cdfd74 100644 (file)
@@ -76,7 +76,6 @@ static void usage(const char *error)
               "  SOF_TIMESTAMPING_RX_HARDWARE - hardware time stamping of incoming packets\n"
               "  SOF_TIMESTAMPING_RX_SOFTWARE - software fallback for incoming packets\n"
               "  SOF_TIMESTAMPING_SOFTWARE - request reporting of software time stamps\n"
-              "  SOF_TIMESTAMPING_SYS_HARDWARE - request reporting of transformed HW time stamps\n"
               "  SOF_TIMESTAMPING_RAW_HARDWARE - request reporting of raw HW time stamps\n"
               "  SIOCGSTAMP - check last socket time stamp\n"
               "  SIOCGSTAMPNS - more accurate socket time stamp\n");
@@ -202,9 +201,7 @@ static void printpacket(struct msghdr *msg, int res,
                                       (long)stamp->tv_sec,
                                       (long)stamp->tv_nsec);
                                stamp++;
-                               printf("HW transformed %ld.%09ld ",
-                                      (long)stamp->tv_sec,
-                                      (long)stamp->tv_nsec);
+                               /* skip deprecated HW transformed */
                                stamp++;
                                printf("HW raw %ld.%09ld",
                                       (long)stamp->tv_sec,
@@ -361,8 +358,6 @@ int main(int argc, char **argv)
                        so_timestamping_flags |= SOF_TIMESTAMPING_RX_SOFTWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SOFTWARE"))
                        so_timestamping_flags |= SOF_TIMESTAMPING_SOFTWARE;
-               else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_SYS_HARDWARE"))
-                       so_timestamping_flags |= SOF_TIMESTAMPING_SYS_HARDWARE;
                else if (!strcasecmp(argv[i], "SOF_TIMESTAMPING_RAW_HARDWARE"))
                        so_timestamping_flags |= SOF_TIMESTAMPING_RAW_HARDWARE;
                else
index f1ac2da..ba1d502 100644 (file)
@@ -17,6 +17,7 @@
  *  along with this program; if not, write to the Free Software
  *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
  */
+#define _GNU_SOURCE
 #include <errno.h>
 #include <fcntl.h>
 #include <inttypes.h>
 #define CLOCK_INVALID -1
 #endif
 
-/* When glibc offers the syscall, this will go away. */
+/* clock_adjtime is not available in GLIBC < 2.14 */
+#if !__GLIBC_PREREQ(2, 14)
 #include <sys/syscall.h>
 static int clock_adjtime(clockid_t id, struct timex *tx)
 {
        return syscall(__NR_clock_adjtime, id, tx);
 }
+#endif
 
 static clockid_t get_clockid(int fd)
 {
index 85c362d..d1ab5e1 100644 (file)
@@ -286,6 +286,11 @@ STAC92HD83*
   hp-inv-led   HP with broken BIOS for inverted mute LED
   auto         BIOS setup (default)
 
+STAC92HD95
+==========
+  hp-led       LED support for HP laptops
+  hp-bass      Bass HPF setup for HP Spectre 13
+
 STAC9872
 ========
   vaio         VAIO laptop without SPDIF
index 708bb7f..c14374e 100644 (file)
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel:
 - shmall
 - shmmax                      [ sysv ipc ]
 - shmmni
+- softlockup_all_cpu_backtrace
 - stop-a                      [ SPARC only ]
 - sysrq                       ==> Documentation/sysrq.txt
 - sysctl_writes_strict
@@ -783,6 +784,22 @@ via the /proc/sys interface:
 
 ==============================================================
 
+softlockup_all_cpu_backtrace:
+
+This value controls whether the soft lockup detector gathers
+further debug information when a soft lockup condition is
+detected. If enabled, each CPU will be issued an NMI and
+instructed to capture a stack trace.
+
+This feature is only applicable to architectures which support
+NMI.
+
+0: do nothing. This is the default behavior.
+
+1: on detection, capture more debug information.
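+
+For example, to enable it at runtime (assuming the standard procfs
+mount point):
+
+  # echo 1 > /proc/sys/kernel/softlockup_all_cpu_backtrace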
+
+==============================================================
+
 tainted:
 
 Non-zero if the kernel has been tainted.  Numeric values, which
index bd4b34c..4415aa9 100644 (file)
@@ -702,7 +702,8 @@ The batch value of each per cpu pagelist is also updated as a result.  It is
 set to pcp->high/4.  The upper limit of batch is (PAGE_SHIFT * 8)
 
 The initial value is zero.  Kernel does not use this value at boot time to set
-the high water marks for each per cpu page list.
+the high water marks for each per cpu page list.  If the user writes '0' to this
+sysctl, it will revert to this default behavior.
 
 ==============================================================
 
index efceb78..60bc293 100644 (file)
@@ -4,7 +4,7 @@ Kernel driver nouveau
 Supported chips:
 * NV43+
 
-Authors: Martin Peres (mupuf) <martin.peres@labri.fr>
+Authors: Martin Peres (mupuf) <martin.peres@free.fr>
 
 Description
 ---------
@@ -68,8 +68,9 @@ Your fan can be driven in different modes:
 
 NOTE: Be sure to use the manual mode if you want to drive the fan speed manually
 
-NOTE2: Not all fan management modes may be supported on all chipsets. We are
-working on it.
+NOTE2: When operating in manual mode outside the vbios-defined
+[PWM_min, PWM_max] range, the reported fan speed (RPM) may not be accurate
+depending on your hardware.
 
 Bug reports
 ---------
index 00e425f..78c9a7b 100644 (file)
@@ -47,7 +47,6 @@ use constant HIGH_KSWAPD_REWAKEUP             => 21;
 use constant HIGH_NR_SCANNED                   => 22;
 use constant HIGH_NR_TAKEN                     => 23;
 use constant HIGH_NR_RECLAIMED                 => 24;
-use constant HIGH_NR_CONTIG_DIRTY              => 25;
 
 my %perprocesspid;
 my %perprocess;
@@ -105,7 +104,7 @@ my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
 my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
 my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
 my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
-my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)';
+my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) file=([0-9]*)';
 my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
 my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
 my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -200,7 +199,7 @@ $regex_lru_isolate = generate_traceevent_regex(
                        $regex_lru_isolate_default,
                        "isolate_mode", "order",
                        "nr_requested", "nr_scanned", "nr_taken",
-                       "contig_taken", "contig_dirty", "contig_failed");
+                       "file");
 $regex_lru_shrink_inactive = generate_traceevent_regex(
                        "vmscan/mm_vmscan_lru_shrink_inactive",
                        $regex_lru_shrink_inactive_default,
@@ -375,7 +374,6 @@ EVENT_PROCESS:
                        }
                        my $isolate_mode = $1;
                        my $nr_scanned = $4;
-                       my $nr_contig_dirty = $7;
 
                        # To closer match vmstat scanning statistics, only count isolate_both
                        # and isolate_inactive as scanning. isolate_active is rotation
@@ -385,7 +383,6 @@ EVENT_PROCESS:
                        if ($isolate_mode != 2) {
                                $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
                        }
-                       $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
                } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
                        $details = $6;
                        if ($details !~ /$regex_lru_shrink_inactive/o) {
@@ -539,13 +536,6 @@ sub dump_stats {
                                }
                        }
                }
-               if ($stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY}) {
-                       print "      ";
-                       my $count = $stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY};
-                       if ($count != 0) {
-                               print "contig-dirty=$count ";
-                       }
-               }
 
                print "\n";
        }
index 1b9bef7..f00d2c5 100644 (file)
@@ -164,7 +164,6 @@ F:  drivers/net/hamradio/6pack.c
 
 8169 10/100/1000 GIGABIT ETHERNET DRIVER
 M:     Realtek linux nic maintainers <nic_swsd@realtek.com>
-M:     Francois Romieu <romieu@fr.zoreil.com>
 L:     netdev@vger.kernel.org
 S:     Maintained
 F:     drivers/net/ethernet/realtek/r8169.c
@@ -951,16 +950,10 @@ L:        linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
 F:     arch/arm/mach-imx/
+F:     arch/arm/mach-mxs/
 F:     arch/arm/boot/dts/imx*
 F:     arch/arm/configs/imx*_defconfig
 
-ARM/FREESCALE MXS ARM ARCHITECTURE
-M:     Shawn Guo <shawn.guo@linaro.org>
-L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-S:     Maintained
-T:     git git://git.linaro.org/people/shawnguo/linux-2.6.git
-F:     arch/arm/mach-mxs/
-
 ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1060,9 +1053,33 @@ M:       Santosh Shilimkar <santosh.shilimkar@ti.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-keystone/
-F:     drivers/clk/keystone/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
 
+ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/clk/keystone/
+
+ARM/TEXAS INSTRUMENT KEYSTONE CLOCKSOURCE
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/clocksource/timer-keystone.c
+
+ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/power/reset/keystone-reset.c
+
+ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS
+M:     Santosh Shilimkar <santosh.shilimkar@ti.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/memory/*emif*
+
 ARM/LOGICPD PXA270 MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1304,6 +1321,20 @@ W:       http://oss.renesas.com
 Q:     http://patchwork.kernel.org/project/linux-sh/list/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
 S:     Supported
+F:     arch/arm/boot/dts/emev2*
+F:     arch/arm/boot/dts/r7s*
+F:     arch/arm/boot/dts/r8a*
+F:     arch/arm/boot/dts/sh*
+F:     arch/arm/configs/ape6evm_defconfig
+F:     arch/arm/configs/armadillo800eva_defconfig
+F:     arch/arm/configs/bockw_defconfig
+F:     arch/arm/configs/genmai_defconfig
+F:     arch/arm/configs/koelsch_defconfig
+F:     arch/arm/configs/kzm9g_defconfig
+F:     arch/arm/configs/lager_defconfig
+F:     arch/arm/configs/mackerel_defconfig
+F:     arch/arm/configs/marzen_defconfig
+F:     arch/arm/configs/shmobile_defconfig
 F:     arch/arm/mach-shmobile/
 F:     drivers/sh/
 
@@ -1889,6 +1920,13 @@ S:       Supported
 F:     drivers/net/bonding/
 F:     include/uapi/linux/if_bonding.h
 
+BPF (Safe dynamic programs and tools)
+M:     Alexei Starovoitov <ast@kernel.org>
+L:     netdev@vger.kernel.org
+L:     linux-kernel@vger.kernel.org
+S:     Supported
+F:     kernel/bpf/
+
 BROADCOM B44 10/100 ETHERNET DRIVER
 M:     Gary Zambrano <zambrano@broadcom.com>
 L:     netdev@vger.kernel.org
@@ -1902,7 +1940,8 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/genet/
 
 BROADCOM BNX2 GIGABIT ETHERNET DRIVER
-M:     Michael Chan <mchan@broadcom.com>
+M:     Sony Chacko <sony.chacko@qlogic.com>
+M:     Dept-HSGLinuxNICDev@qlogic.com
 L:     netdev@vger.kernel.org
 S:     Supported
 F:     drivers/net/ethernet/broadcom/bnx2.*
@@ -1947,7 +1986,7 @@ F:        arch/arm/boot/dts/bcm5301x.dtsi
 F:     arch/arm/boot/dts/bcm470*
 
 BROADCOM TG3 GIGABIT ETHERNET DRIVER
-M:     Nithin Nayak Sujir <nsujir@broadcom.com>
+M:     Prashant Sreedharan <prashant@broadcom.com>
 M:     Michael Chan <mchan@broadcom.com>
 L:     netdev@vger.kernel.org
 S:     Supported
@@ -2925,6 +2964,9 @@ L:        linux-doc@vger.kernel.org
 T:     quilt http://www.infradead.org/~rdunlap/Doc/patches/
 S:     Maintained
 F:     Documentation/
+X:     Documentation/ABI/
+X:     Documentation/devicetree/
+X:     Documentation/[a-z][a-z]_[A-Z][A-Z]/
 
 DOUBLETALK DRIVER
 M:     "James R. Van Zandt" <jrv@vanzandt.mv.com>
@@ -3197,14 +3239,6 @@ L:       linux-scsi@vger.kernel.org
 S:     Maintained
 F:     drivers/scsi/eata_pio.*
 
-EBTABLES
-L:     netfilter-devel@vger.kernel.org
-W:     http://ebtables.sourceforge.net/
-S:     Orphan
-F:     include/linux/netfilter_bridge/ebt_*.h
-F:     include/uapi/linux/netfilter_bridge/ebt_*.h
-F:     net/bridge/netfilter/ebt*.c
-
 EC100 MEDIA DRIVER
 M:     Antti Palosaari <crope@iki.fi>
 L:     linux-media@vger.kernel.org
@@ -4492,8 +4526,7 @@ S:        Supported
 F:     drivers/idle/i7300_idle.c
 
 IEEE 802.15.4 SUBSYSTEM
-M:     Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
-M:     Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
+M:     Alexander Aring <alex.aring@gmail.com>
 L:     linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
 W:     http://apps.sourceforge.net/trac/linux-zigbee
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -5525,10 +5558,11 @@ S:      Maintained
 F:     arch/arm/mach-lpc32xx/
 
 LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
-M:     Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com>
-M:     Sreekanth Reddy <Sreekanth.Reddy@lsi.com>
-M:     support@lsi.com
-L:     DL-MPTFusionLinux@lsi.com
+M:     Nagalakshmi Nandigama <nagalakshmi.nandigama@avagotech.com>
+M:     Praveen Krishnamoorthy <praveen.krishnamoorthy@avagotech.com>
+M:     Sreekanth Reddy <sreekanth.reddy@avagotech.com>
+M:     Abhijit Mahajan <abhijit.mahajan@avagotech.com>
+L:     MPT-FusionLinux.pdl@avagotech.com
 L:     linux-scsi@vger.kernel.org
 W:     http://www.lsilogic.com/support
 S:     Supported
@@ -6104,12 +6138,11 @@ F:      Documentation/networking/s2io.txt
 F:     Documentation/networking/vxge.txt
 F:     drivers/net/ethernet/neterion/
 
-NETFILTER/IPTABLES
+NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
 M:     Pablo Neira Ayuso <pablo@netfilter.org>
 M:     Patrick McHardy <kaber@trash.net>
 M:     Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
 L:     netfilter-devel@vger.kernel.org
-L:     netfilter@vger.kernel.org
 L:     coreteam@netfilter.org
 W:     http://www.netfilter.org/
 W:     http://www.iptables.org/
@@ -6773,7 +6806,7 @@ F:        arch/x86/kernel/quirks.c
 
 PCI DRIVER FOR IMX6
 M:     Richard Zhu <r65037@freescale.com>
-M:     Shawn Guo <shawn.guo@linaro.org>
+M:     Shawn Guo <shawn.guo@freescale.com>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
@@ -6930,6 +6963,12 @@ L:       linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     drivers/pinctrl/pinctrl-at91.c
 
+PIN CONTROLLER - RENESAS
+M:     Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+L:     linux-sh@vger.kernel.org
+S:     Maintained
+F:     drivers/pinctrl/sh-pfc/
+
 PIN CONTROLLER - SAMSUNG
 M:     Tomasz Figa <t.figa@samsung.com>
 M:     Thomas Abraham <thomas.abraham@linaro.org>
@@ -6959,7 +6998,7 @@ PKUNITY SOC DRIVERS
 M:     Guan Xuetao <gxt@mprc.pku.edu.cn>
 W:     http://mprc.pku.edu.cn/~guanxuetao/linux
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+T:     git git://github.com/gxt/linux.git
 F:     drivers/input/serio/i8042-unicore32io.h
 F:     drivers/i2c/busses/i2c-puv3.c
 F:     drivers/video/fb-puv3.c
@@ -7947,6 +7986,7 @@ F:        drivers/mmc/host/sdhci-spear.c
 
 SECURITY SUBSYSTEM
 M:     James Morris <james.l.morris@oracle.com>
+M:     Serge E. Hallyn <serge@hallyn.com>
 L:     linux-security-module@vger.kernel.org (suggested Cc:)
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/jmorris/linux-security.git
 W:     http://kernsec.org/
@@ -7992,6 +8032,16 @@ F:       drivers/ata/
 F:     include/linux/ata.h
 F:     include/linux/libata.h
 
+SERIAL ATA AHCI PLATFORM devices support
+M:     Hans de Goede <hdegoede@redhat.com>
+M:     Tejun Heo <tj@kernel.org>
+L:     linux-ide@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tj/libata.git
+S:     Supported
+F:     drivers/ata/ahci_platform.c
+F:     drivers/ata/libahci_platform.c
+F:     include/linux/ahci_platform.h
+
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
 M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
 L:     linux-scsi@vger.kernel.org
@@ -8194,13 +8244,15 @@ S:      Maintained
 F:     drivers/usb/misc/sisusbvga/
 
 SLAB ALLOCATOR
-M:     Christoph Lameter <cl@linux-foundation.org>
+M:     Christoph Lameter <cl@linux.com>
 M:     Pekka Enberg <penberg@kernel.org>
-M:     Matt Mackall <mpm@selenic.com>
+M:     David Rientjes <rientjes@google.com>
+M:     Joonsoo Kim <iamjoonsoo.kim@lge.com>
+M:     Andrew Morton <akpm@linux-foundation.org>
 L:     linux-mm@kvack.org
 S:     Maintained
 F:     include/linux/sl?b*.h
-F:     mm/sl?b.c
+F:     mm/sl?b*
 
 SLEEPABLE READ-COPY UPDATE (SRCU)
 M:     Lai Jiangshan <laijs@cn.fujitsu.com>
@@ -8967,7 +9019,7 @@ F:        drivers/media/radio/radio-raremono.c
 
 THERMAL
 M:     Zhang Rui <rui.zhang@intel.com>
-M:     Eduardo Valentin <eduardo.valentin@ti.com>
+M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
@@ -8994,7 +9046,7 @@ S:        Maintained
 F:     drivers/platform/x86/thinkpad_acpi.c
 
 TI BANDGAP AND THERMAL DRIVER
-M:     Eduardo Valentin <eduardo.valentin@ti.com>
+M:     Eduardo Valentin <edubezval@gmail.com>
 L:     linux-pm@vger.kernel.org
 S:     Supported
 F:     drivers/thermal/ti-soc-thermal/
@@ -9275,7 +9327,7 @@ UNICORE32 ARCHITECTURE:
 M:     Guan Xuetao <gxt@mprc.pku.edu.cn>
 W:     http://mprc.pku.edu.cn/~guanxuetao/linux
 S:     Maintained
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/epip/linux-2.6-unicore32.git
+T:     git git://github.com/gxt/linux.git
 F:     arch/unicore32/
 
 UNIFDEF
@@ -9408,12 +9460,6 @@ S:       Maintained
 F:     drivers/usb/host/isp116x*
 F:     include/linux/usb/isp116x.h
 
-USB KAWASAKI LSI DRIVER
-M:     Oliver Neukum <oliver@neukum.org>
-L:     linux-usb@vger.kernel.org
-S:     Maintained
-F:     drivers/usb/serial/kl5kusb105.*
-
 USB MASS STORAGE DRIVER
 M:     Matthew Dharm <mdharm-usb@one-eyed-alien.net>
 L:     linux-usb@vger.kernel.org
@@ -9441,12 +9487,6 @@ S:       Maintained
 F:     Documentation/usb/ohci.txt
 F:     drivers/usb/host/ohci*
 
-USB OPTION-CARD DRIVER
-M:     Matthias Urlichs <smurf@smurf.noris.de>
-L:     linux-usb@vger.kernel.org
-S:     Maintained
-F:     drivers/usb/serial/option.c
-
 USB PEGASUS DRIVER
 M:     Petko Manolov <petkan@nucleusys.com>
 L:     linux-usb@vger.kernel.org
@@ -9479,7 +9519,7 @@ S:        Maintained
 F:     drivers/net/usb/rtl8150.c
 
 USB SERIAL SUBSYSTEM
-M:     Johan Hovold <jhovold@gmail.com>
+M:     Johan Hovold <johan@kernel.org>
 L:     linux-usb@vger.kernel.org
 S:     Maintained
 F:     Documentation/usb/usb-serial.txt
@@ -9742,6 +9782,14 @@ L:       virtualization@lists.linux-foundation.org
 S:     Supported
 F:     arch/x86/kernel/cpu/vmware.c
 
+VMWARE BALLOON DRIVER
+M:     Xavier Deguillard <xdeguillard@vmware.com>
+M:     Philip Moltmann <moltmann@vmware.com>
+M:     "VMware, Inc." <pv-drivers@vmware.com>
+L:     linux-kernel@vger.kernel.org
+S:     Maintained
+F:     drivers/misc/vmw_balloon.c
+
 VMWARE VMXNET3 ETHERNET DRIVER
 M:     Shreyas Bhatewara <sbhatewara@vmware.com>
 M:     "VMware, Inc." <pv-drivers@vmware.com>
index 97b2861..f6a7794 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 3
 PATCHLEVEL = 16
 SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc7
 NAME = Shuffling Zombie Juror
 
 # *DOCUMENTATION*
@@ -41,6 +41,29 @@ unexport GREP_OPTIONS
 # descending is started. They are now explicitly listed as the
 # prepare rule.
 
+# Beautify output
+# ---------------------------------------------------------------------------
+#
+# Normally, we echo the whole command before executing it. By making
+# that echo $($(quiet)$(cmd)), we now have the possibility to set
+# $(quiet) to choose other forms of output instead, e.g.
+#
+#         quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
+#         cmd_cc_o_c       = $(CC) $(c_flags) -c -o $@ $<
+#
+# If $(quiet) is empty, the whole command will be printed.
+# If it is set to "quiet_", only the short version will be printed.
+# If it is set to "silent_", nothing will be printed at all, since
+# the variable $(silent_cmd_cc_o_c) doesn't exist.
+#
+# A simple variant is to prefix commands with $(Q) - that's useful
+# for commands that shall be hidden in non-verbose mode.
+#
+#      $(Q)ln $@ :<
+#
+# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
+# If KBUILD_VERBOSE equals 1 then the above command is displayed.
+#
 # To put more focus on warnings, be less verbose as default
 # Use 'make V=1' to see the full commands
 
@@ -51,6 +74,29 @@ ifndef KBUILD_VERBOSE
   KBUILD_VERBOSE = 0
 endif
 
+ifeq ($(KBUILD_VERBOSE),1)
+  quiet =
+  Q =
+else
+  quiet=quiet_
+  Q = @
+endif
+
+# If the user is running make -s (silent mode), suppress echoing of
+# commands
+
+ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
+ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
+  quiet=silent_
+endif
+else                                   # make-3.8x
+ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
+  quiet=silent_
+endif
+endif
+
+export quiet Q KBUILD_VERBOSE
+
 # Call a source code checker (by default, "sparse") as part of the
 # C compilation.
 #
@@ -126,7 +172,13 @@ PHONY += $(MAKECMDGOALS) sub-make
 $(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
        @:
 
+# Fake the "Entering directory" message once, so that IDEs/editors are
+# able to understand relative filenames.
+       echodir := @echo
+ quiet_echodir := @echo
+silent_echodir := @:
 sub-make: FORCE
+       $($(quiet)echodir) "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
        $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
        KBUILD_SRC=$(CURDIR) \
        KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \
@@ -289,52 +341,6 @@ endif
 export KBUILD_MODULES KBUILD_BUILTIN
 export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
 
-# Beautify output
-# ---------------------------------------------------------------------------
-#
-# Normally, we echo the whole command before executing it. By making
-# that echo $($(quiet)$(cmd)), we now have the possibility to set
-# $(quiet) to choose other forms of output instead, e.g.
-#
-#         quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
-#         cmd_cc_o_c       = $(CC) $(c_flags) -c -o $@ $<
-#
-# If $(quiet) is empty, the whole command will be printed.
-# If it is set to "quiet_", only the short version will be printed.
-# If it is set to "silent_", nothing will be printed at all, since
-# the variable $(silent_cmd_cc_o_c) doesn't exist.
-#
-# A simple variant is to prefix commands with $(Q) - that's useful
-# for commands that shall be hidden in non-verbose mode.
-#
-#      $(Q)ln $@ :<
-#
-# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
-# If KBUILD_VERBOSE equals 1 then the above command is displayed.
-
-ifeq ($(KBUILD_VERBOSE),1)
-  quiet =
-  Q =
-else
-  quiet=quiet_
-  Q = @
-endif
-
-# If the user is running make -s (silent mode), suppress echoing of
-# commands
-
-ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
-ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
-  quiet=silent_
-endif
-else                                   # make-3.8x
-ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
-  quiet=silent_
-endif
-endif
-
-export quiet Q KBUILD_VERBOSE
-
 ifneq ($(CC),)
 ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
 COMPILER := clang
@@ -682,6 +688,8 @@ KBUILD_CFLAGS       += -fomit-frame-pointer
 endif
 endif
 
+KBUILD_CFLAGS   += $(call cc-option, -fno-var-tracking-assignments)
+
 ifdef CONFIG_DEBUG_INFO
 KBUILD_CFLAGS  += -g
 KBUILD_AFLAGS  += -Wa,-gdwarf-2
@@ -1170,7 +1178,7 @@ distclean: mrproper
 # Packaging of the kernel to various formats
 # ---------------------------------------------------------------------------
 # rpm target kept for backward compatibility
-package-dir    := $(srctree)/scripts/package
+package-dir    := scripts/package
 
 %src-pkg: FORCE
        $(Q)$(MAKE) $(build)=$(package-dir) $@
index c1d3d2d..b3c7509 100644 (file)
@@ -60,7 +60,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_IC_IVIC                0x10
 #define ARC_REG_IC_CTRL                0x11
 #define ARC_REG_IC_IVIL                0x19
-#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
+#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_IC_PTAG                0x1E
 #endif
 
@@ -74,7 +74,7 @@ extern void read_decode_cache_bcr(void);
 #define ARC_REG_DC_IVDL                0x4A
 #define ARC_REG_DC_FLSH                0x4B
 #define ARC_REG_DC_FLDL                0x4C
-#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4)
+#if defined(CONFIG_ARC_MMU_V3)
 #define ARC_REG_DC_PTAG                0x5C
 #endif
 
index 2618cc1..76a7739 100644 (file)
@@ -11,6 +11,7 @@
 #ifndef _UAPI__ASM_ARC_PTRACE_H
 #define _UAPI__ASM_ARC_PTRACE_H
 
+#define PTRACE_GET_THREAD_AREA 25
 
 #ifndef __ASSEMBLY__
 /*
index 2ff0347..e248594 100644 (file)
@@ -10,9 +10,9 @@
  *  -This is the more "natural" hand written assembler
  */
 
+#include <linux/linkage.h>
 #include <asm/entry.h>       /* For the SAVE_* macros */
 #include <asm/asm-offsets.h>
-#include <asm/linkage.h>
 
 #define KSP_WORD_OFF   ((TASK_THREAD + THREAD_KSP) / 4)
 
index 0b3ef40..fffdb5e 100644 (file)
@@ -41,7 +41,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
 {
        const struct machine_desc *mdesc;
        unsigned long dt_root;
-       void *clk;
+       const void *clk;
        int len;
 
        if (!early_init_dt_scan(dt))
index 07a58f2..4d2481b 100644 (file)
@@ -77,10 +77,11 @@ stext:
        ; Clear BSS before updating any globals
        ; XXX: use ZOL here
        mov     r5, __bss_start
-       mov     r6, __bss_stop
+       sub     r6, __bss_stop, r5
+       lsr.f   lp_count, r6, 2
+       lpnz    1f
+       st.ab   0, [r5, 4]
 1:
-       st.ab   0, [r5,4]
-       brlt    r5, r6, 1b
 
        ; Uboot - kernel ABI
        ;    r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
index 5d76706..13b3ffb 100644 (file)
@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
        pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
 
        switch (request) {
+       case PTRACE_GET_THREAD_AREA:
+               ret = put_user(task_thread_info(child)->thr_ptr,
+                              (unsigned long __user *)data);
+               break;
        default:
                ret = ptrace_request(child, request, addr, data);
                break;
index cf90b6f..c802bb5 100644 (file)
@@ -337,8 +337,19 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
  */
 static DEFINE_PER_CPU(int, ipi_dev);
+
+static struct irqaction arc_ipi_irq = {
+       .name    = "IPI Interrupt",
+       .flags   = IRQF_PERCPU,
+       .handler = do_IPI,
+};
+
 int smp_ipi_irq_setup(int cpu, int irq)
 {
-       int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
-       return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+       if (!cpu)
+               return setup_irq(irq, &arc_ipi_irq);
+       else
+               arch_unmask_irq(irq);
+
+       return 0;
 }
index 2555f58..dd35bde 100644 (file)
@@ -116,7 +116,7 @@ SECTIONS
 
        _edata = .;
 
-       BSS_SECTION(0, 0, 0)
+       BSS_SECTION(4, 4, 4)
 
 #ifdef CONFIG_ARC_DW2_UNWIND
        . = ALIGN(PAGE_SIZE);
index 1f676c4..353b202 100644 (file)
@@ -389,7 +389,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
                                unsigned long sz)
 {
        unsigned long flags;
@@ -405,6 +405,23 @@ static inline void __ic_entire_inv(void)
        read_aux_reg(ARC_REG_IC_CTRL);  /* blocks */
 }
 
+struct ic_line_inv_vaddr_ipi {
+       unsigned long paddr, vaddr;
+       int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+       struct ic_line_inv_vaddr_ipi *ic_inv = info;
+
+       __ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+                               unsigned long sz)
+{
+       struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr, sz };
+       on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
 #else
 
 #define __ic_entire_inv()
@@ -553,12 +570,8 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
  */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __ic_line_inv_vaddr(paddr, vaddr, len);
        __dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
-       local_irq_restore(flags);
+       __ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
index 87b63fd..88acf8b 100644 (file)
@@ -6,6 +6,7 @@ config ARM
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
        select ARCH_HAVE_CUSTOM_GPIO_H
        select ARCH_MIGHT_HAVE_PC_PARPORT
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_USE_BUILTIN_BSWAP
        select ARCH_USE_CMPXCHG_LOCKREF
        select ARCH_WANT_IPC_PARSE_VERSION
@@ -175,13 +176,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        bool
 
-config ARCH_HAS_CPUFREQ
-       bool
-       help
-         Internal node to signify that the ARCH has CPUFREQ support
-         and that the relevant menu configurations are displayed for
-         it.
-
 config ARCH_HAS_BANDGAP
        bool
 
@@ -318,7 +312,6 @@ config ARCH_MULTIPLATFORM
 
 config ARCH_INTEGRATOR
        bool "ARM Ltd. Integrator family"
-       select ARCH_HAS_CPUFREQ
        select ARM_AMBA
        select ARM_PATCH_PHYS_VIRT
        select AUTO_ZRELADDR
@@ -538,7 +531,6 @@ config ARCH_DOVE
 
 config ARCH_KIRKWOOD
        bool "Marvell Kirkwood"
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select CPU_FEROCEON
        select GENERIC_CLOCKEVENTS
@@ -637,7 +629,6 @@ config ARCH_LPC32XX
 config ARCH_PXA
        bool "PXA2xx/PXA3xx-based"
        depends on MMU
-       select ARCH_HAS_CPUFREQ
        select ARCH_MTD_XIP
        select ARCH_REQUIRE_GPIOLIB
        select ARM_CPU_SUSPEND if PM
@@ -707,7 +698,6 @@ config ARCH_RPC
 
 config ARCH_SA1100
        bool "SA1100-based"
-       select ARCH_HAS_CPUFREQ
        select ARCH_MTD_XIP
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_SPARSEMEM_ENABLE
@@ -725,7 +715,6 @@ config ARCH_SA1100
 
 config ARCH_S3C24XX
        bool "Samsung S3C24XX SoCs"
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ATAGS
        select CLKDEV_LOOKUP
@@ -746,7 +735,6 @@ config ARCH_S3C24XX
 
 config ARCH_S3C64XX
        bool "Samsung S3C64XX"
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_VIC
@@ -809,7 +797,6 @@ config ARCH_S5PC100
 
 config ARCH_S5PV210
        bool "Samsung S5PV210/S5PC110"
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_SPARSEMEM_ENABLE
        select ATAGS
@@ -845,7 +832,6 @@ config ARCH_DAVINCI
 config ARCH_OMAP1
        bool "TI OMAP1"
        depends on MMU
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_OMAP
        select ARCH_REQUIRE_GPIOLIB
@@ -1009,8 +995,6 @@ source "arch/arm/mach-rockchip/Kconfig"
 
 source "arch/arm/mach-sa1100/Kconfig"
 
-source "arch/arm/plat-samsung/Kconfig"
-
 source "arch/arm/mach-socfpga/Kconfig"
 
 source "arch/arm/mach-spear/Kconfig"
@@ -1028,6 +1012,7 @@ source "arch/arm/mach-s5pc100/Kconfig"
 source "arch/arm/mach-s5pv210/Kconfig"
 
 source "arch/arm/mach-exynos/Kconfig"
+source "arch/arm/plat-samsung/Kconfig"
 
 source "arch/arm/mach-shmobile/Kconfig"
 
@@ -2109,9 +2094,7 @@ endmenu
 
 menu "CPU Power Management"
 
-if ARCH_HAS_CPUFREQ
 source "drivers/cpufreq/Kconfig"
-endif
 
 source "drivers/cpuidle/Kconfig"
 
index 5986ff6..adb5ed9 100644 (file)
@@ -357,7 +357,7 @@ dtb-$(CONFIG_ARCH_STI)+= stih407-b2120.dtb \
        stih415-b2020.dtb \
        stih416-b2000.dtb \
        stih416-b2020.dtb \
-       stih416-b2020-revE.dtb
+       stih416-b2020e.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
        sun4i-a10-a1000.dtb \
        sun4i-a10-cubieboard.dtb \
index ecb2677..e2156a5 100644 (file)
                serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
                        0 0 1 2
                >;
-               tx-num-evt = <1>;
-               rx-num-evt = <1>;
+               tx-num-evt = <32>;
+               rx-num-evt = <32>;
 };
 
 &tps {
index ab9a34c..80a3b21 100644 (file)
                serial-dir = <  /* 0: INACTIVE, 1: TX, 2: RX */
                        0 0 1 2
                >;
-               tx-num-evt = <1>;
-               rx-num-evt = <1>;
+               tx-num-evt = <32>;
+               rx-num-evt = <32>;
 };
 
 &tscadc {
index 8a0a72d..a1a0cc5 100644 (file)
 
 &cpsw_emac0 {
        phy_id = <&davinci_mdio>, <0>;
+       phy-mode = "rmii";
 };
 
 &cpsw_emac1 {
        phy_id = <&davinci_mdio>, <1>;
+       phy-mode = "rmii";
+};
+
+&phy_sel {
+       rmii-clock-ext;
 };
 
 &elm {
index 19f1f7e..90098f9 100644 (file)
        phy-mode = "rmii";
 };
 
+&phy_sel {
+       rmii-clock-ext;
+};
+
 &i2c0 {
        status = "okay";
        pinctrl-names = "default";
index 772fec2..1e2919d 100644 (file)
@@ -91,6 +91,8 @@
                                marvell,nand-keep-config;
                                marvell,nand-enable-arbiter;
                                nand-on-flash-bbt;
+                               nand-ecc-strength = <4>;
+                               nand-ecc-step-size = <512>;
 
                                partition@0 {
                                        label = "U-Boot";
index e69bc67..4173a8a 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 380 family SoC";
-       compatible = "marvell,armada380", "marvell,armada38x";
+       compatible = "marvell,armada380";
 
        cpus {
                #address-cells = <1>;
index ff9637d..1af886f 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 385 Development Board";
-       compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada380";
 
        chosen {
                bootargs = "console=ttyS0,115200 earlyprintk";
@@ -98,6 +98,8 @@
                                marvell,nand-keep-config;
                                marvell,nand-enable-arbiter;
                                nand-on-flash-bbt;
+                               nand-ecc-strength = <4>;
+                               nand-ecc-step-size = <512>;
 
                                partition@0 {
                                        label = "U-Boot";
index 4089325..aaca286 100644 (file)
@@ -17,7 +17,7 @@
 
 / {
        model = "Marvell Armada 385 Reference Design";
-       compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
 
        chosen {
                bootargs = "console=ttyS0,115200 earlyprintk";
index f011009..6283d79 100644 (file)
@@ -16,7 +16,7 @@
 
 / {
        model = "Marvell Armada 385 family SoC";
-       compatible = "marvell,armada385", "marvell,armada38x";
+       compatible = "marvell,armada385", "marvell,armada380";
 
        cpus {
                #address-cells = <1>;
index 3de364e..689fa1a 100644 (file)
@@ -20,7 +20,7 @@
 
 / {
        model = "Marvell Armada 38x family SoC";
-       compatible = "marvell,armada38x";
+       compatible = "marvell,armada380";
 
        aliases {
                gpio0 = &gpio0;
index e5c6a04..4e5a59e 100644 (file)
@@ -25,7 +25,7 @@
 
        memory {
                device_type = "memory";
-               reg = <0 0x00000000 0 0xC0000000>; /* 3 GB */
+               reg = <0 0x00000000 0 0x40000000>; /* 1 GB soldered on */
        };
 
        soc {
index b309c1c..04927db 100644 (file)
                                #size-cells = <0>;
                                #interrupt-cells = <1>;
 
-                               slow_rc_osc: slow_rc_osc {
-                                       compatible = "fixed-clock";
+                               main_osc: main_osc {
+                                       compatible = "atmel,at91rm9200-clk-main-osc";
                                        #clock-cells = <0>;
-                                       clock-frequency = <32768>;
-                                       clock-accuracy = <50000000>;
-                               };
-
-                               clk32k: slck {
-                                       compatible = "atmel,at91sam9260-clk-slow";
-                                       #clock-cells = <0>;
-                                       clocks = <&slow_rc_osc &slow_xtal>;
+                                       interrupts-extended = <&pmc AT91_PMC_MOSCS>;
+                                       clocks = <&main_xtal>;
                                };
 
                                main: mainck {
                                        compatible = "atmel,at91rm9200-clk-main";
                                        #clock-cells = <0>;
-                                       interrupts-extended = <&pmc AT91_PMC_MOSCS>;
-                                       clocks = <&main_xtal>;
+                                       clocks = <&main_osc>;
                                };
 
                                plla: pllack {
                                        compatible = "atmel,at91rm9200-clk-master";
                                        #clock-cells = <0>;
                                        interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
-                                       clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
+                                       clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
                                        atmel,clk-output-range = <0 94000000>;
                                        atmel,clk-divisors = <1 2 4 0>;
                                };
                                        #address-cells = <1>;
                                        #size-cells = <0>;
                                        interrupt-parent = <&pmc>;
-                                       clocks = <&clk32k>, <&main>, <&plla>, <&pllb>;
+                                       clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
 
                                        prog0: prog0 {
                                                #clock-cells = <0>;
index c6683ea..aa35a7a 100644 (file)
                reg = <0x20000000 0x4000000>;
        };
 
+       slow_xtal {
+               clock-frequency = <32768>;
+       };
+
        main_xtal {
                clock-frequency = <18432000>;
        };
index d1b82e6..b84bac5 100644 (file)
                                                                      <595000000 650000000 3 0>,
                                                                      <545000000 600000000 0 1>,
                                                                      <495000000 555000000 1 1>,
-                                                                     <445000000 500000000 1 2>,
-                                                                     <400000000 450000000 1 3>;
+                                                                     <445000000 500000000 2 1>,
+                                                                     <400000000 450000000 3 1>;
                                };
 
                                plladiv: plladivck {
                        compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00500000 0x00100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
+                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
                                 <&uhpck>;
                        clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
                        status = "disabled";
index 1a57298..2c0d6ea 100644 (file)
                                                                       595000000 650000000 3 0
                                                                       545000000 600000000 0 1
                                                                       495000000 555000000 1 1
-                                                                      445000000 500000000 1 2
-                                                                      400000000 450000000 1 3>;
+                                                                      445000000 500000000 2 1
+                                                                      400000000 450000000 3 1>;
                                };
 
                                plladiv: plladivck {
                                reg = <0x00500000 0x80000
                                       0xf803c000 0x400>;
                                interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
+                               clocks = <&usb>, <&udphs_clk>;
+                               clock-names = "hclk", "pclk";
                                status = "disabled";
 
                                ep0 {
                                compatible = "atmel,at91sam9rl-pwm";
                                reg = <0xf8034000 0x300>;
                                interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
+                               clocks = <&pwm_clk>;
                                #pwm-cells = <3>;
                                status = "disabled";
                        };
                        compatible = "atmel,at91rm9200-ohci", "usb-ohci";
                        reg = <0x00600000 0x100000>;
                        interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
-                       clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>,
-                                <&uhpck>;
+                       clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
                        clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
                        status = "disabled";
                };
index 4adc280..8308954 100644 (file)
                                        regulator-name = "ldo3";
                                        regulator-min-microvolt = <1800000>;
                                        regulator-max-microvolt = <1800000>;
+                                       regulator-always-on;
                                        regulator-boot-on;
                                };
 
index c29945e..8012763 100644 (file)
                        clocks = <&qspi_gfclk_div>;
                        clock-names = "fck";
                        num-cs = <4>;
-                       interrupts = <0 343 0x4>;
                        status = "disabled";
                };
 
                        #size-cells = <1>;
                        status = "disabled";
                };
+
+               atl: atl@4843c000 {
+                       compatible = "ti,dra7-atl";
+                       reg = <0x4843c000 0x3ff>;
+                       ti,hwmods = "atl";
+                       ti,provided-clocks = <&atl_clkin0_ck>, <&atl_clkin1_ck>,
+                                            <&atl_clkin2_ck>, <&atl_clkin3_ck>;
+                       clocks = <&atl_gfclk_mux>;
+                       clock-names = "fck";
+                       status = "disabled";
+               };
        };
 };
 
index b03cfe4..dc7a292 100644 (file)
 &cm_core_aon_clocks {
        atl_clkin0_ck: atl_clkin0_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        atl_clkin1_ck: atl_clkin1_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        atl_clkin2_ck: atl_clkin2_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        atl_clkin3_ck: atl_clkin3_ck {
                #clock-cells = <0>;
-               compatible = "fixed-clock";
-               clock-frequency = <0>;
+               compatible = "ti,dra7-atl-clock";
+               clocks = <&atl_gfclk_mux>;
        };
 
        hdmi_clkin_ck: hdmi_clkin_ck {
 
        l3_iclk_div: l3_iclk_div {
                #clock-cells = <0>;
-               compatible = "fixed-factor-clock";
+               compatible = "ti,divider-clock";
+               ti,max-div = <2>;
+               ti,bit-shift = <4>;
+               reg = <0x0100>;
                clocks = <&dpll_core_h12x2_ck>;
-               clock-mult = <1>;
-               clock-div = <1>;
+               ti,index-power-of-two;
        };
 
        l4_root_clk_div: l4_root_clk_div {
                compatible = "fixed-factor-clock";
                clocks = <&l3_iclk_div>;
                clock-mult = <1>;
-               clock-div = <1>;
+               clock-div = <2>;
        };
 
        video1_clk2_div: video1_clk2_div {
index b8ece4b..17b22e9 100644 (file)
                compatible = "arm,cortex-a9-gic";
                #interrupt-cells = <3>;
                interrupt-controller;
-               reg = <0x10490000 0x1000>, <0x10480000 0x100>;
+               reg = <0x10490000 0x10000>, <0x10480000 0x10000>;
        };
 
        combiner: interrupt-controller@10440000 {
                interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
                clocks = <&clock CLK_PWM>;
                clock-names = "timers";
-               #pwm-cells = <2>;
+               #pwm-cells = <3>;
                status = "disabled";
        };
 
index e385322..1595722 100644 (file)
                compatible = "samsung,exynos5420-audss-clock";
                reg = <0x03810000 0x0C>;
                #clock-cells = <1>;
-               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>,
+               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
                         <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
                clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
        };
        mfc_pd: power-domain@10044060 {
                compatible = "samsung,exynos4210-pd";
                reg = <0x10044060 0x20>;
+               clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
+                       <&clock CLK_MOUT_USER_ACLK333>;
+               clock-names = "oscclk", "pclk0", "clk0";
        };
 
        disp_pd: power-domain@100440C0 {
index ab1116d..83a5b86 100644 (file)
@@ -73,7 +73,7 @@
 
                L2: l2-cache {
                        compatible = "arm,pl310-cache";
-                       reg = <0xfc10000 0x100000>;
+                       reg = <0x100000 0x100000>;
                        interrupts = <0 15 4>;
                        cache-unified;
                        cache-level = <2>;
index 6bc3243..181d77f 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1>;
-       fsl,cd-controller;
-       fsl,wp-controller;
+       cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
+       wp-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
 
 &esdhc2 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc2>;
-       cd-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>;
+       cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
        wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
        status = "okay";
 };
                                MX51_PAD_SD1_DATA1__SD1_DATA1           0x20d5
                                MX51_PAD_SD1_DATA2__SD1_DATA2           0x20d5
                                MX51_PAD_SD1_DATA3__SD1_DATA3           0x20d5
-                               MX51_PAD_GPIO1_0__SD1_CD                0x20d5
-                               MX51_PAD_GPIO1_1__SD1_WP                0x20d5
+                               MX51_PAD_GPIO1_0__GPIO1_0               0x100
+                               MX51_PAD_GPIO1_1__GPIO1_1               0x100
                        >;
                };
 
index 75e66c9..31cfb7f 100644 (file)
 &esdhc1 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>;
-       fsl,cd-controller;
+       cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
        status = "okay";
 };
 
 
                pinctrl_esdhc1_cd: esdhc1_cd {
                        fsl,pins = <
-                               MX51_PAD_GPIO1_0__SD1_CD 0x20d5
+                               MX51_PAD_GPIO1_0__GPIO1_0 0xd5
                        >;
                };
 
index d5d146a..c4956b0 100644 (file)
                      <0xb0000000 0x20000000>;
        };
 
-       soc {
-               display1: display@di1 {
-                       compatible = "fsl,imx-parallel-display";
-                       interface-pix-fmt = "bgr666";
-                       pinctrl-names = "default";
-                       pinctrl-0 = <&pinctrl_ipu_disp1>;
-
-                       display-timings {
-                               800x480p60 {
-                                       native-mode;
-                                       clock-frequency = <31500000>;
-                                       hactive = <800>;
-                                       vactive = <480>;
-                                       hfront-porch = <40>;
-                                       hback-porch = <88>;
-                                       hsync-len = <128>;
-                                       vback-porch = <33>;
-                                       vfront-porch = <9>;
-                                       vsync-len = <3>;
-                                       vsync-active = <1>;
-                               };
+       display1: display@di1 {
+               compatible = "fsl,imx-parallel-display";
+               interface-pix-fmt = "bgr666";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_ipu_disp1>;
+
+               display-timings {
+                       800x480p60 {
+                               native-mode;
+                               clock-frequency = <31500000>;
+                               hactive = <800>;
+                               vactive = <480>;
+                               hfront-porch = <40>;
+                               hback-porch = <88>;
+                               hsync-len = <128>;
+                               vback-porch = <33>;
+                               vfront-porch = <9>;
+                               vsync-len = <3>;
+                               vsync-active = <1>;
                        };
                };
 
index 5373a5f..c8e51dd 100644 (file)
                        fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
                };
 
+               pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
+                       /*
+                        * Similar to pinctrl_usbotg_2, but we want it
+                        * pulled down for a fixed host connection.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+
                pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
                        fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
                };
 };
 
 &usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
        vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
index af4929a..0e1406e 100644 (file)
@@ -11,7 +11,7 @@
 
 /dts-v1/;
 #include "imx6q.dtsi"
-#include "imx6qdl-gw54xx.dtsi"
+#include "imx6qdl-gw51xx.dtsi"
 
 / {
        model = "Gateworks Ventana i.MX6 Quad GW51XX";
index 25da82a..e8e7816 100644 (file)
                pinctrl-0 = <&pinctrl_cubox_i_ir>;
        };
 
+       pwmleds {
+               compatible = "pwm-leds";
+               pinctrl-names = "default";
+               pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
+
+               front {
+                       active-low;
+                       label = "imx6:red:front";
+                       max-brightness = <248>;
+                       pwms = <&pwm1 0 50000>;
+               };
+       };
+
        regulators {
                compatible = "simple-bus";
 
                        >;
                };
 
+               pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
+                       fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
+               };
+
                pinctrl_cubox_i_spdif: cubox-i-spdif {
                        fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
                };
                        fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
                };
 
+               pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id {
+                       /*
+                        * The Cubox-i pulls this low, so it is pointless to
+                        * leave it as a pull-up, even if that only draws 10uA.
+                        */
+                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
+               };
+
                pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
                        fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
                };
 };
 
 &usbotg {
+       pinctrl-names = "default";
+       pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>;
        vbus-supply = <&reg_usbotg_vbus>;
        status = "okay";
 };
index 31665ad..0db15af 100644 (file)
        status = "okay";
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
index 367af3e..744c8a2 100644 (file)
        };
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
        codec: sgtl5000@0a {
                compatible = "fsl,sgtl5000";
                reg = <0x0a>;
-               clocks = <&clks 169>;
+               clocks = <&clks 201>;
                VDDA-supply = <&reg_1p8v>;
                VDDIO-supply = <&reg_3p3v>;
        };
index c91b5a6..adf150c 100644 (file)
        };
 
        pmic: ltc3676@3c {
-               compatible = "ltc,ltc3676";
+               compatible = "lltc,ltc3676";
                reg = <0x3c>;
 
                regulators {
index d729d0b..79eac68 100644 (file)
                                MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA    0x1b0b1
                        >;
                };
-
-               pinctrl_microsom_usbotg: microsom-usbotg {
-                       /*
-                        * Similar to pinctrl_usbotg_2, but we want it
-                        * pulled down for a fixed host connection.
-                        */
-                       fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
-               };
        };
 };
 
@@ -26,8 +18,3 @@
        pinctrl-0 = <&pinctrl_microsom_uart1>;
        status = "okay";
 };
-
-&usbotg {
-       pinctrl-names = "default";
-       pinctrl-0 = <&pinctrl_microsom_usbotg>;
-};
index 2d4e528..57d4abe 100644 (file)
                                compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
                                reg = <0x02188000 0x4000>;
                                interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
-                               clocks = <&clks IMX6SL_CLK_ENET_REF>,
+                               clocks = <&clks IMX6SL_CLK_ENET>,
                                         <&clks IMX6SL_CLK_ENET_REF>;
                                clock-names = "ipg", "ahb";
                                status = "disabled";
index c5a1fc7..b2d9834 100644 (file)
                compatible = "ethernet-phy-id0141.0cb0",
                             "ethernet-phy-ieee802.3-c22";
                reg = <0>;
-               phy-connection-type = "rgmii-id";
        };
 
        ethphy1: ethernet-phy@1 {
                compatible = "ethernet-phy-id0141.0cb0",
                             "ethernet-phy-ieee802.3-c22";
                reg = <1>;
-               phy-connection-type = "rgmii-id";
        };
 };
 
        status = "okay";
        ethernet0-port@0 {
                phy-handle = <&ethphy0>;
+               phy-connection-type = "rgmii-id";
        };
 };
 
        status = "okay";
        ethernet1-port@0 {
                phy-handle = <&ethphy1>;
+               phy-connection-type = "rgmii-id";
        };
 };
index cf0be66..1becefc 100644 (file)
                        codec {
                        };
                };
+
+               twl_power: power {
+                       compatible = "ti,twl4030-power-beagleboard-xm", "ti,twl4030-power-idle-osc-off";
+                       ti,use_poweroff;
+               };
        };
 };
 
 };
 
 &uart3 {
+       interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
        pinctrl-names = "default";
        pinctrl-0 = <&uart3_pins>;
 };
index 8ae8f00..c8747c7 100644 (file)
        gpios = <&twl_gpio 18 GPIO_ACTIVE_LOW>;
 };
 
+&twl {
+       twl_power: power {
+               compatible = "ti,twl4030-power-omap3-evm", "ti,twl4030-power-idle";
+               ti,use_poweroff;
+       };
+};
+
 &i2c2 {
        clock-frequency = <400000>;
 };
index ae8ae3f..b15f1a7 100644 (file)
                compatible = "ti,twl4030-audio";
                ti,enable-vibra = <1>;
        };
+
+       twl_power: power {
+               compatible = "ti,twl4030-power-n900";
+               ti,use_poweroff;
+       };
 };
 
 &twl_keypad {
index 3bfda16..a4ed549 100644 (file)
@@ -45,7 +45,6 @@
 
                        operating-points = <
                                /* kHz    uV */
-                               500000  880000
                                1000000 1060000
                                1500000 1250000
                        >;
index 8d7ffae..79f68ac 100644 (file)
                        #clock-cells = <0>;
                        clock-output-names = "sd1";
                };
-               sd2_clk: sd3_clk@e615007c {
+               sd2_clk: sd3_clk@e615026c {
                        compatible = "renesas,r8a7791-div6-clock", "renesas,cpg-div6-clock";
-                       reg = <0 0xe615007c 0 4>;
+                       reg = <0 0xe615026c 0 4>;
                        clocks = <&pll1_div2_clk>;
                        #clock-cells = <0>;
                        clock-output-names = "sd2";
index a6cb050..de5ed59 100644 (file)
@@ -74,7 +74,7 @@
                        };
 
                        macb0: ethernet@f0028000 {
-                               compatible = "cdns,pc302-gem", "cdns,gem";
+                               compatible = "atmel,sama5d3-gem";
                                reg = <0xf0028000 0x100>;
                                interrupts = <34 IRQ_TYPE_LEVEL_HIGH 3>;
                                pinctrl-names = "default";
index 4676f25..70fdd20 100644 (file)
                        clock-names = "stmmaceth";
                        resets = <&rst EMAC0_RESET>;
                        reset-names = "stmmaceth";
+                       snps,multicast-filter-bins = <256>;
+                       snps,perfect-filter-entries = <128>;
                        status = "disabled";
                };
 
                        clock-names = "stmmaceth";
                        resets = <&rst EMAC1_RESET>;
                        reset-names = "stmmaceth";
+                       snps,multicast-filter-bins = <256>;
+                       snps,perfect-filter-entries = <128>;
                        status = "disabled";
                };
 
index d6f254f..a0f6f75 100644 (file)
 
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a1_ls CLK_GMAC0_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
                };
 
                ethernet1: dwmac@fef08000 {
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii1>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a0_ls CLK_ETH1_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
                };
 
                rc: rc@fe518000 {
diff --git a/arch/arm/boot/dts/stih416-b2020-revE.dts b/arch/arm/boot/dts/stih416-b2020-revE.dts
deleted file mode 100644 (file)
index ba0fa2c..0000000
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
- * Author: Lee Jones <lee.jones@linaro.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-/dts-v1/;
-#include "stih416.dtsi"
-#include "stih41x-b2020.dtsi"
-/ {
-       model = "STiH416 B2020 REV-E";
-       compatible = "st,stih416-b2020", "st,stih416";
-
-       soc {
-               leds {
-                       compatible = "gpio-leds";
-                       red {
-                               #gpio-cells             = <1>;
-                               label                   = "Front Panel LED";
-                               gpios                   = <&PIO4 1>;
-                               linux,default-trigger   = "heartbeat";
-                       };
-                       green {
-                               gpios                   = <&PIO1 3>;
-                               default-state           = "off";
-                       };
-               };
-
-               ethernet1: dwmac@fef08000 {
-                       snps,reset-gpio = <&PIO0 7>;
-               };
-       };
-};
diff --git a/arch/arm/boot/dts/stih416-b2020e.dts b/arch/arm/boot/dts/stih416-b2020e.dts
new file mode 100644 (file)
index 0000000..ba0fa2c
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 STMicroelectronics (R&D) Limited.
+ * Author: Lee Jones <lee.jones@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+/dts-v1/;
+#include "stih416.dtsi"
+#include "stih41x-b2020.dtsi"
+/ {
+       model = "STiH416 B2020 REV-E";
+       compatible = "st,stih416-b2020", "st,stih416";
+
+       soc {
+               leds {
+                       compatible = "gpio-leds";
+                       red {
+                               #gpio-cells             = <1>;
+                               label                   = "Front Panel LED";
+                               gpios                   = <&PIO4 1>;
+                               linux,default-trigger   = "heartbeat";
+                       };
+                       green {
+                               gpios                   = <&PIO1 3>;
+                               default-state           = "off";
+                       };
+               };
+
+               ethernet1: dwmac@fef08000 {
+                       snps,reset-gpio = <&PIO0 7>;
+               };
+       };
+};
index 06473c5..84758d7 100644 (file)
                        reset-names             = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii0>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a1_ls CLK_GMAC0_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
                };
 
                ethernet1: dwmac@fef08000 {
                        reset-names     = "stmmaceth";
                        pinctrl-names   = "default";
                        pinctrl-0       = <&pinctrl_mii1>;
-                       clock-names     = "stmmaceth";
-                       clocks          = <&clk_s_a0_ls CLK_ETH1_PHY>;
+                       clock-names     = "stmmaceth", "sti-ethclk";
+                       clocks          = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
                };
 
                rc: rc@fe518000 {
index 6ef146e..a20fa80 100644 (file)
@@ -182,7 +182,6 @@ static int scoop_probe(struct platform_device *pdev)
        struct scoop_config *inf;
        struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        int ret;
-       int temp;
 
        if (!mem)
                return -EINVAL;
index 9d13dae..4bf7226 100644 (file)
@@ -94,10 +94,10 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
 CONFIG_BACKLIGHT_PWM=y
 # CONFIG_USB_SUPPORT is not set
 CONFIG_MMC=y
-CONFIG_MMC_UNSAFE_RESUME=y
 CONFIG_MMC_BLOCK_MINORS=32
 CONFIG_MMC_TEST=y
 CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_BCM_KONA=y
 CONFIG_NEW_LEDS=y
 CONFIG_LEDS_CLASS=y
index ef88153..59b7e45 100644 (file)
@@ -186,6 +186,7 @@ CONFIG_VIDEO_MX3=y
 CONFIG_V4L_MEM2MEM_DRIVERS=y
 CONFIG_VIDEO_CODA=y
 CONFIG_SOC_CAMERA_OV2640=y
+CONFIG_IMX_IPUV3_CORE=y
 CONFIG_DRM=y
 CONFIG_DRM_PANEL_SIMPLE=y
 CONFIG_BACKLIGHT_LCD_SUPPORT=y
index e2d6204..5348364 100644 (file)
@@ -223,12 +223,12 @@ CONFIG_POWER_RESET_GPIO=y
 CONFIG_POWER_RESET_SUN6I=y
 CONFIG_SENSORS_LM90=y
 CONFIG_THERMAL=y
-CONFIG_DOVE_THERMAL=y
 CONFIG_ARMADA_THERMAL=y
 CONFIG_WATCHDOG=y
 CONFIG_ORION_WATCHDOG=y
 CONFIG_SUNXI_WATCHDOG=y
 CONFIG_MFD_AS3722=y
+CONFIG_MFD_BCM590XX=y
 CONFIG_MFD_CROS_EC=y
 CONFIG_MFD_CROS_EC_SPI=y
 CONFIG_MFD_MAX8907=y
@@ -240,6 +240,7 @@ CONFIG_MFD_TPS65910=y
 CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
 CONFIG_REGULATOR_AB8500=y
 CONFIG_REGULATOR_AS3722=y
+CONFIG_REGULATOR_BCM590XX=y
 CONFIG_REGULATOR_GPIO=y
 CONFIG_REGULATOR_MAX8907=y
 CONFIG_REGULATOR_PALMAS=y
@@ -300,6 +301,7 @@ CONFIG_MMC=y
 CONFIG_MMC_BLOCK_MINORS=16
 CONFIG_MMC_ARMMMCI=y
 CONFIG_MMC_SDHCI=y
+CONFIG_MMC_SDHCI_PLTFM=y
 CONFIG_MMC_SDHCI_OF_ARASAN=y
 CONFIG_MMC_SDHCI_ESDHC_IMX=y
 CONFIG_MMC_SDHCI_DOVE=y
@@ -352,6 +354,7 @@ CONFIG_MFD_NVEC=y
 CONFIG_KEYBOARD_NVEC=y
 CONFIG_SERIO_NVEC_PS2=y
 CONFIG_NVEC_POWER=y
+CONFIG_QCOM_GSBI=y
 CONFIG_COMMON_CLK_QCOM=y
 CONFIG_MSM_GCC_8660=y
 CONFIG_MSM_MMCC_8960=y
index e11170e..b0bfefa 100644 (file)
@@ -14,6 +14,7 @@ CONFIG_MACH_ARMADA_370=y
 CONFIG_MACH_ARMADA_375=y
 CONFIG_MACH_ARMADA_38X=y
 CONFIG_MACH_ARMADA_XP=y
+CONFIG_MACH_DOVE=y
 CONFIG_NEON=y
 # CONFIG_CACHE_L2X0 is not set
 # CONFIG_SWP_EMULATE is not set
@@ -52,6 +53,7 @@ CONFIG_INPUT_EVDEV=y
 CONFIG_KEYBOARD_GPIO=y
 CONFIG_SERIAL_8250=y
 CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
 CONFIG_I2C=y
 CONFIG_SPI=y
 CONFIG_SPI_ORION=y
index 59066cf..536a137 100644 (file)
@@ -32,6 +32,7 @@ CONFIG_SOC_OMAP5=y
 CONFIG_SOC_AM33XX=y
 CONFIG_SOC_AM43XX=y
 CONFIG_SOC_DRA7XX=y
+CONFIG_CACHE_L2X0=y
 CONFIG_ARM_THUMBEE=y
 CONFIG_ARM_ERRATA_411920=y
 CONFIG_SMP=y
index 4522366..15468fb 100644 (file)
@@ -137,7 +137,7 @@ static int aesbs_cbc_encrypt(struct blkcipher_desc *desc,
                                dst += AES_BLOCK_SIZE;
                        } while (--blocks);
                }
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -158,7 +158,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
                bsaes_cbc_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        while (walk.nbytes) {
                u32 blocks = walk.nbytes / AES_BLOCK_SIZE;
@@ -182,7 +182,7 @@ static int aesbs_cbc_decrypt(struct blkcipher_desc *desc,
                        dst += AES_BLOCK_SIZE;
                        src += AES_BLOCK_SIZE;
                } while (--blocks);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -268,7 +268,7 @@ static int aesbs_xts_encrypt(struct blkcipher_desc *desc,
                bsaes_xts_encrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->enc, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
@@ -292,7 +292,7 @@ static int aesbs_xts_decrypt(struct blkcipher_desc *desc,
                bsaes_xts_decrypt(walk.src.virt.addr, walk.dst.virt.addr,
                                  walk.nbytes, &ctx->dec, walk.iv);
                kernel_neon_end();
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        return err;
 }
index eb577f4..39eb16b 100644 (file)
@@ -52,7 +52,7 @@ extern inline void *return_address(unsigned int level)
 
 #endif
 
-#define ftrace_return_addr(n) return_address(n)
+#define ftrace_return_address(n) return_address(n)
 
 #endif /* ifndef __ASSEMBLY__ */
 
index 060a75e..0406cb3 100644 (file)
@@ -50,6 +50,7 @@ struct machine_desc {
        struct smp_operations   *smp;           /* SMP operations       */
        bool                    (*smp_init)(void);
        void                    (*fixup)(struct tag *, char **);
+       void                    (*dt_fixup)(void);
        void                    (*init_meminfo)(void);
        void                    (*reserve)(void);/* reserve mem blocks  */
        void                    (*map_io)(void);/* IO mapping function  */
index d9702eb..94060ad 100644 (file)
@@ -208,8 +208,6 @@ struct sync_struct {
        struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
 };
 
-extern unsigned long sync_phys;        /* physical address of *mcpm_sync */
-
 void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
 void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
 void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
index f989d7c..e4e4208 100644 (file)
@@ -114,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)
        ((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
 #define thread_saved_sp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
 #define thread_saved_fp(tsk)   \
        ((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+#else
+#define thread_saved_fp(tsk)   \
+       ((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
 
 extern void crunch_task_disable(struct thread_info *);
 extern void crunch_task_copy(struct thread_info *, void *);
index e94a157..11c54de 100644 (file)
@@ -212,7 +212,7 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
        mdesc_best = &__mach_desc_GENERIC_DT;
 #endif
 
-       if (!dt_phys || !early_init_dt_scan(phys_to_virt(dt_phys)))
+       if (!dt_phys || !early_init_dt_verify(phys_to_virt(dt_phys)))
                return NULL;
 
        mdesc = of_flat_dt_match_machine(mdesc_best, arch_get_next_mach);
@@ -237,6 +237,12 @@ const struct machine_desc * __init setup_machine_fdt(unsigned int dt_phys)
                dump_machine_table(); /* does not return */
        }
 
+       /* We really don't want to do this, but sometimes firmware provides buggy data */
+       if (mdesc->dt_fixup)
+               mdesc->dt_fixup();
+
+       early_init_dt_scan_nodes();
+
        /* Change machine number to match the mdesc we're using */
        __machine_arch_type = mdesc->nr;
 
index 9db4b65..cb14242 100644 (file)
@@ -74,8 +74,6 @@ void kprobe_arm_test_cases(void)
        TEST_RRR( op "lt" s "   r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
        TEST_RR(  op "gt" s "   r12, r13"       ", r",14,val, ", ror r",14,7,"")\
        TEST_RR(  op "le" s "   r14, r",0, val, ", r13"       ", lsl r",14,8,"")\
-       TEST_RR(  op s "        r12, pc"        ", r",14,val, ", ror r",14,7,"")\
-       TEST_RR(  op s "        r14, r",0, val, ", pc"        ", lsl r",14,8,"")\
        TEST_R(   op "eq" s "   r0,  r",11,VAL1,", #0xf5")                      \
        TEST_R(   op "ne" s "   r11, r",0, VAL1,", #0xf5000000")                \
        TEST_R(   op s "        r7,  r",8, VAL2,", #0x000af000")                \
@@ -103,8 +101,6 @@ void kprobe_arm_test_cases(void)
        TEST_RRR( op "ge        r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")   \
        TEST_RR(  op "le        r13"       ", r",14,val, ", ror r",14,7,"")     \
        TEST_RR(  op "gt        r",0, val, ", r13"       ", lsl r",14,8,"")     \
-       TEST_RR(  op "  pc"        ", r",14,val, ", ror r",14,7,"")             \
-       TEST_RR(  op "  r",0, val, ", pc"        ", lsl r",14,8,"")             \
        TEST_R(   op "eq        r",11,VAL1,", #0xf5")                           \
        TEST_R(   op "ne        r",0, VAL1,", #0xf5000000")                     \
        TEST_R(   op "  r",8, VAL2,", #0x000af000")
@@ -125,7 +121,6 @@ void kprobe_arm_test_cases(void)
        TEST_RR(  op "ge" s "   r11, r",11,N(val),", asr r",7, 6,"")    \
        TEST_RR(  op "lt" s "   r12, r",11,val, ", ror r",14,7,"")      \
        TEST_R(   op "gt" s "   r14, r13"       ", lsl r",14,8,"")      \
-       TEST_R(   op "le" s "   r14, pc"        ", lsl r",14,8,"")      \
        TEST(     op "eq" s "   r0,  #0xf5")                            \
        TEST(     op "ne" s "   r11, #0xf5000000")                      \
        TEST(     op s "        r7,  #0x000af000")                      \
@@ -159,12 +154,19 @@ void kprobe_arm_test_cases(void)
        TEST_SUPPORTED("cmp     pc, #0x1000");
        TEST_SUPPORTED("cmp     sp, #0x1000");
 
-       /* Data-processing with PC as shift*/
+       /* Data-processing with PC and a shift count in a register */
        TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) "       @ cmp   r12, r14, asl pc")
        TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) "       @ mov   r12, r14, asl pc")
        TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) "       @ add   r10, r12, r14, asl pc")
-
-       /* Data-processing with PC as shift*/
+       TEST_UNSUPPORTED(__inst_arm(0xe151021f) "       @ cmp   r1, pc, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe17f0211) "       @ cmn   pc, r1, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) "       @ mov   r1, pc, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) "       @ mov   pc, r1, lsl r2")
+       TEST_UNSUPPORTED(__inst_arm(0xe042131f) "       @ sub   r1, r2, pc, lsl r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) "       @ bic   r1, pc, r2, lsl r3")
+       TEST_UNSUPPORTED(__inst_arm(0xe081f312) "       @ add   pc, r1, r2, lsl r3")
+
+       /* Data-processing with PC as a target and status registers updated */
        TEST_UNSUPPORTED("movs  pc, r1")
        TEST_UNSUPPORTED("movs  pc, r1, lsl r2")
        TEST_UNSUPPORTED("movs  pc, #0x10000")
@@ -187,14 +189,14 @@ void kprobe_arm_test_cases(void)
        TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
        TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
        TEST_BF_R ("mov pc, r",0,2f,"")
-       TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"")
+       TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1")
        TEST_BB(   "sub pc, pc, #1b-2b+8")
 #if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
        TEST_BB(   "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
 #endif
        TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
        TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
-       TEST_RR(   "add pc, pc, r",10,-2,", asl r",11,1,"")
+       TEST_R(    "add pc, pc, r",10,-2,", asl #1")
 #ifdef CONFIG_THUMB2_KERNEL
        TEST_ARM_TO_THUMB_INTERWORK_R("add      pc, pc, r",0,3f-1f-8+1,"")
        TEST_ARM_TO_THUMB_INTERWORK_R("sub      pc, r",0,3f+8+1,", #8")
@@ -216,6 +218,7 @@ void kprobe_arm_test_cases(void)
        TEST_BB_R("bx   r",7,2f,"")
        TEST_BF_R("bxeq r",14,2f,"")
 
+#if __LINUX_ARM_ARCH__ >= 5
        TEST_R("clz     r0, r",0, 0x0,"")
        TEST_R("clzeq   r7, r",14,0x1,"")
        TEST_R("clz     lr, r",7, 0xffffffff,"")
@@ -337,6 +340,7 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
        TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
        TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
+#endif
 
        TEST_GROUP("Multiply and multiply-accumulate")
 
@@ -559,6 +563,7 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED("ldrsht        r1, [r2], #48")
 #endif
 
+#if __LINUX_ARM_ARCH__ >= 5
        TEST_RPR(  "strd        r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
        TEST_RPR(  "strccd      r",8, VAL2,", [r",13,0, ", r",12,48,"]")
        TEST_RPR(  "strd        r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
@@ -595,6 +600,7 @@ void kprobe_arm_test_cases(void)
        TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) "       @ ldrd r12, [pc, #48]!")
        TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) "       @ ldrd pc, [r9], #48")
        TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) "       @ ldrd lr, [r9], #48")
+#endif
 
        TEST_GROUP("Miscellaneous")
 
@@ -1227,7 +1233,9 @@ void kprobe_arm_test_cases(void)
        TEST_COPROCESSOR( "mrc"two"     0, 0, r0, cr0, cr0, 0")
 
        COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
+#if __LINUX_ARM_ARCH__ >= 5
        COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
+#endif
        TEST_UNSUPPORTED("svc   0")
        TEST_UNSUPPORTED("svc   0xffffff")
 
@@ -1287,7 +1295,9 @@ void kprobe_arm_test_cases(void)
        TEST(   "blx    __dummy_thumb_subroutine_odd")
 #endif /* __LINUX_ARM_ARCH__ >= 6 */
 
+#if __LINUX_ARM_ARCH__ >= 5
        COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
+#endif
 #if __LINUX_ARM_ARCH__ >= 6
        COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
 #endif
index 3796399..08d7312 100644 (file)
@@ -225,6 +225,7 @@ static int pre_handler_called;
 static int post_handler_called;
 static int jprobe_func_called;
 static int kretprobe_handler_called;
+static int tests_failed;
 
 #define FUNC_ARG1 0x12345678
 #define FUNC_ARG2 0xabcdef
@@ -461,6 +462,13 @@ static int run_api_tests(long (*func)(long, long))
 
        pr_info("    jprobe\n");
        ret = test_jprobe(func);
+#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
+       if (ret == -EINVAL) {
+               pr_err("FAIL: Known longtime bug with jprobe on Thumb kernels\n");
+               tests_failed = ret;
+               ret = 0;
+       }
+#endif
        if (ret < 0)
                return ret;
 
@@ -1671,6 +1679,8 @@ static int __init run_all_tests(void)
 #endif
 
 out:
+       if (ret == 0)
+               ret = tests_failed;
        if (ret == 0)
                pr_info("Finished kprobe tests OK\n");
        else
index 2037f72..1d37568 100644 (file)
@@ -1924,7 +1924,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
                                   struct perf_event *event)
 {
        int idx;
-       int bit;
+       int bit = -1;
        unsigned int prefix;
        unsigned int region;
        unsigned int code;
@@ -1953,7 +1953,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
        }
 
        idx = armv7pmu_get_event_idx(cpuc, event);
-       if (idx < 0 && krait_event)
+       if (idx < 0 && bit >= 0)
                clear_bit(bit, cpuc->used_mask);
 
        return idx;
index 51a13a0..8eaef81 100644 (file)
@@ -341,12 +341,12 @@ static const union decode_item arm_cccc_000x_table[] = {
        /* CMP (reg-shift reg)  cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
        /* CMN (reg-shift reg)  cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
        DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG,
-                                                REGS(ANY, 0, NOPC, 0, ANY)),
+                                                REGS(NOPC, 0, NOPC, 0, NOPC)),
 
        /* MOV (reg-shift reg)  cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
        /* MVN (reg-shift reg)  cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
        DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG,
-                                                REGS(0, ANY, NOPC, 0, ANY)),
+                                                REGS(0, NOPC, NOPC, 0, NOPC)),
 
        /* AND (reg-shift reg)  cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
        /* EOR (reg-shift reg)  cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
@@ -359,7 +359,7 @@ static const union decode_item arm_cccc_000x_table[] = {
        /* ORR (reg-shift reg)  cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
        /* BIC (reg-shift reg)  cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
        DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG,
-                                                REGS(ANY, ANY, NOPC, 0, ANY)),
+                                                REGS(NOPC, NOPC, NOPC, 0, NOPC)),
 
        DECODE_END
 };
index 0dd3b79..0c27ed6 100644 (file)
@@ -908,7 +908,7 @@ enum ptrace_syscall_dir {
        PTRACE_SYSCALL_EXIT,
 };
 
-static int tracehook_report_syscall(struct pt_regs *regs,
+static void tracehook_report_syscall(struct pt_regs *regs,
                                    enum ptrace_syscall_dir dir)
 {
        unsigned long ip;
@@ -926,7 +926,6 @@ static int tracehook_report_syscall(struct pt_regs *regs,
                current_thread_info()->syscall = -1;
 
        regs->ARM_ip = ip;
-       return current_thread_info()->syscall;
 }
 
 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
@@ -938,7 +937,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
                return -1;
 
        if (test_thread_flag(TIF_SYSCALL_TRACE))
-               scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+               tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
+
+       scno = current_thread_info()->syscall;
 
        if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
                trace_sys_enter(regs, scno);
index 9d85318..e35d880 100644 (file)
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
                cpu_topology[cpuid].socket_id, mpidr);
 }
 
-static inline const int cpu_corepower_flags(void)
+static inline int cpu_corepower_flags(void)
 {
        return SD_SHARE_PKG_RESOURCES  | SD_SHARE_POWERDOMAIN;
 }
index 9bc6db1..41c8391 100644 (file)
@@ -1,10 +1,9 @@
-config ARCH_BCM
+menuconfig ARCH_BCM
        bool "Broadcom SoC Support" if ARCH_MULTI_V6_V7
        help
          This enables support for Broadcom ARM based SoC chips
 
-menu "Broadcom SoC Selection"
-       depends on ARCH_BCM
+if ARCH_BCM
 
 config ARCH_BCM_MOBILE
        bool "Broadcom Mobile SoC Support" if ARCH_MULTI_V7
@@ -88,4 +87,4 @@ config ARCH_BCM_5301X
          different SoC or with the older BCM47XX and BCM53XX based
          network SoC using a MIPS CPU, they are supported by arch/mips/bcm47xx
 
-endmenu
+endif
index 101e0f3..2631cfc 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_BERLIN
+menuconfig ARCH_BERLIN
        bool "Marvell Berlin SoCs" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
        select ARM_GIC
@@ -9,8 +9,6 @@ config ARCH_BERLIN
 
 if ARCH_BERLIN
 
-menu "Marvell Berlin SoC variants"
-
 config MACH_BERLIN_BG2
        bool "Marvell Armada 1500 (BG2)"
        select CACHE_L2X0
@@ -30,6 +28,4 @@ config MACH_BERLIN_BG2Q
        select HAVE_ARM_TWD if SMP
        select PINCTRL_BERLIN_BG2Q
 
-endmenu
-
 endif
index 66838f4..3c22a19 100644 (file)
@@ -1,12 +1,11 @@
-config ARCH_CNS3XXX
+menuconfig ARCH_CNS3XXX
        bool "Cavium Networks CNS3XXX family" if ARCH_MULTI_V6
        select ARM_GIC
        select PCI_DOMAINS if PCI
        help
          Support for Cavium Networks CNS3XXX platform.
 
-menu "CNS3XXX platform type"
-       depends on ARCH_CNS3XXX
+if ARCH_CNS3XXX
 
 config MACH_CNS3420VB
        bool "Support for CNS3420 Validation Board"
@@ -17,4 +16,4 @@ config MACH_CNS3420VB
          This is a platform with an on-board ARM11 MPCore and has support
          for USB, USB-OTG, MMC/SD/SDIO, SATA, PCI-E, etc.
 
-endmenu
+endif
index db18ef8..584e8d4 100644 (file)
@@ -39,7 +39,6 @@ config ARCH_DAVINCI_DA830
 config ARCH_DAVINCI_DA850
        bool "DA850/OMAP-L138/AM18x based system"
        select ARCH_DAVINCI_DA8XX
-       select ARCH_HAS_CPUFREQ
        select CP_INTC
 
 config ARCH_DAVINCI_DA8XX
index d58995c..8f9b66c 100644 (file)
@@ -7,10 +7,9 @@
 
 # Configuration options for the EXYNOS4
 
-config ARCH_EXYNOS
+menuconfig ARCH_EXYNOS
        bool "Samsung EXYNOS" if ARCH_MULTI_V7
        select ARCH_HAS_BANDGAP
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
@@ -30,8 +29,6 @@ config ARCH_EXYNOS
 
 if ARCH_EXYNOS
 
-menu "SAMSUNG EXYNOS SoCs Support"
-
 config ARCH_EXYNOS3
        bool "SAMSUNG EXYNOS3"
        select ARM_CPU_SUSPEND if PM
@@ -118,8 +115,6 @@ config SOC_EXYNOS5800
        default y
        depends on SOC_EXYNOS5420
 
-endmenu
-
 config EXYNOS5420_MCPM
        bool "Exynos5420 Multi-Cluster PM support"
        depends on MCPM && SOC_EXYNOS5420
index 16617bd..1ee9176 100644 (file)
@@ -118,6 +118,7 @@ extern void __iomem *sysram_ns_base_addr;
 extern void __iomem *sysram_base_addr;
 void exynos_init_io(void);
 void exynos_restart(enum reboot_mode mode, const char *cmd);
+void exynos_sysram_init(void);
 void exynos_cpuidle_init(void);
 void exynos_cpufreq_init(void);
 void exynos_init_late(void);
index 90aab4d..66c9b96 100644 (file)
@@ -173,10 +173,8 @@ static struct platform_device exynos_cpuidle = {
 
 void __init exynos_cpuidle_init(void)
 {
-       if (soc_is_exynos5440())
-               return;
-
-       platform_device_register(&exynos_cpuidle);
+       if (soc_is_exynos4210() || soc_is_exynos5250())
+               platform_device_register(&exynos_cpuidle);
 }
 
 void __init exynos_cpufreq_init(void)
@@ -184,6 +182,28 @@ void __init exynos_cpufreq_init(void)
        platform_device_register_simple("exynos-cpufreq", -1, NULL, 0);
 }
 
+void __iomem *sysram_base_addr;
+void __iomem *sysram_ns_base_addr;
+
+void __init exynos_sysram_init(void)
+{
+       struct device_node *node;
+
+       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") {
+               if (!of_device_is_available(node))
+                       continue;
+               sysram_base_addr = of_iomap(node, 0);
+               break;
+       }
+
+       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") {
+               if (!of_device_is_available(node))
+                       continue;
+               sysram_ns_base_addr = of_iomap(node, 0);
+               break;
+       }
+}
+
 void __init exynos_init_late(void)
 {
        if (of_machine_is_compatible("samsung,exynos5440"))
@@ -198,7 +218,7 @@ static int __init exynos_fdt_map_chipid(unsigned long node, const char *uname,
                                        int depth, void *data)
 {
        struct map_desc iodesc;
-       __be32 *reg;
+       const __be32 *reg;
        int len;
 
        if (!of_flat_dt_is_compatible(node, "samsung,exynos4210-chipid") &&
@@ -271,6 +291,13 @@ static void __init exynos_dt_machine_init(void)
                }
        }
 
+       /*
+        * This is called from smp_prepare_cpus if we've built for SMP, but
+        * we still need to set it up for PM and firmware ops if not.
+        */
+       if (!IS_ENABLED(CONFIG_SMP))
+               exynos_sysram_init();
+
        exynos_cpuidle_init();
        exynos_cpufreq_init();
 
@@ -308,6 +335,15 @@ static void __init exynos_reserve(void)
 #endif
 }
 
+static void __init exynos_dt_fixup(void)
+{
+       /*
+        * Some versions of U-Boot pass garbage entries in the memory node;
+        * limit them to the old CONFIG_ARM_NR_BANKS.
+        */
+       of_fdt_limit_memory(8);
+}
+
 DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
        /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
        /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
@@ -321,4 +357,5 @@ DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
        .dt_compat      = exynos_dt_compat,
        .restart        = exynos_restart,
        .reserve        = exynos_reserve,
+       .dt_fixup       = exynos_dt_fixup,
 MACHINE_END
index eb91d23..e8797bb 100644 (file)
@@ -57,8 +57,13 @@ static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
 
        boot_reg = sysram_ns_base_addr + 0x1c;
 
-       if (!soc_is_exynos4212() && !soc_is_exynos3250())
-               boot_reg += 4*cpu;
+       /*
+        * Almost all Exynos SoCs that run in secure mode need no additional
+        * per-CPU boot register offset, with Exynos4412 being the only
+        * exception.
+        */
+       if (soc_is_exynos4412())
+               boot_reg += 4 * cpu;
 
        __raw_writel(boot_addr, boot_reg);
        return 0;
index 69fa483..920a4ba 100644 (file)
@@ -40,21 +40,17 @@ static inline void cpu_leave_lowpower(void)
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 {
+       u32 mpidr = cpu_logical_map(cpu);
+       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+
        for (;;) {
 
-               /* make cpu1 to be turned off at next WFI command */
-               if (cpu == 1)
-                       exynos_cpu_power_down(cpu);
+               /* Turn the CPU off on next WFI instruction. */
+               exynos_cpu_power_down(core_id);
 
-               /*
-                * here's the WFI
-                */
-               asm(".word      0xe320f003\n"
-                   :
-                   :
-                   : "memory", "cc");
+               wfi();
 
-               if (pen_release == cpu_logical_map(cpu)) {
+               if (pen_release == core_id) {
                        /*
                         * OK, proper wakeup, we're done
                         */
index 0498d0b..ace0ed6 100644 (file)
@@ -25,7 +25,6 @@
 
 #define EXYNOS5420_CPUS_PER_CLUSTER    4
 #define EXYNOS5420_NR_CLUSTERS         2
-#define MCPM_BOOT_ADDR_OFFSET          0x1c
 
 /*
  * The common v7_exit_coherency_flush API could not be used because of the
@@ -343,11 +342,13 @@ static int __init exynos_mcpm_init(void)
        pr_info("Exynos MCPM support installed\n");
 
        /*
-        * Future entries into the kernel can now go
-        * through the cluster entry vectors.
+        * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
+        * as part of secondary_cpu_start().  Let's redirect it to
+        * mcpm_entry_point().
         */
-       __raw_writel(virt_to_phys(mcpm_entry_point),
-                       ns_sram_base_addr + MCPM_BOOT_ADDR_OFFSET);
+       __raw_writel(0xe59f0000, ns_sram_base_addr);     /* ldr r0, [pc, #0] */
+       __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx  r0 */
+       __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
 
        iounmap(ns_sram_base_addr);
 
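For readers counting bytes in the trampoline above: in ARM state the PC reads as the current instruction plus 8, so the first instruction loads the literal placed two words after it. A commented sketch of the layout written into non-secure SRAM:

/*
 * offset 0: e59f0000  ldr r0, [pc, #0]  @ PC reads as offset 8, so this
 *                                       @ loads the literal at offset 8
 * offset 4: e12fff10  bx  r0            @ branch to it
 * offset 8: virt_to_phys(mcpm_entry_point)
 *
 * U-Boot SPL jumps blindly to offset 0 and ends up in the MCPM entry
 * vector.
 */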
index ec02422..50b9aad 100644 (file)
 
 extern void exynos4_secondary_startup(void);
 
-void __iomem *sysram_base_addr;
-void __iomem *sysram_ns_base_addr;
-
-static void __init exynos_smp_prepare_sysram(void)
-{
-       struct device_node *node;
-
-       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram") {
-               if (!of_device_is_available(node))
-                       continue;
-               sysram_base_addr = of_iomap(node, 0);
-               break;
-       }
-
-       for_each_compatible_node(node, NULL, "samsung,exynos4210-sysram-ns") {
-               if (!of_device_is_available(node))
-                       continue;
-               sysram_ns_base_addr = of_iomap(node, 0);
-               break;
-       }
-}
-
 static inline void __iomem *cpu_boot_reg_base(void)
 {
        if (soc_is_exynos4210() && samsung_rev() == EXYNOS4210_REV_1_1)
@@ -112,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
 static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
        unsigned long timeout;
-       unsigned long phys_cpu = cpu_logical_map(cpu);
+       u32 mpidr = cpu_logical_map(cpu);
+       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
        int ret = -ENOSYS;
 
        /*
@@ -126,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
         * the holding pen - release it, then wait for it to flag
         * that it has been released by resetting pen_release.
         *
-        * Note that "pen_release" is the hardware CPU ID, whereas
+        * Note that "pen_release" is the hardware CPU core ID, whereas
         * "cpu" is Linux's internal ID.
         */
-       write_pen_release(phys_cpu);
+       write_pen_release(core_id);
 
-       if (!exynos_cpu_power_state(cpu)) {
-               exynos_cpu_power_up(cpu);
+       if (!exynos_cpu_power_state(core_id)) {
+               exynos_cpu_power_up(core_id);
                timeout = 10;
 
                /* wait max 10 ms until cpu1 is on */
-               while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) {
+               while (exynos_cpu_power_state(core_id)
+                      != S5P_CORE_LOCAL_PWR_EN) {
                        if (timeout-- == 0)
                                break;
 
@@ -167,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
                 * Try to set boot address using firmware first
                 * and fall back to boot register if it fails.
                 */
-               ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+               ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
                if (ret && ret != -ENOSYS)
                        goto fail;
                if (ret == -ENOSYS) {
-                       void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+                       void __iomem *boot_reg = cpu_boot_reg(core_id);
 
                        if (IS_ERR(boot_reg)) {
                                ret = PTR_ERR(boot_reg);
                                goto fail;
                        }
-                       __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+                       __raw_writel(boot_addr, cpu_boot_reg(core_id));
                }
 
-               call_firmware_op(cpu_boot, phys_cpu);
+               call_firmware_op(cpu_boot, core_id);
 
                arch_send_wakeup_ipi_mask(cpumask_of(cpu));
 
@@ -234,11 +214,11 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 {
        int i;
 
+       exynos_sysram_init();
+
        if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(scu_base_addr());
 
-       exynos_smp_prepare_sysram();
-
        /*
         * Write the address of secondary startup into the
         * system-wide flags register. The boot monitor waits
@@ -249,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
         * boot register if it fails.
         */
        for (i = 1; i < max_cpus; ++i) {
-               unsigned long phys_cpu;
                unsigned long boot_addr;
+               u32 mpidr;
+               u32 core_id;
                int ret;
 
-               phys_cpu = cpu_logical_map(i);
+               mpidr = cpu_logical_map(i);
+               core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
                boot_addr = virt_to_phys(exynos4_secondary_startup);
 
-               ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr);
+               ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
                if (ret && ret != -ENOSYS)
                        break;
                if (ret == -ENOSYS) {
-                       void __iomem *boot_reg = cpu_boot_reg(phys_cpu);
+                       void __iomem *boot_reg = cpu_boot_reg(core_id);
 
                        if (IS_ERR(boot_reg))
                                break;
-                       __raw_writel(boot_addr, cpu_boot_reg(phys_cpu));
+                       __raw_writel(boot_addr, cpu_boot_reg(core_id));
                }
        }
 }
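
This hunk and exynos_boot_secondary() above repeat the same idiom: try the secure firmware operation first and touch the SYSRAM boot register only when no firmware is present (-ENOSYS). Factored out, the helper might look like this (hypothetical name; a sketch assuming the surrounding kernel context):

static int exynos_set_boot_addr(u32 core_id, unsigned long boot_addr)
{
	int ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);

	if (ret == -ENOSYS) {
		/* No secure firmware: write the boot register directly. */
		void __iomem *boot_reg = cpu_boot_reg(core_id);

		if (IS_ERR(boot_reg))
			return PTR_ERR(boot_reg);
		__raw_writel(boot_addr, boot_reg);
		ret = 0;
	}
	return ret;
}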
index 87c0d34..202ca73 100644 (file)
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
        tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
        __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_save_register();
 
        return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
        if (exynos_pm_central_resume())
                goto early_wakeup;
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                exynos_cpu_restore_register();
 
        /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
 
        s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
 
-       if (!soc_is_exynos5250())
+       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(S5P_VA_SCU);
 
 early_wakeup:
@@ -440,15 +440,18 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
        case CPU_PM_ENTER:
                if (cpu == 0) {
                        exynos_pm_central_suspend();
-                       exynos_cpu_save_register();
+                       if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
+                               exynos_cpu_save_register();
                }
                break;
 
        case CPU_PM_EXIT:
                if (cpu == 0) {
-                       if (!soc_is_exynos5250())
+                       if (read_cpuid_part_number() ==
+                                       ARM_CPU_PART_CORTEX_A9) {
                                scu_enable(S5P_VA_SCU);
-                       exynos_cpu_restore_register();
+                               exynos_cpu_restore_register();
+                       }
                        exynos_pm_central_resume();
                }
                break;
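
All four hunks in this file replace SoC-name checks (soc_is_exynos5250() and friends) with a check on the CPU core itself: the external SCU and the registers saved by exynos_cpu_save_register() exist only on Cortex-A9 based parts. If the test grows more callers, it could be captured once, along these lines (hypothetical helper; a sketch only):

/*
 * True on Cortex-A9 based Exynos SoCs - the only ones with an external
 * SCU and the CP15 state handled by exynos_cpu_{save,restore}_register().
 */
static inline bool exynos_is_cortex_a9(void)
{
	return read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
}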
index fe6570e..797cb13 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/err.h>
 #include <linux/slab.h>
 #include <linux/pm_domain.h>
+#include <linux/clk.h>
 #include <linux/delay.h>
 #include <linux/of_address.h>
 #include <linux/of_platform.h>
@@ -24,6 +25,8 @@
 
 #include "regs-pmu.h"
 
+#define MAX_CLK_PER_DOMAIN     4
+
 /*
  * Exynos specific wrapper around the generic power domain
  */
@@ -32,6 +35,9 @@ struct exynos_pm_domain {
        char const *name;
        bool is_off;
        struct generic_pm_domain pd;
+       struct clk *oscclk;
+       struct clk *clk[MAX_CLK_PER_DOMAIN];
+       struct clk *pclk[MAX_CLK_PER_DOMAIN];
 };
 
 static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -44,6 +50,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
        pd = container_of(domain, struct exynos_pm_domain, pd);
        base = pd->base;
 
+       /* Set oscclk as parent before powering off a domain */
+       if (!power_on) {
+               int i;
+
+               for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+                       if (IS_ERR(pd->clk[i]))
+                               break;
+                       if (clk_set_parent(pd->clk[i], pd->oscclk))
+                               pr_err("%s: error setting oscclk as parent to clock %d\n",
+                                               pd->name, i);
+               }
+       }
+
        pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
        __raw_writel(pwr, base);
 
@@ -60,6 +79,20 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
                cpu_relax();
                usleep_range(80, 100);
        }
+
+       /* Restore the original clock parents after powering on a domain */
+       if (power_on) {
+               int i;
+
+               for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+                       if (IS_ERR(pd->clk[i]))
+                               break;
+                       if (clk_set_parent(pd->clk[i], pd->pclk[i]))
+                               pr_err("%s: error setting parent to clock%d\n",
+                                               pd->name, i);
+               }
+       }
+
        return 0;
 }
 
@@ -152,9 +185,11 @@ static __init int exynos4_pm_init_power_domain(void)
 
        for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
                struct exynos_pm_domain *pd;
-               int on;
+               int on, i;
+               struct device *dev;
 
                pdev = of_find_device_by_node(np);
+               dev = &pdev->dev;
 
                pd = kzalloc(sizeof(*pd), GFP_KERNEL);
                if (!pd) {
@@ -170,6 +205,30 @@ static __init int exynos4_pm_init_power_domain(void)
                pd->pd.power_on = exynos_pd_power_on;
                pd->pd.of_node = np;
 
+               pd->oscclk = clk_get(dev, "oscclk");
+               if (IS_ERR(pd->oscclk))
+                       goto no_clk;
+
+               for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
+                       char clk_name[8];
+
+                       snprintf(clk_name, sizeof(clk_name), "clk%d", i);
+                       pd->clk[i] = clk_get(dev, clk_name);
+                       if (IS_ERR(pd->clk[i]))
+                               break;
+                       snprintf(clk_name, sizeof(clk_name), "pclk%d", i);
+                       pd->pclk[i] = clk_get(dev, clk_name);
+                       if (IS_ERR(pd->pclk[i])) {
+                               clk_put(pd->clk[i]);
+                               pd->clk[i] = ERR_PTR(-EINVAL);
+                               break;
+                       }
+               }
+
+               if (IS_ERR(pd->clk[0]))
+                       clk_put(pd->oscclk);
+
+no_clk:
                platform_set_drvdata(pdev, pd);
 
                on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
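
Putting the clock handling above together: before a domain is powered off, each clkN mux is parked on the always-on oscillator, and after power-up it is reparented back to pclkN - the restore step implies the mux selection does not survive the off/on cycle. Compressed to its essentials (a sketch using the pd->clk[]/pd->pclk[] pairing established above):

/* Sketch: one power cycle of a domain, for clock pair i. */
static void pd_cycle_one_clk(struct exynos_pm_domain *pd, int i)
{
	clk_set_parent(pd->clk[i], pd->oscclk);	/* park before power-off */
	/* ... domain powered off, then back on ... */
	clk_set_parent(pd->clk[i], pd->pclk[i]);	/* restore original parent */
}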
index 830b76e..a5960e2 100644 (file)
@@ -1,7 +1,6 @@
 config ARCH_HIGHBANK
        bool "Calxeda ECX-1000/2000 (Highbank/Midway)" if ARCH_MULTI_V7
        select ARCH_DMA_ADDR_T_64BIT if ARM_LPAE
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_HAS_OPP
        select ARCH_SUPPORTS_BIG_ENDIAN
index 8d42eab..4b51857 100644 (file)
@@ -1,6 +1,5 @@
-config ARCH_MXC
+menuconfig ARCH_MXC
        bool "Freescale i.MX family" if ARCH_MULTI_V4_V5 || ARCH_MULTI_V6_V7
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_REQUIRE_GPIOLIB
        select ARM_CPU_SUSPEND if PM
@@ -13,8 +12,7 @@ config ARCH_MXC
        help
          Support for Freescale MXC/iMX-based family of processors
 
-menu "Freescale i.MX support"
-       depends on ARCH_MXC
+if ARCH_MXC
 
 config MXC_TZIC
        bool
@@ -99,7 +97,6 @@ config SOC_IMX25
 
 config SOC_IMX27
        bool
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select CPU_ARM926T
        select IMX_HAVE_IOMUX_V1
@@ -124,7 +121,6 @@ config SOC_IMX35
 
 config SOC_IMX5
        bool
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_MXC_IOMUX_V3
        select MXC_TZIC
@@ -738,9 +734,9 @@ config SOC_IMX6
        select HAVE_IMX_MMDC
        select HAVE_IMX_SRC
        select MFD_SYSCON
-       select PL310_ERRATA_588369 if CACHE_PL310
-       select PL310_ERRATA_727915 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
 
 config SOC_IMX6Q
        bool "i.MX6 Quad/DualLite support"
@@ -775,9 +771,9 @@ config SOC_VF610
        select ARM_GIC
        select PINCTRL_VF610
        select VF_PIT_TIMER
-       select PL310_ERRATA_588369 if CACHE_PL310
-       select PL310_ERRATA_727915 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
 
        help
          This enables support for the Freescale Vybrid VF610 processor.
@@ -786,4 +782,4 @@ endif
 
 source "arch/arm/mach-imx/devices/Kconfig"
 
-endmenu
+endif
index 4ba587d..84acdfd 100644 (file)
@@ -67,8 +67,12 @@ static void clk_gate2_disable(struct clk_hw *hw)
 
        spin_lock_irqsave(gate->lock, flags);
 
-       if (gate->share_count && --(*gate->share_count) > 0)
-               goto out;
+       if (gate->share_count) {
+               if (WARN_ON(*gate->share_count == 0))
+                       goto out;
+               else if (--(*gate->share_count) > 0)
+                       goto out;
+       }
 
        reg = readl(gate->reg);
        reg &= ~(3 << gate->bit_idx);
@@ -78,19 +82,26 @@ out:
        spin_unlock_irqrestore(gate->lock, flags);
 }
 
-static int clk_gate2_is_enabled(struct clk_hw *hw)
+static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
 {
-       u32 reg;
-       struct clk_gate2 *gate = to_clk_gate2(hw);
+       u32 val = readl(reg);
 
-       reg = readl(gate->reg);
-
-       if (((reg >> gate->bit_idx) & 1) == 1)
+       if (((val >> bit_idx) & 1) == 1)
                return 1;
 
        return 0;
 }
 
+static int clk_gate2_is_enabled(struct clk_hw *hw)
+{
+       struct clk_gate2 *gate = to_clk_gate2(hw);
+
+       if (gate->share_count)
+               return !!(*gate->share_count);
+       else
+               return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
+}
+
 static struct clk_ops clk_gate2_ops = {
        .enable = clk_gate2_enable,
        .disable = clk_gate2_disable,
@@ -116,6 +127,10 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
        gate->bit_idx = bit_idx;
        gate->flags = clk_gate2_flags;
        gate->lock = lock;
+
+       /* Initialize share_count from the current hardware state */
+       if (share_count)
+               *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
        gate->share_count = share_count;
 
        init.name = name;
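
The seeding added above matters whenever two clocks share one gate through a common share_count: if the bootloader left the gate enabled but the count started at zero, the first disable on one sharer would underflow the count (now caught by the WARN_ON in clk_gate2_disable()) instead of balancing against the other sharer. A self-contained toy model of the fixed disable path (plain C, deliberately not kernel code):

#include <assert.h>

struct toy_gate2 {
	int hw_enabled;			/* models the 2-bit gate field */
	unsigned int *share_count;	/* common to all users of the gate */
};

static void toy_gate2_disable(struct toy_gate2 *g)
{
	if (g->share_count) {
		if (*g->share_count == 0)	/* WARN_ON() in the kernel */
			return;
		if (--(*g->share_count) > 0)	/* another sharer still on */
			return;
	}
	g->hw_enabled = 0;			/* last user: gate the clock */
}

int main(void)
{
	unsigned int count = 1;	/* seeded from the hardware state */
	struct toy_gate2 g = { .hw_enabled = 1, .share_count = &count };

	toy_gate2_disable(&g);	/* last user: the hardware really gates */
	assert(g.hw_enabled == 0 && count == 0);
	toy_gate2_disable(&g);	/* unbalanced extra call: no underflow */
	assert(count == 0);
	return 0;
}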
index 8e795de..8556c78 100644 (file)
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
 static const char *lvds_sels[] = {
        "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
        "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
-       "pcie_ref", "sata_ref",
+       "pcie_ref_125m", "sata_ref_100m",
 };
 
 enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
 
        /* All existing boards with PCIe use LVDS1 */
        if (IS_ENABLED(CONFIG_PCI_IMX6))
-               clk_set_parent(clk[lvds1_sel], clk[sata_ref]);
+               clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
 
        /* Set initial power mode */
        imx6q_set_lpm(WAIT_CLOCKED);
index 21cf06c..5408ca7 100644 (file)
@@ -312,6 +312,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
        clks[IMX6SL_CLK_ECSPI2]       = imx_clk_gate2("ecspi2",       "ecspi_root",        base + 0x6c, 2);
        clks[IMX6SL_CLK_ECSPI3]       = imx_clk_gate2("ecspi3",       "ecspi_root",        base + 0x6c, 4);
        clks[IMX6SL_CLK_ECSPI4]       = imx_clk_gate2("ecspi4",       "ecspi_root",        base + 0x6c, 6);
+       clks[IMX6SL_CLK_ENET]         = imx_clk_gate2("enet",         "ipg",               base + 0x6c, 10);
        clks[IMX6SL_CLK_EPIT1]        = imx_clk_gate2("epit1",        "perclk",            base + 0x6c, 12);
        clks[IMX6SL_CLK_EPIT2]        = imx_clk_gate2("epit2",        "perclk",            base + 0x6c, 14);
        clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16);
index ba43321..64f8e25 100644 (file)
@@ -28,7 +28,7 @@ config ARCH_CINTEGRATOR
        bool
 
 config INTEGRATOR_IMPD1
-       tristate "Include support for Integrator/IM-PD1"
+       bool "Include support for Integrator/IM-PD1"
        depends on ARCH_INTEGRATOR_AP
        select ARCH_REQUIRE_GPIOLIB
        select ARM_VIC
index 0e870ea..3ce8807 100644 (file)
@@ -308,7 +308,12 @@ static struct impd1_device impd1_devs[] = {
  */
 #define IMPD1_VALID_IRQS 0x00000bffU
 
-static int __init impd1_probe(struct lm_device *dev)
+/*
+ * As this driver is built-in (bool), it is OK to mark this __init_refok -
+ * no probe calls will be made after the initial system bootup, as devices
+ * are discovered as part of the machine startup.
+ */
+static int __init_refok impd1_probe(struct lm_device *dev)
 {
        struct impd1_module *impd1;
        int irq_base;
@@ -397,6 +402,11 @@ static void impd1_remove(struct lm_device *dev)
 static struct lm_driver impd1_driver = {
        .drv = {
                .name   = "impd1",
+               /*
+                * As we're dropping the probe() function, suppress driver
+                * binding from sysfs.
+                */
+               .suppress_bind_attrs = true,
        },
        .probe          = impd1_probe,
        .remove         = impd1_remove,
index dd0cc67..660ca6f 100644 (file)
@@ -480,25 +480,18 @@ static const struct of_device_id ebi_match[] = {
 static void __init ap_init_of(void)
 {
        unsigned long sc_dec;
-       struct device_node *root;
        struct device_node *syscon;
        struct device_node *ebi;
        struct device *parent;
        struct soc_device *soc_dev;
        struct soc_device_attribute *soc_dev_attr;
        u32 ap_sc_id;
-       int err;
        int i;
 
-       /* Here we create an SoC device for the root node */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return;
-
-       syscon = of_find_matching_node(root, ap_syscon_match);
+       syscon = of_find_matching_node(NULL, ap_syscon_match);
        if (!syscon)
                return;
-       ebi = of_find_matching_node(root, ebi_match);
+       ebi = of_find_matching_node(NULL, ebi_match);
        if (!ebi)
                return;
 
@@ -509,19 +502,17 @@ static void __init ap_init_of(void)
        if (!ebi_base)
                return;
 
+       of_platform_populate(NULL, of_default_bus_match_table,
+                       ap_auxdata_lookup, NULL);
+
        ap_sc_id = readl(ap_syscon_base);
 
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return;
 
-       err = of_property_read_string(root, "compatible",
-                                     &soc_dev_attr->soc_id);
-       if (err)
-               return;
-       err = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (err)
-               return;
+       soc_dev_attr->soc_id = "XVC";
+       soc_dev_attr->machine = "Integrator/AP";
        soc_dev_attr->family = "Integrator";
        soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
                                           'A' + (ap_sc_id & 0x0f));
@@ -536,9 +527,6 @@ static void __init ap_init_of(void)
        parent = soc_device_to_device(soc_dev);
        integrator_init_sysfs(parent, ap_sc_id);
 
-       of_platform_populate(root, of_default_bus_match_table,
-                       ap_auxdata_lookup, parent);
-
        sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
        for (i = 0; i < 4; i++) {
                struct lm_device *lmdev;
index a938242..0e57f8f 100644 (file)
@@ -279,20 +279,13 @@ static const struct of_device_id intcp_syscon_match[] = {
 
 static void __init intcp_init_of(void)
 {
-       struct device_node *root;
        struct device_node *cpcon;
        struct device *parent;
        struct soc_device *soc_dev;
        struct soc_device_attribute *soc_dev_attr;
        u32 intcp_sc_id;
-       int err;
 
-       /* Here we create an SoC device for the root node */
-       root = of_find_node_by_path("/");
-       if (!root)
-               return;
-
-       cpcon = of_find_matching_node(root, intcp_syscon_match);
+       cpcon = of_find_matching_node(NULL, intcp_syscon_match);
        if (!cpcon)
                return;
 
@@ -300,19 +293,17 @@ static void __init intcp_init_of(void)
        if (!intcp_con_base)
                return;
 
+       of_platform_populate(NULL, of_default_bus_match_table,
+                            intcp_auxdata_lookup, NULL);
+
        intcp_sc_id = readl(intcp_con_base);
 
        soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
        if (!soc_dev_attr)
                return;
 
-       err = of_property_read_string(root, "compatible",
-                                     &soc_dev_attr->soc_id);
-       if (err)
-               return;
-       err = of_property_read_string(root, "model", &soc_dev_attr->machine);
-       if (err)
-               return;
+       soc_dev_attr->soc_id = "XCV";
+       soc_dev_attr->machine = "Integrator/CP";
        soc_dev_attr->family = "Integrator";
        soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
                                           'A' + (intcp_sc_id & 0x0f));
@@ -326,8 +317,6 @@ static void __init intcp_init_of(void)
 
        parent = soc_device_to_device(soc_dev);
        integrator_init_sysfs(parent, intcp_sc_id);
-       of_platform_populate(root, of_default_bus_match_table,
-                       intcp_auxdata_lookup, parent);
 }
 
 static const char * intcp_dt_board_compat[] = {
index f50bc93..98a156a 100644 (file)
@@ -1,6 +1,7 @@
 config ARCH_KEYSTONE
        bool "Texas Instruments Keystone Devices"
        depends on ARCH_MULTI_V7
+       depends on ARM_PATCH_PHYS_VIRT
        select ARM_GIC
        select HAVE_ARM_ARCH_TIMER
        select CLKSRC_MMIO
index 82a4ba8..f49328c 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_MOXART
+menuconfig ARCH_MOXART
        bool "MOXA ART SoC" if ARCH_MULTI_V4
        select CPU_FA526
        select ARM_DMA_MEM_BUFFERABLE
index 6090b9e..b9bc599 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_MVEBU
+menuconfig ARCH_MVEBU
        bool "Marvell Engineering Business Unit (MVEBU) SoCs" if (ARCH_MULTI_V7 || ARCH_MULTI_V5)
        select ARCH_SUPPORTS_BIG_ENDIAN
        select CLKSRC_MMIO
@@ -10,15 +10,15 @@ config ARCH_MVEBU
        select ZONE_DMA if ARM_LPAE
        select ARCH_REQUIRE_GPIOLIB
        select PCI_QUIRKS if PCI
+       select OF_ADDRESS_PCI
 
 if ARCH_MVEBU
 
-menu "Marvell EBU SoC variants"
-
 config MACH_MVEBU_V7
        bool
        select ARMADA_370_XP_TIMER
        select CACHE_L2X0
+       select ARM_CPU_SUSPEND
 
 config MACH_ARMADA_370
        bool "Marvell Armada 370 boards" if ARCH_MULTI_V7
@@ -84,7 +84,6 @@ config MACH_DOVE
 
 config MACH_KIRKWOOD
        bool "Marvell Kirkwood boards" if ARCH_MULTI_V5
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select CPU_FEROCEON
        select KIRKWOOD_CLK
@@ -97,6 +96,4 @@ config MACH_KIRKWOOD
          Say 'Y' here if you want your kernel to support boards based
          on the Marvell Kirkwood device tree.
 
-endmenu
-
 endif
index 2ecb828..1636cdb 100644 (file)
@@ -7,7 +7,7 @@ CFLAGS_pmsu.o                   := -march=armv7-a
 obj-y                           += system-controller.o mvebu-soc-id.o
 
 ifeq ($(CONFIG_MACH_MVEBU_V7),y)
-obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o
+obj-y                           += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
 obj-$(CONFIG_SMP)               += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
 obj-$(CONFIG_HOTPLUG_CPU)       += hotplug.o
 endif
index 8bb742f..b2524d6 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/mbus.h>
 #include <linux/signal.h>
 #include <linux/slab.h>
+#include <linux/irqchip.h>
 #include <asm/hardware/cache-l2x0.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -71,17 +72,23 @@ static int armada_375_external_abort_wa(unsigned long addr, unsigned int fsr,
        return 1;
 }
 
-static void __init mvebu_timer_and_clk_init(void)
+static void __init mvebu_init_irq(void)
 {
-       of_clk_init(NULL);
-       clocksource_of_init();
+       irqchip_init();
        mvebu_scu_enable();
        coherency_init();
        BUG_ON(mvebu_mbus_dt_init(coherency_available()));
+}
+
+static void __init external_abort_quirk(void)
+{
+       u32 dev, rev;
 
-       if (of_machine_is_compatible("marvell,armada375"))
-               hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
-                               "imprecise external abort");
+       if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > ARMADA_375_Z1_REV)
+               return;
+
+       hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
+                       "imprecise external abort");
 }
 
 static void __init i2c_quirk(void)
@@ -169,8 +176,10 @@ static void __init mvebu_dt_init(void)
 {
        if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
                i2c_quirk();
-       if (of_machine_is_compatible("marvell,a375-db"))
+       if (of_machine_is_compatible("marvell,a375-db")) {
+               external_abort_quirk();
                thermal_quirk();
+       }
 
        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
 }
@@ -185,7 +194,7 @@ DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
        .l2c_aux_mask   = ~0,
        .smp            = smp_ops(armada_xp_smp_ops),
        .init_machine   = mvebu_dt_init,
-       .init_time      = mvebu_timer_and_clk_init,
+       .init_irq       = mvebu_init_irq,
        .restart        = mvebu_restart,
        .dt_compat      = armada_370_xp_dt_compat,
 MACHINE_END
@@ -198,7 +207,7 @@ static const char * const armada_375_dt_compat[] = {
 DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
-       .init_time      = mvebu_timer_and_clk_init,
+       .init_irq       = mvebu_init_irq,
        .init_machine   = mvebu_dt_init,
        .restart        = mvebu_restart,
        .dt_compat      = armada_375_dt_compat,
@@ -213,7 +222,7 @@ static const char * const armada_38x_dt_compat[] = {
 DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
-       .init_time      = mvebu_timer_and_clk_init,
+       .init_irq       = mvebu_init_irq,
        .restart        = mvebu_restart,
        .dt_compat      = armada_38x_dt_compat,
 MACHINE_END
index 477202f..2bdc323 100644 (file)
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
        .notifier_call = mvebu_hwcc_notifier,
 };
 
+static struct notifier_block mvebu_hwcc_pci_nb = {
+       .notifier_call = mvebu_hwcc_notifier,
+};
+
 static void __init armada_370_coherency_init(struct device_node *np)
 {
        struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
 {
        if (coherency_available())
                bus_register_notifier(&pci_bus_type,
-                                      &mvebu_hwcc_nb);
+                                      &mvebu_hwcc_pci_nb);
        return 0;
 }
 
index 5925366..da5bb29 100644 (file)
@@ -15,6 +15,8 @@
 #include <linux/linkage.h>
 #include <linux/init.h>
 
+#include <asm/assembler.h>
+
        __CPUINIT
 #define CPU_RESUME_ADDR_REG 0xf10182d4
 
 .global armada_375_smp_cpu1_enable_code_end
 
 armada_375_smp_cpu1_enable_code_start:
-       ldr     r0, [pc, #4]
+ARM_BE8(setend be)
+       adr     r0, 1f
+       ldr     r0, [r0]
        ldr     r1, [r0]
+ARM_BE8(rev    r1, r1)
        mov     pc, r1
+1:
        .word   CPU_RESUME_ADDR_REG
 armada_375_smp_cpu1_enable_code_end:
 
 ENTRY(mvebu_cortex_a9_secondary_startup)
+ARM_BE8(setend be)
        bl      v7_invalidate_l1
        b       secondary_startup
 ENDPROC(mvebu_cortex_a9_secondary_startup)
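
Two fixes are folded into the stub above: it becomes endian-clean for BE8 kernels entered in little-endian state (the setend/rev pair), and the hand-counted `ldr r0, [pc, #4]` becomes an adr to a named label. Why the old form was fragile (ARM state assumed, where the PC reads as the current instruction plus 8):

/*
 * Old: ldr r0, [pc, #4]    @ PC = this insn + 8, so this hard-codes the
 *                          @ .word at insn + 12 and silently breaks if
 *                          @ any instruction is inserted in between
 * New: adr r0, 1f          @ the assembler computes the offset to the
 *      ldr r0, [r0]        @ 1: label, so adding the ARM_BE8() lines
 *                          @ is safe
 */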
index 53a55c8..25aa823 100644 (file)
@@ -66,6 +66,8 @@ static void __iomem *pmsu_mp_base;
 extern void ll_disable_coherency(void);
 extern void ll_enable_coherency(void);
 
+extern void armada_370_xp_cpu_resume(void);
+
 static struct platform_device armada_xp_cpuidle_device = {
        .name = "cpuidle-armada-370-xp",
 };
@@ -140,13 +142,6 @@ static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
        writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
 }
 
-static void armada_370_xp_cpu_resume(void)
-{
-       asm volatile("bl    ll_add_cpu_to_smp_group\n\t"
-                    "bl    ll_enable_coherency\n\t"
-                    "b     cpu_resume\n\t");
-}
-
 /* No locking is needed because we only access per-CPU registers */
 void armada_370_xp_pmsu_idle_prepare(bool deepidle)
 {
@@ -206,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
 
        /* Test the CR_C bit and set it if it was cleared */
        asm volatile(
-       "mrc    p15, 0, %0, c1, c0, 0 \n\t"
-       "tst    %0, #(1 << 2) \n\t"
-       "orreq  %0, %0, #(1 << 2) \n\t"
-       "mcreq  p15, 0, %0, c1, c0, 0 \n\t"
+       "mrc    p15, 0, r0, c1, c0, 0 \n\t"
+       "tst    r0, #(1 << 2) \n\t"
+       "orreq  r0, r0, #(1 << 2) \n\t"
+       "mcreq  p15, 0, r0, c1, c0, 0 \n\t"
        "isb    "
-       : : "r" (0));
+       : : : "r0");
 
        pr_warn("Failed to suspend the system\n");
 
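The inline-asm change above is a constraint fix: the old code passed "r" (0) as an input operand and then wrote through %0, misinforming the compiler about what that register holds afterwards. The corrected version names r0 explicitly and lists it as a clobber. The general pattern, as a sketch of GCC extended-asm rules:

/* Wrong: %0 is declared as an input, yet the asm overwrites it. */
asm volatile("mrc p15, 0, %0, c1, c0, 0" : : "r" (0));

/* Right: clobber the hard-coded register ... */
asm volatile("mrc p15, 0, r0, c1, c0, 0" : : : "r0");

/* ... or make it an output so the compiler allocates a register. */
unsigned int cr;
asm volatile("mrc p15, 0, %0, c1, c0, 0" : "=r" (cr));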
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
new file mode 100644 (file)
index 0000000..fc3de68
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Marvell
+ *
+ * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
+ * Gregory Clement <gregory.clement@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2.  This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+
+/*
+ * This is the entry point through which CPUs exit the cpuidle deep
+ * idle state.
+ */
+ENTRY(armada_370_xp_cpu_resume)
+ARM_BE8(setend be)                     @ go BE8 if entered LE
+       bl      ll_add_cpu_to_smp_group
+       bl      ll_enable_coherency
+       b       cpu_resume
+ENDPROC(armada_370_xp_cpu_resume)
+
index 486d301..3c61096 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_NOMADIK
+menuconfig ARCH_NOMADIK
        bool "ST-Ericsson Nomadik"
        depends on ARCH_MULTI_V5
        select ARCH_REQUIRE_GPIOLIB
@@ -15,7 +15,6 @@ config ARCH_NOMADIK
          Support for the Nomadik platform by ST-Ericsson
 
 if ARCH_NOMADIK
-menu "Nomadik boards"
 
 config MACH_NOMADIK_8815NHK
        bool "ST 8815 Nomadik Hardware Kit (evaluation board)"
@@ -24,7 +23,6 @@ config MACH_NOMADIK_8815NHK
        select I2C_ALGOBIT
        select I2C_NOMADIK
 
-endmenu
 endif
 
 config NOMADIK_8815
index 0ba4826..1c1ed73 100644 (file)
@@ -1,3 +1,6 @@
+menu "TI OMAP/AM/DM/DRA Family"
+       depends on ARCH_MULTI_V6 || ARCH_MULTI_V7
+
 config ARCH_OMAP
        bool
 
@@ -28,12 +31,11 @@ config ARCH_OMAP4
        select ARM_CPU_SUSPEND if PM
        select ARM_ERRATA_720789
        select ARM_GIC
-       select CACHE_L2X0
        select HAVE_ARM_SCU if SMP
        select HAVE_ARM_TWD if SMP
        select OMAP_INTERCONNECT
-       select PL310_ERRATA_588369
-       select PL310_ERRATA_727915
+       select PL310_ERRATA_588369 if CACHE_L2X0
+       select PL310_ERRATA_727915 if CACHE_L2X0
        select PM_OPP if PM
        select PM_RUNTIME if CPU_IDLE
        select ARM_ERRATA_754322
@@ -80,7 +82,6 @@ config SOC_DRA7XX
 config ARCH_OMAP2PLUS
        bool
        select ARCH_HAS_BANDGAP
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_HOLES_MEMORYMODEL
        select ARCH_OMAP
        select ARCH_REQUIRE_GPIOLIB
@@ -343,3 +344,5 @@ config OMAP4_ERRATA_I688
 endmenu
 
 endif
+
+endmenu
index 8421f38..8ca99e9 100644 (file)
@@ -110,14 +110,16 @@ obj-y                                     += prm_common.o cm_common.o
 obj-$(CONFIG_ARCH_OMAP2)               += prm2xxx_3xxx.o prm2xxx.o cm2xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += prm2xxx_3xxx.o prm3xxx.o cm3xxx.o
 obj-$(CONFIG_ARCH_OMAP3)               += vc3xxx_data.o vp3xxx_data.o
-obj-$(CONFIG_SOC_AM33XX)               += prm33xx.o cm33xx.o
 omap-prcm-4-5-common                   =  cminst44xx.o cm44xx.o prm44xx.o \
                                           prcm_mpu44xx.o prminst44xx.o \
                                           vc44xx_data.o vp44xx_data.o
 obj-$(CONFIG_ARCH_OMAP4)               += $(omap-prcm-4-5-common)
 obj-$(CONFIG_SOC_OMAP5)                        += $(omap-prcm-4-5-common)
 obj-$(CONFIG_SOC_DRA7XX)               += $(omap-prcm-4-5-common)
-obj-$(CONFIG_SOC_AM43XX)               += $(omap-prcm-4-5-common)
+am33xx-43xx-prcm-common                        += prm33xx.o cm33xx.o
+obj-$(CONFIG_SOC_AM33XX)               += $(am33xx-43xx-prcm-common)
+obj-$(CONFIG_SOC_AM43XX)               += $(omap-prcm-4-5-common) \
+                                          $(am33xx-43xx-prcm-common)
 
 # OMAP voltage domains
 voltagedomain-common                   := voltage.o vc.o vp.o
index 332af92..67fd26a 100644 (file)
@@ -76,7 +76,7 @@
  * (assuming that it is counting N upwards), or -2 if the enclosing loop
  * should skip to the next iteration (again assuming N is increasing).
  */
-static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n)
+static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
 {
        struct dpll_data *dd;
        long fint, fint_min, fint_max;
index 04dab2f..ee6c784 100644 (file)
 #define OMAP3430_EN_WDT3_SHIFT                         12
 #define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK           (1 << 0)
 #define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT          0
+#define OMAP3430_IVA2_DPLL_FREQSEL_SHIFT               4
 #define OMAP3430_IVA2_DPLL_FREQSEL_MASK                        (0xf << 4)
 #define OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT         3
+#define OMAP3430_EN_IVA2_DPLL_SHIFT                    0
 #define OMAP3430_EN_IVA2_DPLL_MASK                     (0x7 << 0)
 #define OMAP3430_ST_IVA2_SHIFT                         0
 #define OMAP3430_ST_IVA2_CLK_MASK                      (1 << 0)
+#define OMAP3430_AUTO_IVA2_DPLL_SHIFT                  0
 #define OMAP3430_AUTO_IVA2_DPLL_MASK                   (0x7 << 0)
 #define OMAP3430_IVA2_CLK_SRC_SHIFT                    19
 #define OMAP3430_IVA2_CLK_SRC_WIDTH                    3
index 15a778c..bd24417 100644 (file)
@@ -380,7 +380,7 @@ void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs);
 void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs);
 void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs);
 
-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
                                        u16 clkctrl_offs);
 extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
index ff02973..dc571f1 100644 (file)
@@ -91,7 +91,14 @@ extern void omap3_sync32k_timer_init(void);
 extern void omap3_secure_sync32k_timer_init(void);
 extern void omap3_gptimer_timer_init(void);
 extern void omap4_local_timer_init(void);
+#ifdef CONFIG_CACHE_L2X0
 int omap_l2_cache_init(void);
+#else
+static inline int omap_l2_cache_init(void)
+{
+       return 0;
+}
+#endif
 extern void omap5_realtime_timer_init(void);
 
 void omap2420_init_early(void);
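
The omap_l2_cache_init() stub in the first hunk above is the standard idiom for keeping call sites unconditional: callers always invoke the function, and when CONFIG_CACHE_L2X0 is disabled the static inline returning 0 compiles away. The same shape works for any optional feature (generic sketch; "foo" is illustrative):

#ifdef CONFIG_FOO
int foo_init(void);			/* real implementation elsewhere */
#else
static inline int foo_init(void)	/* no code emitted when FOO is off */
{
	return 0;
}
#endif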
@@ -155,7 +162,8 @@ static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd)
 }
 #endif
 
-#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5)
+#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
+       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
 void omap44xx_restart(enum reboot_mode mode, const char *cmd);
 #else
 static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
@@ -241,7 +249,6 @@ static inline void __iomem *omap4_get_scu_base(void)
 }
 #endif
 
-extern void __init gic_init_irq(void);
 extern void gic_dist_disable(void);
 extern void gic_dist_enable(void);
 extern bool gic_dist_disabled(void);
index 592ba0a..b6f8f34 100644 (file)
@@ -297,33 +297,6 @@ static void omap_init_audio(void)
 static inline void omap_init_audio(void) {}
 #endif
 
-#if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
-               defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
-
-static struct platform_device omap_hdmi_audio = {
-       .name   = "omap-hdmi-audio",
-       .id     = -1,
-};
-
-static void __init omap_init_hdmi_audio(void)
-{
-       struct omap_hwmod *oh;
-       struct platform_device *pdev;
-
-       oh = omap_hwmod_lookup("dss_hdmi");
-       if (!oh)
-               return;
-
-       pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
-       WARN(IS_ERR(pdev),
-            "Can't build omap_device for omap-hdmi-audio-dai.\n");
-
-       platform_device_register(&omap_hdmi_audio);
-}
-#else
-static inline void omap_init_hdmi_audio(void) {}
-#endif
-
 #if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
 
 #include <linux/platform_data/spi-omap2-mcspi.h>
@@ -459,7 +432,6 @@ static int __init omap2_init_devices(void)
         */
        omap_init_audio();
        omap_init_camera();
-       omap_init_hdmi_audio();
        omap_init_mbox();
        /* If dtb is there, the devices will be created dynamically */
        if (!of_have_populated_dt()) {
index b8208b4..f7492df 100644 (file)
@@ -29,6 +29,7 @@
 #ifdef CONFIG_TIDSPBRIDGE_DVFS
 #include "omap-pm.h"
 #endif
+#include "soc.h"
 
 #include <linux/platform_data/dsp-omap.h>
 
@@ -59,6 +60,9 @@ void __init omap_dsp_reserve_sdram_memblock(void)
        phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
        phys_addr_t paddr;
 
+       if (!cpu_is_omap34xx())
+               return;
+
        if (!size)
                return;
 
@@ -83,6 +87,9 @@ static int __init omap_dsp_init(void)
        int err = -ENOMEM;
        struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
 
+       if (!cpu_is_omap34xx())
+               return 0;
+
        pdata->phys_mempool_base = omap_dsp_get_mempool_base();
 
        if (pdata->phys_mempool_base) {
@@ -115,6 +122,9 @@ module_init(omap_dsp_init);
 
 static void __exit omap_dsp_exit(void)
 {
+       if (!cpu_is_omap34xx())
+               return;
+
        platform_device_unregister(omap_dsp_pdev);
 }
 module_exit(omap_dsp_exit);
index 17cd393..93914d2 100644 (file)
@@ -50,6 +50,16 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
                 soc_is_omap54xx() || soc_is_dra7xx())
                return 1;
 
+       if (ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW ||
+                ecc_opt == OMAP_ECC_BCH8_CODE_HW_DETECTION_SW) {
+               if (cpu_is_omap24xx())
+                       return 0;
+               else if (cpu_is_omap3630() && (GET_OMAP_REVISION() == 0))
+                       return 0;
+               else
+                       return 1;
+       }
+
        /* OMAP3xxx do not have ELM engine, so cannot support ECC schemes
         * which require H/W based ECC error detection */
        if ((cpu_is_omap34xx() || cpu_is_omap3630()) &&
@@ -57,14 +67,6 @@ static bool gpmc_hwecc_bch_capable(enum omap_ecc ecc_opt)
                 (ecc_opt == OMAP_ECC_BCH8_CODE_HW)))
                return 0;
 
-       /*
-        * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1
-        * and AM33xx derivates. Other chips may be added if confirmed to work.
-        */
-       if ((ecc_opt == OMAP_ECC_BCH4_CODE_HW_DETECTION_SW) &&
-           (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0)))
-               return 0;
-
        /* legacy platforms support only HAM1 (1-bit Hamming) ECC scheme */
        if (ecc_opt == OMAP_ECC_HAM1_CODE_HW)
                return 1;
index 2c0c281..8bc1338 100644 (file)
@@ -1615,7 +1615,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
                return ret;
        }
 
-       for_each_child_of_node(pdev->dev.of_node, child) {
+       for_each_available_child_of_node(pdev->dev.of_node, child) {
 
                if (!child->name)
                        continue;
index 43969da..d42022f 100644 (file)
@@ -649,6 +649,18 @@ void __init dra7xxx_check_revision(void)
                }
                break;
 
+       case 0xb9bc:
+               switch (rev) {
+               case 0:
+                       omap_revision = DRA722_REV_ES1_0;
+                       break;
+               default:
+                       /* Default to the latest known revision */
+                       omap_revision = DRA722_REV_ES1_0;
+                       break;
+               }
+               break;
+
        default:
                /* Unknown: default to the latest silicon revision */
                pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%x)\n",
index fd88ede..f62f753 100644 (file)
@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
                m0_entry = mux->muxnames[0];
 
                /* First check for full name in mode0.muxmode format */
-               if (mode0_len && strncmp(muxname, m0_entry, mode0_len))
-                       continue;
+               if (mode0_len)
+                       if (strncmp(muxname, m0_entry, mode0_len) ||
+                           (strlen(m0_entry) != mode0_len))
+                               continue;
 
                /* Then check for muxmode only */
                for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
index 326cd98..a0fe747 100644 (file)
@@ -102,26 +102,6 @@ void __init omap_barriers_init(void)
 {}
 #endif
 
-void __init gic_init_irq(void)
-{
-       void __iomem *omap_irq_base;
-
-       /* Static mapping, never released */
-       gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
-       BUG_ON(!gic_dist_base_addr);
-
-       twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_4K);
-       BUG_ON(!twd_base);
-
-       /* Static mapping, never released */
-       omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
-       BUG_ON(!omap_irq_base);
-
-       omap_wakeupgen_init();
-
-       gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
-}
-
 void gic_dist_disable(void)
 {
        if (gic_dist_base_addr)
@@ -188,6 +168,10 @@ static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
                smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
                break;
 
+       case L310_POWER_CTRL:
+               pr_info_once("OMAP L2C310: ROM does not support power control setting\n");
+               return;
+
        default:
                WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
                return;
index f7bb435..6c074f3 100644 (file)
@@ -4251,9 +4251,9 @@ void __init omap_hwmod_init(void)
                soc_ops.enable_module = _omap4_enable_module;
                soc_ops.disable_module = _omap4_disable_module;
                soc_ops.wait_target_ready = _omap4_wait_target_ready;
-               soc_ops.assert_hardreset = _omap4_assert_hardreset;
-               soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
-               soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
+               soc_ops.assert_hardreset = _am33xx_assert_hardreset;
+               soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
+               soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
                soc_ops.init_clkdm = _init_clkdm;
        } else if (soc_is_am33xx()) {
                soc_ops.enable_module = _am33xx_enable_module;
index 290213f..1103aa0 100644 (file)
@@ -2020,6 +2020,77 @@ static struct omap_hwmod omap54xx_wd_timer2_hwmod = {
        },
 };
 
+/*
+ * 'ocp2scp' class
+ * bridge to transform ocp interface protocol to scp (serial control port)
+ * protocol
+ */
+/* ocp2scp3 */
+static struct omap_hwmod omap54xx_ocp2scp3_hwmod;
+/* l4_cfg -> ocp2scp3 */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp3 = {
+       .master         = &omap54xx_l4_cfg_hwmod,
+       .slave          = &omap54xx_ocp2scp3_hwmod,
+       .clk            = "l4_root_clk_div",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod omap54xx_ocp2scp3_hwmod = {
+       .name           = "ocp2scp3",
+       .class          = &omap54xx_ocp2scp_hwmod_class,
+       .clkdm_name     = "l3init_clkdm",
+       .prcm = {
+               .omap4 = {
+                       .clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET,
+                       .context_offs = OMAP54XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET,
+                       .modulemode   = MODULEMODE_HWCTRL,
+               },
+       },
+};
+
+/*
+ * 'sata' class
+ * sata: serial ATA interface, gen2 compliant (1 rx / 1 tx)
+ */
+
+static struct omap_hwmod_class_sysconfig omap54xx_sata_sysc = {
+       .sysc_offs      = 0x0000,
+       .sysc_flags     = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+       .sysc_fields    = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class omap54xx_sata_hwmod_class = {
+       .name   = "sata",
+       .sysc   = &omap54xx_sata_sysc,
+};
+
+/* sata */
+static struct omap_hwmod omap54xx_sata_hwmod = {
+       .name           = "sata",
+       .class          = &omap54xx_sata_hwmod_class,
+       .clkdm_name     = "l3init_clkdm",
+       .flags          = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
+       .main_clk       = "func_48m_fclk",
+       .mpu_rt_idx     = 1,
+       .prcm = {
+               .omap4 = {
+                       .clkctrl_offs = OMAP54XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
+                       .context_offs = OMAP54XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
+                       .modulemode   = MODULEMODE_SWCTRL,
+               },
+       },
+};
+
+/* l4_cfg -> sata */
+static struct omap_hwmod_ocp_if omap54xx_l4_cfg__sata = {
+       .master         = &omap54xx_l4_cfg_hwmod,
+       .slave          = &omap54xx_sata_hwmod,
+       .clk            = "l3_iclk_div",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
 
 /*
  * Interfaces
@@ -2765,6 +2836,8 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
        &omap54xx_l4_cfg__usb_tll_hs,
        &omap54xx_l4_cfg__usb_otg_ss,
        &omap54xx_l4_wkup__wd_timer2,
+       &omap54xx_l4_cfg__ocp2scp3,
+       &omap54xx_l4_cfg__sata,
        NULL,
 };
 
index 20b4398..284324f 100644 (file)
@@ -1268,9 +1268,6 @@ static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
 };
 
 /* sata */
-static struct omap_hwmod_opt_clk sata_opt_clks[] = {
-       { .role = "ref_clk", .clk = "sata_ref_clk" },
-};
 
 static struct omap_hwmod dra7xx_sata_hwmod = {
        .name           = "sata",
@@ -1278,6 +1275,7 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
        .clkdm_name     = "l3init_clkdm",
        .flags          = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
        .main_clk       = "func_48m_fclk",
+       .mpu_rt_idx     = 1,
        .prcm = {
                .omap4 = {
                        .clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
@@ -1285,8 +1283,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
                        .modulemode   = MODULEMODE_SWCTRL,
                },
        },
-       .opt_clks       = sata_opt_clks,
-       .opt_clks_cnt   = ARRAY_SIZE(sata_opt_clks),
 };
 
 /*
@@ -1731,8 +1727,20 @@ static struct omap_hwmod dra7xx_uart6_hwmod = {
  *
  */
 
+static struct omap_hwmod_class_sysconfig dra7xx_usb_otg_ss_sysc = {
+       .rev_offs       = 0x0000,
+       .sysc_offs      = 0x0010,
+       .sysc_flags     = (SYSC_HAS_DMADISABLE | SYSC_HAS_MIDLEMODE |
+                          SYSC_HAS_SIDLEMODE),
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                          SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
+                          MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
+       .sysc_fields    = &omap_hwmod_sysc_type2,
+};
+
 static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = {
        .name   = "usb_otg_ss",
+       .sysc   = &dra7xx_usb_otg_ss_sysc,
 };
 
 /* usb_otg_ss1 */
index 106132d..cbefbd7 100644 (file)
@@ -35,6 +35,8 @@
 #define OMAP3430_LOGICSTATEST_MASK                     (1 << 2)
 #define OMAP3430_LASTLOGICSTATEENTERED_MASK            (1 << 2)
 #define OMAP3430_LASTPOWERSTATEENTERED_MASK            (0x3 << 0)
+#define OMAP3430_GRPSEL_MCBSP5_MASK                    (1 << 10)
+#define OMAP3430_GRPSEL_MCBSP1_MASK                    (1 << 9)
 #define OMAP3630_GRPSEL_UART4_MASK                     (1 << 18)
 #define OMAP3430_GRPSEL_GPIO6_MASK                     (1 << 17)
 #define OMAP3430_GRPSEL_GPIO5_MASK                     (1 << 16)
 #define OMAP3430_GRPSEL_GPIO3_MASK                     (1 << 14)
 #define OMAP3430_GRPSEL_GPIO2_MASK                     (1 << 13)
 #define OMAP3430_GRPSEL_UART3_MASK                     (1 << 11)
+#define OMAP3430_GRPSEL_GPT8_MASK                      (1 << 9)
+#define OMAP3430_GRPSEL_GPT7_MASK                      (1 << 8)
+#define OMAP3430_GRPSEL_GPT6_MASK                      (1 << 7)
+#define OMAP3430_GRPSEL_GPT5_MASK                      (1 << 6)
 #define OMAP3430_GRPSEL_MCBSP4_MASK                    (1 << 2)
 #define OMAP3430_GRPSEL_MCBSP3_MASK                    (1 << 1)
 #define OMAP3430_GRPSEL_MCBSP2_MASK                    (1 << 0)
index de2a34c..01ca808 100644 (file)
@@ -462,6 +462,7 @@ IS_OMAP_TYPE(3430, 0x3430)
 #define DRA7XX_CLASS           0x07000000
 #define DRA752_REV_ES1_0       (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
 #define DRA752_REV_ES1_1       (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
+#define DRA722_REV_ES1_0       (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
 
 void omap2xxx_check_revision(void);
 void omap3xxx_check_revision(void);
index e4e505f..042f693 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_SIRF
+menuconfig ARCH_SIRF
        bool "CSR SiRF" if ARCH_MULTI_V7
        select ARCH_HAS_RESET_CONTROLLER
        select ARCH_REQUIRE_GPIOLIB
@@ -11,7 +11,7 @@ config ARCH_SIRF
 
 if ARCH_SIRF
 
-menu "CSR SiRF atlas6/primaII/Marco/Polo Specific Features"
+comment "CSR SiRF atlas6/primaII/Marco/Polo Specific Features"
 
 config ARCH_ATLAS6
        bool "CSR SiRFSoC ATLAS6 ARM Cortex A9 Platform"
@@ -37,8 +37,6 @@ config ARCH_MARCO
        help
           Support for CSR SiRFSoC ARM Cortex A9 Platform
 
-endmenu
-
 config SIRF_IRQ
        bool
 
index fd2b99d..ee5697b 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_QCOM
+menuconfig ARCH_QCOM
        bool "Qualcomm Support" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
        select ARM_GIC
@@ -11,8 +11,6 @@ config ARCH_QCOM
 
 if ARCH_QCOM
 
-menu "Qualcomm SoC Selection"
-
 config ARCH_MSM8X60
        bool "Enable support for MSM8X60"
        select CLKSRC_QCOM
@@ -25,8 +23,6 @@ config ARCH_MSM8974
        bool "Enable support for MSM8974"
        select HAVE_ARM_ARCH_TIMER
 
-endmenu
-
 config QCOM_SCM
        bool
 
index 04284de..ad5316a 100644 (file)
@@ -117,7 +117,7 @@ config S3C24XX_SETUP_TS
          Compile in platform device definition for Samsung TouchScreen.
 
 config S3C24XX_DMA
-       bool "S3C2410 DMA support"
+       bool "S3C2410 DMA support (deprecated)"
        select S3C_DMA
        help
          S3C2410 DMA support. This is needed for drivers like sound which
index 3136d86..26ca242 100644 (file)
@@ -18,9 +18,9 @@ config CPU_S3C6410
          Enable S3C6410 CPU support
 
 config S3C64XX_PL080
-       bool "S3C64XX DMA using generic PL08x driver"
+       def_bool DMADEVICES
+       select ARM_AMBA
        select AMBA_PL08X
-       select SAMSUNG_DMADEV
 
 config S3C64XX_SETUP_SDHCI
        bool
index bb2111b..26003e2 100644 (file)
@@ -9,16 +9,18 @@ if ARCH_S5P64X0
 
 config CPU_S5P6440
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_SLEEP if PM
-       select SAMSUNG_DMADEV
        select SAMSUNG_WAKEMASK if PM
        help
          Enable S5P6440 CPU support
 
 config CPU_S5P6450
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_SLEEP if PM
-       select SAMSUNG_DMADEV
        select SAMSUNG_WAKEMASK if PM
        help
          Enable S5P6450 CPU support
index 15170be..c5e3a96 100644 (file)
@@ -9,8 +9,9 @@ if ARCH_S5PC100
 
 config CPU_S5PC100
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_EXT_INT
-       select SAMSUNG_DMADEV
        help
          Enable S5PC100 CPU support
 
index 8c3abe5..f60f286 100644 (file)
@@ -11,10 +11,11 @@ if ARCH_S5PV210
 
 config CPU_S5PV210
        bool
+       select ARM_AMBA
+       select PL330_DMA if DMADEVICES
        select S5P_EXT_INT
        select S5P_PM if PM
        select S5P_SLEEP if PM
-       select SAMSUNG_DMADEV
        help
          Enable S5PV210 CPU support
 
index f9874ba..108939f 100644 (file)
@@ -329,6 +329,11 @@ static struct mtd_partition collie_partitions[] = {
                .name           = "rootfs",
                .offset         = MTDPART_OFS_APPEND,
                .size           = 0x00e20000,
+       }, {
+               .name           = "bootblock",
+               .offset         = MTDPART_OFS_APPEND,
+               .size           = 0x00020000,
+               .mask_flags     = MTD_WRITEABLE
        }
 };
 
@@ -356,7 +361,7 @@ static void collie_flash_exit(void)
 }
 
 static struct flash_platform_data collie_flash_data = {
-       .map_name       = "jedec_probe",
+       .map_name       = "cfi_probe",
        .init           = collie_flash_init,
        .set_vpp        = collie_set_vpp,
        .exit           = collie_flash_exit,
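
One subtlety in the new "bootblock" partition above: mask_flags removes capabilities inherited from the master MTD device, so listing MTD_WRITEABLE there makes the partition read-only (assuming the usual <linux/mtd/partitions.h> semantics):

/* mask_flags strips flags from the master device; masking out
 * MTD_WRITEABLE therefore yields a read-only partition. */
static const struct mtd_partition bootblock_part = {
	.name		= "bootblock",
	.offset		= MTDPART_OFS_APPEND,	/* directly after "rootfs" */
	.size		= 0x00020000,		/* 128 KiB */
	.mask_flags	= MTD_WRITEABLE,	/* masked out = read-only */
};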
index dbd954e..7980730 100644 (file)
@@ -1,7 +1,7 @@
 config ARCH_SHMOBILE
        bool
 
-config ARCH_SHMOBILE_MULTI
+menuconfig ARCH_SHMOBILE_MULTI
        bool "Renesas ARM SoCs" if ARCH_MULTI_V7
        depends on MMU
        select ARCH_SHMOBILE
@@ -15,7 +15,7 @@ config ARCH_SHMOBILE_MULTI
 
 if ARCH_SHMOBILE_MULTI
 
-comment "Renesas ARM SoCs System Type"
+#comment "Renesas ARM SoCs System Type"
 
 config ARCH_EMEV2
        bool "Emma Mobile EV2"
@@ -85,7 +85,6 @@ config ARCH_R8A73A4
        select CPU_V7
        select SH_CLK_CPG
        select RENESAS_IRQC
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select SYS_SUPPORTS_SH_CMT
        select SYS_SUPPORTS_SH_TMU
@@ -264,7 +263,6 @@ config MACH_KOELSCH
 config MACH_KZM9G
        bool "KZM-A9-GT board"
        depends on ARCH_SH73A0
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_REQUIRE_GPIOLIB
        select REGULATOR_FIXED_VOLTAGE if REGULATOR
index 0786249..90df202 100644 (file)
@@ -14,7 +14,6 @@ if PLAT_SPEAR
 config ARCH_SPEAR13XX
        bool "ST SPEAr13xx"
        depends on ARCH_MULTI_V7 || PLAT_SPEAR_SINGLE
-       select ARCH_HAS_CPUFREQ
        select ARM_GIC
        select GPIO_SPEAR_SPICS
        select HAVE_ARM_SCU if SMP
index abf9ee9..878e9ec 100644 (file)
@@ -1,5 +1,5 @@
 menuconfig ARCH_STI
-       bool "STMicroelectronics Consumer Electronics SOCs with Device Trees" if ARCH_MULTI_V7
+       bool "STMicroelectronics Consumer Electronics SOCs" if ARCH_MULTI_V7
        select ARM_GIC
        select ARM_GLOBAL_TIMER
        select PINCTRL
@@ -11,8 +11,8 @@ menuconfig ARCH_STI
        select ARM_ERRATA_754322
        select ARM_ERRATA_764369 if SMP
        select ARM_ERRATA_775420
-       select PL310_ERRATA_753970 if CACHE_PL310
-       select PL310_ERRATA_769419 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
+       select PL310_ERRATA_769419 if CACHE_L2X0
        help
          Include support for STiH41x SOCs like STiH415/416 using the device tree
          for discovery
index 3f9587b..b608508 100644 (file)
 
 #include <linux/clk-provider.h>
 #include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
 
 #include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/system_misc.h>
+
+#define SUN4I_WATCHDOG_CTRL_REG                0x00
+#define SUN4I_WATCHDOG_CTRL_RESTART            BIT(0)
+#define SUN4I_WATCHDOG_MODE_REG                0x04
+#define SUN4I_WATCHDOG_MODE_ENABLE             BIT(0)
+#define SUN4I_WATCHDOG_MODE_RESET_ENABLE       BIT(1)
+
+#define SUN6I_WATCHDOG1_IRQ_REG                0x00
+#define SUN6I_WATCHDOG1_CTRL_REG       0x10
+#define SUN6I_WATCHDOG1_CTRL_RESTART           BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_REG     0x14
+#define SUN6I_WATCHDOG1_CONFIG_RESTART         BIT(0)
+#define SUN6I_WATCHDOG1_CONFIG_IRQ             BIT(1)
+#define SUN6I_WATCHDOG1_MODE_REG       0x18
+#define SUN6I_WATCHDOG1_MODE_ENABLE            BIT(0)
+
+static void __iomem *wdt_base;
+
+static void sun4i_restart(enum reboot_mode mode, const char *cmd)
+{
+       if (!wdt_base)
+               return;
+
+       /* Enable timer and set reset bit in the watchdog */
+       writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
+              wdt_base + SUN4I_WATCHDOG_MODE_REG);
+
+       /*
+        * Restart the watchdog. The default (and lowest) interval
+        * value for the watchdog is 0.5s.
+        */
+       writel(SUN4I_WATCHDOG_CTRL_RESTART, wdt_base + SUN4I_WATCHDOG_CTRL_REG);
+
+       while (1) {
+               mdelay(5);
+               writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
+                      wdt_base + SUN4I_WATCHDOG_MODE_REG);
+       }
+}
+
+static struct of_device_id sunxi_restart_ids[] = {
+       { .compatible = "allwinner,sun4i-a10-wdt" },
+       { /*sentinel*/ }
+};
+
+static void sunxi_setup_restart(void)
+{
+       struct device_node *np;
+
+       np = of_find_matching_node(NULL, sunxi_restart_ids);
+       if (WARN(!np, "unable to setup watchdog restart"))
+               return;
+
+       wdt_base = of_iomap(np, 0);
+       WARN(!wdt_base, "failed to map watchdog base address");
+}
+
+static void __init sunxi_dt_init(void)
+{
+       sunxi_setup_restart();
+
+       of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
+}
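
The .restart hook wired into the machine descriptors below is reached through the generic ARM reboot path, roughly like this (a condensed sketch of the arch reboot handling, not the verbatim code):

	void machine_restart(char *cmd)
	{
		local_irq_disable();
		smp_send_stop();

		/* arm_pm_restart is set from the descriptor's .restart
		 * hook, here sun4i_restart(), which arms the watchdog
		 * with its minimum 0.5 s timeout and spins until the
		 * SoC resets. */
		arm_pm_restart(reboot_mode, cmd);
	}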
 
 static const char * const sunxi_board_dt_compat[] = {
        "allwinner,sun4i-a10",
@@ -23,7 +96,9 @@ static const char * const sunxi_board_dt_compat[] = {
 };
 
 DT_MACHINE_START(SUNXI_DT, "Allwinner A1X (Device Tree)")
+       .init_machine   = sunxi_dt_init,
        .dt_compat      = sunxi_board_dt_compat,
+       .restart        = sun4i_restart,
 MACHINE_END
 
 static const char * const sun6i_board_dt_compat[] = {
@@ -51,5 +126,7 @@ static const char * const sun7i_board_dt_compat[] = {
 };
 
 DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
+       .init_machine   = sunxi_dt_init,
        .dt_compat      = sun7i_board_dt_compat,
+       .restart        = sun4i_restart,
 MACHINE_END
index e16999e..0953996 100644 (file)
@@ -1,6 +1,5 @@
-config ARCH_TEGRA
+menuconfig ARCH_TEGRA
        bool "NVIDIA Tegra" if ARCH_MULTI_V7
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_SUPPORTS_TRUSTED_FOUNDATIONS
        select ARM_GIC
@@ -16,8 +15,7 @@ config ARCH_TEGRA
        help
          This enables support for NVIDIA Tegra based systems.
 
-menu "NVIDIA Tegra options"
-       depends on ARCH_TEGRA
+if ARCH_TEGRA
 
 config ARCH_TEGRA_2x_SOC
        bool "Enable support for Tegra20 family"
@@ -69,4 +67,4 @@ config TEGRA_AHB
          which controls AHB bus master arbitration and some
          performance parameters (priority, prefetch size).
 
-endmenu
+endif
index e3a96d7..bc51a71 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_U300
+menuconfig ARCH_U300
        bool "ST-Ericsson U300 Series" if ARCH_MULTI_V5
        depends on MMU
        select ARCH_REQUIRE_GPIOLIB
@@ -16,8 +16,6 @@ config ARCH_U300
 
 if ARCH_U300
 
-menu "ST-Ericsson AB U300/U335 Platform"
-
 config MACH_U300
        depends on ARCH_U300
        bool "U300"
@@ -43,6 +41,4 @@ config MACH_U300_SPIDUMMY
                you don't need it. Selecting this will activate the
                SPI framework and ARM PL022 support.
 
-endmenu
-
 endif
index b41a42d..699e860 100644 (file)
@@ -1,9 +1,8 @@
-config ARCH_U8500
+menuconfig ARCH_U8500
        bool "ST-Ericsson U8500 Series" if ARCH_MULTI_V7
        depends on MMU
        select AB8500_CORE
        select ABX500_CORE
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select ARM_AMBA
        select ARM_ERRATA_754322
@@ -16,7 +15,7 @@ config ARCH_U8500
        select PINCTRL
        select PINCTRL_ABX500
        select PINCTRL_NOMADIK
-       select PL310_ERRATA_753970 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
        help
          Support for ST-Ericsson's Ux500 architecture
 
@@ -34,8 +33,6 @@ config UX500_SOC_DB8500
        select REGULATOR
        select REGULATOR_DB8500_PRCMU
 
-menu "Ux500 target platform (boards)"
-
 config MACH_MOP500
        bool "U8500 Development platform, MOP500 versions"
        select I2C
@@ -68,8 +65,6 @@ config UX500_AUTO_PLATFORM
          a working kernel. If everything else is disabled, this
          automatically enables MACH_MOP500.
 
-endmenu
-
 config UX500_DEBUG_UART
        int "Ux500 UART to use for low-level debug"
        default 2
index 90249cf..d8b9330 100644 (file)
@@ -1,4 +1,4 @@
-config ARCH_VEXPRESS
+menuconfig ARCH_VEXPRESS
        bool "ARM Ltd. Versatile Express family" if ARCH_MULTI_V7
        select ARCH_REQUIRE_GPIOLIB
        select ARCH_SUPPORTS_BIG_ENDIAN
@@ -37,14 +37,13 @@ config ARCH_VEXPRESS
          platforms. The traditional (ATAGs) boot method is not usable on
          these boards with this option.
 
-menu "Versatile Express platform type"
-       depends on ARCH_VEXPRESS
+if ARCH_VEXPRESS
 
 config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
        bool "Enable A5 and A9 only errata work-arounds"
        default y
        select ARM_ERRATA_720789
-       select PL310_ERRATA_753970 if CACHE_PL310
+       select PL310_ERRATA_753970 if CACHE_L2X0
        help
          Provides common dependencies for Versatile Express platforms
          based on Cortex-A5 and Cortex-A9 processors. In order to
@@ -65,7 +64,6 @@ config ARCH_VEXPRESS_DCSCB
 
 config ARCH_VEXPRESS_SPC
        bool "Versatile Express Serial Power Controller (SPC)"
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select PM_OPP
        help
@@ -83,4 +81,4 @@ config ARCH_VEXPRESS_TC2_PM
          Support for CPU and cluster power management on Versatile Express
          with a TC2 (A15x2 A7x3) big.LITTLE core tile.
 
-endmenu
+endif
index 08f56a4..aaaa24f 100644 (file)
@@ -1,6 +1,5 @@
 config ARCH_VT8500
        bool
-       select ARCH_HAS_CPUFREQ
        select ARCH_REQUIRE_GPIOLIB
        select CLKDEV_LOOKUP
        select VT8500_TIMER
index 573e0db..0c164f8 100644 (file)
@@ -1,6 +1,5 @@
 config ARCH_ZYNQ
        bool "Xilinx Zynq ARM Cortex A9 Platform" if ARCH_MULTI_V7
-       select ARCH_HAS_CPUFREQ
        select ARCH_HAS_OPP
        select ARCH_SUPPORTS_BIG_ENDIAN
        select ARM_AMBA
index eda0dd0..c348eae 100644 (file)
@@ -889,9 +889,10 @@ config CACHE_L2X0
        help
          This option enables the L2x0 PrimeCell.
 
+if CACHE_L2X0
+
 config CACHE_PL310
        bool
-       depends on CACHE_L2X0
        default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
        help
          This option enables optimisations for the PL310 cache
@@ -899,7 +900,6 @@ config CACHE_PL310
 
 config PL310_ERRATA_588369
        bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
-       depends on CACHE_L2X0
        help
           The PL310 L2 cache controller implements three types of Clean &
           Invalidate maintenance operations: by Physical Address
@@ -912,7 +912,6 @@ config PL310_ERRATA_588369
 
 config PL310_ERRATA_727915
        bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
-       depends on CACHE_L2X0
        help
          PL310 implements the Clean & Invalidate by Way L2 cache maintenance
          operation (offset 0x7FC). This operation runs in background so that
@@ -923,7 +922,6 @@ config PL310_ERRATA_727915
 
 config PL310_ERRATA_753970
        bool "PL310 errata: cache sync operation may be faulty"
-       depends on CACHE_PL310
        help
          This option enables the workaround for the 753970 PL310 (r3p0) erratum.
 
@@ -938,7 +936,6 @@ config PL310_ERRATA_753970
 
 config PL310_ERRATA_769419
        bool "PL310 errata: no automatic Store Buffer drain"
-       depends on CACHE_L2X0
        help
          On revisions of the PL310 prior to r3p2, the Store Buffer does
          not automatically drain. This can cause normal, non-cacheable
@@ -948,6 +945,8 @@ config PL310_ERRATA_769419
          on systems with an outer cache, the store buffer is drained
          explicitly.
 
+endif
+
 config CACHE_TAUROS2
        bool "Enable the Tauros2 L2 cache controller"
        depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
index efc5cab..7c3fb41 100644 (file)
@@ -664,7 +664,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
 
 static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
 {
-       unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
+       unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
        bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
 
        if (rev >= L310_CACHE_ID_RTL_R2P0) {
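
The mask change matters because the CACHE_ID register packs two distinct fields, and the RTL-release comparison was previously made against the part-number field. A sketch of the decode this code relies on (mask names as used in this file):

	u32 id   = readl_relaxed(base + L2X0_CACHE_ID);
	u32 part = id & L2X0_CACHE_ID_PART_MASK; /* which controller */
	u32 rev  = id & L2X0_CACHE_ID_RTL_MASK;  /* which RTL release (rNpM) */

	if (rev >= L310_CACHE_ID_RTL_R2P0)
		/* r2p0-and-later setup */;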
@@ -1068,6 +1068,33 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
        },
 };
 
+/*
+ * This is a variant of the of_l2c310_data with .sync set to
+ * NULL. Outer sync operations are not needed when the system is I/O
+ * coherent, and potentially harmful in certain situations (PCIe/PL310
+ * deadlock on Armada 375/38x due to hardware I/O coherency). The
+ * other operations are kept because they are infrequent (therefore do
+ * not cause the deadlock in practice) and needed for secondary CPU
+ * boot and other power management activities.
+ */
+static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
+       .type = "L2C-310 Coherent",
+       .way_size_0 = SZ_8K,
+       .num_lock = 8,
+       .of_parse = l2c310_of_parse,
+       .enable = l2c310_enable,
+       .fixup = l2c310_fixup,
+       .save  = l2c310_save,
+       .outer_cache = {
+               .inv_range   = l2c210_inv_range,
+               .clean_range = l2c210_clean_range,
+               .flush_range = l2c210_flush_range,
+               .flush_all   = l2c210_flush_all,
+               .disable     = l2c310_disable,
+               .resume      = l2c310_resume,
+       },
+};
+
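Leaving .sync NULL works because the outer-cache wrappers guard each call on the hook being populated; roughly (a sketch of the wrapper pattern, not the verbatim header):

	static inline void outer_sync(void)
	{
		if (outer_cache.sync)
			outer_cache.sync();
		/* with the coherent variant selected, .sync is NULL and
		 * this becomes a no-op, so no L2 sync register write can
		 * race in-flight PCIe traffic */
	}
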
 /*
  * Note that the end addresses passed to Linux primitives are
  * noninclusive, while the hardware cache range operations use
@@ -1487,6 +1514,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
 
        data = of_match_node(l2x0_ids, np)->data;
 
+       if (of_device_is_compatible(np, "arm,pl310-cache") &&
+           of_property_read_bool(np, "arm,io-coherent"))
+               data = &of_l2c310_coherent_data;
+
        old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
        if (old_aux != ((old_aux & aux_mask) | aux_val)) {
                pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
index da1874f..a014dfa 100644 (file)
@@ -300,6 +300,7 @@ void __init sanity_check_meminfo(void)
        sanity_check_meminfo_mpu();
        end = memblock_end_of_DRAM();
        high_memory = __va(end - 1) + 1;
+       memblock_set_current_limit(end);
 }
 
 /*
index 97448c3..ba0d58e 100644 (file)
@@ -502,6 +502,7 @@ __\name\()_proc_info:
        .long   \cpu_val
        .long   \cpu_mask
        .long   PMD_TYPE_SECT | \
+               PMD_SECT_CACHEABLE | \
                PMD_BIT4 | \
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
index fb5503c..a37b989 100644 (file)
@@ -56,7 +56,7 @@
 #define FLAG_NEED_X_RESET      (1 << 0)
 
 struct jit_ctx {
-       const struct sk_filter *skf;
+       const struct bpf_prog *skf;
        unsigned idx;
        unsigned prologue_bytes;
        int ret0_fp_idx;
@@ -465,7 +465,7 @@ static inline void update_on_xread(struct jit_ctx *ctx)
 static int build_body(struct jit_ctx *ctx)
 {
        void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-       const struct sk_filter *prog = ctx->skf;
+       const struct bpf_prog *prog = ctx->skf;
        const struct sock_filter *inst;
        unsigned i, load_order, off, condt;
        int imm12;
@@ -857,7 +857,7 @@ b_epilogue:
 }
 
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
        struct jit_ctx ctx;
        unsigned tmp_idx;
@@ -926,7 +926,7 @@ out:
        return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
                module_free(NULL, fp->bpf_func);
index 243dfcb..301b892 100644 (file)
@@ -35,27 +35,15 @@ config SAMSUNG_PM
          Base platform power management code for Samsung platforms
 
 if PLAT_SAMSUNG
+menu "Samsung Common options"
 
 # boot configurations
 
 comment "Boot options"
 
-config S3C_BOOT_ERROR_RESET
-       bool "S3C Reboot on decompression error"
-       help
-         Say y here to use the watchdog to reset the system if the
-         kernel decompressor detects an error during decompression.
-
-config S3C_BOOT_UART_FORCE_FIFO
-       bool "Force UART FIFO on during boot process"
-       default y
-       help
-         Say Y here to force the UART FIFOs on during the kernel
-        uncompressor
-
-
 config S3C_LOWLEVEL_UART_PORT
        int "S3C UART to use for low-level messages"
+       depends on ARCH_S3C64XX
        default 0
        help
          Choice of which UART port to use for the low-level messages,
@@ -407,17 +395,16 @@ config SAMSUNG_PM_GPIO
          Include legacy GPIO power management code for platforms not using
          pinctrl-samsung driver.
 
-endif
-
 config SAMSUNG_DMADEV
-       bool
-       select ARM_AMBA
+       bool "Use legacy Samsung DMA abstraction"
+       depends on CPU_S5PV210 || CPU_S5PC100 || ARCH_S5P64X0 || ARCH_S3C64XX
        select DMADEVICES
-       select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
-                                       CPU_S5P6450 || CPU_S5P6440)
+       default y
        help
          Use the DMA device engine for the PL330 DMAC.
 
+endif
+
 config S5P_DEV_MFC
        bool
        help
@@ -503,4 +490,5 @@ config DEBUG_S3C_UART
        default "2" if DEBUG_S3C_UART2
        default "3" if DEBUG_S3C_UART3
 
+endmenu
 endif
index 859a9bb..91cf08b 100644 (file)
@@ -51,3 +51,8 @@ int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
 {
        return -ENOSYS;
 }
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
+{
+       return 0;
+}
index 7295419..839f48c 100644 (file)
@@ -1,8 +1,10 @@
 config ARM64
        def_bool y
        select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_HAS_OPP
        select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
+       select ARCH_USE_CMPXCHG_LOCKREF
+       select ARCH_SUPPORTS_ATOMIC_RMW
        select ARCH_WANT_OPTIONAL_GPIOLIB
        select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
        select ARCH_WANT_FRAME_POINTERS
index 1247ca1..6541962 100644 (file)
@@ -24,3 +24,7 @@
                reg = < 0x1 0x00000000 0x0 0x80000000 >; /* Updated by bootloader */
        };
 };
+
+&serial0 {
+       status = "ok";
+};
index c5f0a47..40aa96c 100644 (file)
                };
 
                serial0: serial@1c020000 {
+                       status = "disabled";
                        device_type = "serial";
-                       compatible = "ns16550";
+                       compatible = "ns16550a";
                        reg = <0 0x1c020000 0x0 0x1000>;
                        reg-shift = <2>;
                        clock-frequency = <10000000>; /* Updated by bootloader */
                        interrupts = <0x0 0x4c 0x4>;
                };
 
+               serial1: serial@1c021000 {
+                       status = "disabled";
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       reg = <0 0x1c021000 0x0 0x1000>;
+                       reg-shift = <2>;
+                       clock-frequency = <10000000>; /* Updated by bootloader */
+                       interrupt-parent = <&gic>;
+                       interrupts = <0x0 0x4d 0x4>;
+               };
+
+               serial2: serial@1c022000 {
+                       status = "disabled";
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       reg = <0 0x1c022000 0x0 0x1000>;
+                       reg-shift = <2>;
+                       clock-frequency = <10000000>; /* Updated by bootloader */
+                       interrupt-parent = <&gic>;
+                       interrupts = <0x0 0x4e 0x4>;
+               };
+
+               serial3: serial@1c023000 {
+                       status = "disabled";
+                       device_type = "serial";
+                       compatible = "ns16550a";
+                       reg = <0 0x1c023000 0x0 0x1000>;
+                       reg-shift = <2>;
+                       clock-frequency = <10000000>; /* Updated by bootloader */
+                       interrupt-parent = <&gic>;
+                       interrupts = <0x0 0x4f 0x4>;
+               };
+
                phy1: phy@1f21a000 {
                        compatible = "apm,xgene-phy";
                        reg = <0x0 0x1f21a000 0x0 0x100>;
index 157e1d8..3421f31 100644 (file)
@@ -6,9 +6,18 @@ CONFIG_NO_HZ_IDLE=y
 CONFIG_HIGH_RES_TIMERS=y
 CONFIG_BSD_PROCESS_ACCT=y
 CONFIG_BSD_PROCESS_ACCT_V3=y
+CONFIG_TASKSTATS=y
+CONFIG_TASK_DELAY_ACCT=y
+CONFIG_TASK_XACCT=y
+CONFIG_TASK_IO_ACCOUNTING=y
 CONFIG_IKCONFIG=y
 CONFIG_IKCONFIG_PROC=y
 CONFIG_LOG_BUF_SHIFT=14
+CONFIG_RESOURCE_COUNTERS=y
+CONFIG_MEMCG=y
+CONFIG_MEMCG_SWAP=y
+CONFIG_MEMCG_KMEM=y
+CONFIG_CGROUP_HUGETLB=y
 # CONFIG_UTS_NS is not set
 # CONFIG_IPC_NS is not set
 # CONFIG_PID_NS is not set
@@ -27,6 +36,7 @@ CONFIG_ARCH_VEXPRESS=y
 CONFIG_ARCH_XGENE=y
 CONFIG_SMP=y
 CONFIG_PREEMPT=y
+CONFIG_KSM=y
 CONFIG_TRANSPARENT_HUGEPAGE=y
 CONFIG_CMA=y
 CONFIG_CMDLINE="console=ttyAMA0"
@@ -45,6 +55,7 @@ CONFIG_IP_PNP_BOOTP=y
 CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DMA_CMA=y
+CONFIG_BLK_DEV_LOOP=y
 CONFIG_VIRTIO_BLK=y
 # CONFIG_SCSI_PROC_FS is not set
 CONFIG_BLK_DEV_SD=y
@@ -53,6 +64,7 @@ CONFIG_ATA=y
 CONFIG_PATA_PLATFORM=y
 CONFIG_PATA_OF_PLATFORM=y
 CONFIG_NETDEVICES=y
+CONFIG_TUN=y
 CONFIG_SMC91X=y
 CONFIG_SMSC911X=y
 # CONFIG_WLAN is not set
@@ -85,6 +97,8 @@ CONFIG_EXT3_FS=y
 # CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
 # CONFIG_EXT3_FS_XATTR is not set
 CONFIG_EXT4_FS=y
+CONFIG_FANOTIFY=y
+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
 CONFIG_FUSE_FS=y
 CONFIG_CUSE=y
 CONFIG_VFAT_FS=y
@@ -104,6 +118,7 @@ CONFIG_DEBUG_KERNEL=y
 CONFIG_LOCKUP_DETECTOR=y
 # CONFIG_SCHED_DEBUG is not set
 # CONFIG_FTRACE is not set
+CONFIG_SECURITY=y
 CONFIG_CRYPTO_ANSI_CPRNG=y
 CONFIG_ARM64_CRYPTO=y
 CONFIG_CRYPTO_SHA1_ARM64_CE=y
index 60f2f4c..79cd911 100644 (file)
@@ -106,7 +106,7 @@ static int ecb_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -128,7 +128,7 @@ static int ecb_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
        for (first = 1; (blocks = (walk.nbytes / AES_BLOCK_SIZE)); first = 0) {
                aes_ecb_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -151,7 +151,7 @@ static int cbc_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_enc, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -174,7 +174,7 @@ static int cbc_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key_dec, rounds, blocks, walk.iv,
                                first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
        return err;
@@ -243,7 +243,7 @@ static int xts_encrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_enc, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
 
@@ -267,7 +267,7 @@ static int xts_decrypt(struct blkcipher_desc *desc, struct scatterlist *dst,
                aes_xts_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                                (u8 *)ctx->key1.key_dec, rounds, blocks,
                                (u8 *)ctx->key2.key_enc, walk.iv, first);
-               err = blkcipher_walk_done(desc, &walk, 0);
+               err = blkcipher_walk_done(desc, &walk, walk.nbytes % AES_BLOCK_SIZE);
        }
        kernel_neon_end();
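
All six hunks make the same correction: the last argument of blkcipher_walk_done() is the number of bytes left unprocessed, so a sub-block tail must be handed back to the walker instead of being claimed as consumed. The loop contract, sketched (process_blocks() stands in for the per-mode NEON helper):

	unsigned int blocks;

	while ((blocks = walk.nbytes / AES_BLOCK_SIZE)) {
		process_blocks(walk.dst.virt.addr, walk.src.virt.addr, blocks);

		/* report the partial trailing block back; passing 0 here
		 * told the walker everything was consumed */
		err = blkcipher_walk_done(desc, &walk,
					  walk.nbytes % AES_BLOCK_SIZE);
	}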
 
index b9e6eaf..dc45701 100644 (file)
@@ -3,14 +3,6 @@
  *
  * Copyright (C) 2014 Linaro Ltd. <ard.biesheuvel@linaro.org>
  *
- * Based on arch/x86/crypto/ghash-pmullni-intel_asm.S
- *
- * Copyright (c) 2009 Intel Corp.
- *   Author: Huang Ying <ying.huang@intel.com>
- *           Vinodh Gopal
- *           Erdinc Ozturk
- *           Deniz Karakoyunlu
- *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 as published
  * by the Free Software Foundation.
 #include <linux/linkage.h>
 #include <asm/assembler.h>
 
-       DATA    .req    v0
-       SHASH   .req    v1
-       IN1     .req    v2
+       SHASH   .req    v0
+       SHASH2  .req    v1
        T1      .req    v2
        T2      .req    v3
-       T3      .req    v4
-       VZR     .req    v5
+       MASK    .req    v4
+       XL      .req    v5
+       XM      .req    v6
+       XH      .req    v7
+       IN1     .req    v7
 
        .text
        .arch           armv8-a+crypto
         *                         struct ghash_key const *k, const char *head)
         */
 ENTRY(pmull_ghash_update)
-       ld1             {DATA.16b}, [x1]
        ld1             {SHASH.16b}, [x3]
-       eor             VZR.16b, VZR.16b, VZR.16b
+       ld1             {XL.16b}, [x1]
+       movi            MASK.16b, #0xe1
+       ext             SHASH2.16b, SHASH.16b, SHASH.16b, #8
+       shl             MASK.2d, MASK.2d, #57
+       eor             SHASH2.16b, SHASH2.16b, SHASH.16b
 
        /* do the head block first, if supplied */
        cbz             x4, 0f
-       ld1             {IN1.2d}, [x4]
+       ld1             {T1.2d}, [x4]
        b               1f
 
-0:     ld1             {IN1.2d}, [x2], #16
+0:     ld1             {T1.2d}, [x2], #16
        sub             w0, w0, #1
-1:     ext             IN1.16b, IN1.16b, IN1.16b, #8
-CPU_LE(        rev64           IN1.16b, IN1.16b        )
-       eor             DATA.16b, DATA.16b, IN1.16b
 
-       /* multiply DATA by SHASH in GF(2^128) */
-       ext             T2.16b, DATA.16b, DATA.16b, #8
-       ext             T3.16b, SHASH.16b, SHASH.16b, #8
-       eor             T2.16b, T2.16b, DATA.16b
-       eor             T3.16b, T3.16b, SHASH.16b
+1:     /* multiply XL by SHASH in GF(2^128) */
+CPU_LE(        rev64           T1.16b, T1.16b  )
 
-       pmull2          T1.1q, SHASH.2d, DATA.2d        // a1 * b1
-       pmull           DATA.1q, SHASH.1d, DATA.1d      // a0 * b0
-       pmull           T2.1q, T2.1d, T3.1d             // (a1 + a0)(b1 + b0)
-       eor             T2.16b, T2.16b, T1.16b          // (a0 * b1) + (a1 * b0)
-       eor             T2.16b, T2.16b, DATA.16b
+       ext             T2.16b, XL.16b, XL.16b, #8
+       ext             IN1.16b, T1.16b, T1.16b, #8
+       eor             T1.16b, T1.16b, T2.16b
+       eor             XL.16b, XL.16b, IN1.16b
 
-       ext             T3.16b, VZR.16b, T2.16b, #8
-       ext             T2.16b, T2.16b, VZR.16b, #8
-       eor             DATA.16b, DATA.16b, T3.16b
-       eor             T1.16b, T1.16b, T2.16b  // <T1:DATA> is result of
-                                               // carry-less multiplication
+       pmull2          XH.1q, SHASH.2d, XL.2d          // a1 * b1
+       eor             T1.16b, T1.16b, XL.16b
+       pmull           XL.1q, SHASH.1d, XL.1d          // a0 * b0
+       pmull           XM.1q, SHASH2.1d, T1.1d         // (a1 + a0)(b1 + b0)
 
-       /* first phase of the reduction */
-       shl             T3.2d, DATA.2d, #1
-       eor             T3.16b, T3.16b, DATA.16b
-       shl             T3.2d, T3.2d, #5
-       eor             T3.16b, T3.16b, DATA.16b
-       shl             T3.2d, T3.2d, #57
-       ext             T2.16b, VZR.16b, T3.16b, #8
-       ext             T3.16b, T3.16b, VZR.16b, #8
-       eor             DATA.16b, DATA.16b, T2.16b
-       eor             T1.16b, T1.16b, T3.16b
+       ext             T1.16b, XL.16b, XH.16b, #8
+       eor             T2.16b, XL.16b, XH.16b
+       eor             XM.16b, XM.16b, T1.16b
+       eor             XM.16b, XM.16b, T2.16b
+       pmull           T2.1q, XL.1d, MASK.1d
 
-       /* second phase of the reduction */
-       ushr            T2.2d, DATA.2d, #5
-       eor             T2.16b, T2.16b, DATA.16b
-       ushr            T2.2d, T2.2d, #1
-       eor             T2.16b, T2.16b, DATA.16b
-       ushr            T2.2d, T2.2d, #1
-       eor             T1.16b, T1.16b, T2.16b
-       eor             DATA.16b, DATA.16b, T1.16b
+       mov             XH.d[0], XM.d[1]
+       mov             XM.d[1], XL.d[0]
+
+       eor             XL.16b, XM.16b, T2.16b
+       ext             T2.16b, XL.16b, XL.16b, #8
+       pmull           XL.1q, XL.1d, MASK.1d
+       eor             T2.16b, T2.16b, XH.16b
+       eor             XL.16b, XL.16b, T2.16b
 
        cbnz            w0, 0b
 
-       st1             {DATA.16b}, [x1]
+       st1             {XL.16b}, [x1]
        ret
 ENDPROC(pmull_ghash_update)
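
The rewritten loop computes each GF(2^128) product with three PMULLs via a Karatsuba split and then reduces modulo x^128 + x^7 + x^2 + x + 1 using the 0xe1 << 57 constant held in MASK. The same structure in C, with clmul64() as a hypothetical stand-in for the PMULL/PMULL2 instructions:

	typedef unsigned long long u64;
	typedef unsigned __int128 u128;	/* GCC/Clang extension */

	u128 clmul64(u64 a, u64 b);	/* hypothetical 64x64 carry-less multiply */

	static void gf128_mul_sketch(u64 x1, u64 x0, u64 h1, u64 h0)
	{
		u128 xl = clmul64(x0, h0);            /* low partial product   */
		u128 xh = clmul64(x1, h1);            /* high partial product  */
		u128 xm = clmul64(x1 ^ x0, h1 ^ h0);  /* Karatsuba middle term */

		xm ^= xl ^ xh;                        /* = x1*h0 + x0*h1 */

		/* carry-less product = xh*2^128 + xm*2^64 + xl; the asm
		 * folds xm into xh:xl (the ext/mov shuffles above) and
		 * reduces with two further PMULLs against MASK */
	}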
index b92baf3..833ec1e 100644 (file)
@@ -67,11 +67,12 @@ static int ghash_update(struct shash_desc *desc, const u8 *src,
                blocks = len / GHASH_BLOCK_SIZE;
                len %= GHASH_BLOCK_SIZE;
 
-               kernel_neon_begin_partial(6);
+               kernel_neon_begin_partial(8);
                pmull_ghash_update(blocks, ctx->digest, src, key,
                                   partial ? ctx->buf : NULL);
                kernel_neon_end();
                src += blocks * GHASH_BLOCK_SIZE;
+               partial = 0;
        }
        if (len)
                memcpy(ctx->buf + partial, src, len);
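
Two fixes meet in this hunk: kernel_neon_begin_partial(8) matches the rewritten assembly, which now clobbers NEON registers v0-v7, and partial must be cleared once the buffered head block has been consumed. Without the reset, a tail shorter than one block would be stored at a stale offset:

	/* after the blocked portion is processed the buffer is empty,
	 * so any remaining tail belongs at the start of ctx->buf */
	partial = 0;
	if (len)
		memcpy(ctx->buf + partial, src, len);	/* == ctx->buf */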
@@ -88,7 +89,7 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 
                memset(ctx->buf + partial, 0, GHASH_BLOCK_SIZE - partial);
 
-               kernel_neon_begin_partial(6);
+               kernel_neon_begin_partial(8);
                pmull_ghash_update(1, ctx->digest, ctx->buf, key, NULL);
                kernel_neon_end();
        }
index 42c7eec..0b3fcf8 100644 (file)
@@ -30,7 +30,6 @@ generic-y += msgbuf.h
 generic-y += mutex.h
 generic-y += pci.h
 generic-y += poll.h
-generic-y += posix_types.h
 generic-y += preempt.h
 generic-y += resource.h
 generic-y += rwsem.h
index 3a4572e..dc82e52 100644 (file)
@@ -26,8 +26,6 @@
 #include <xen/xen.h>
 #include <asm/xen/hypervisor.h>
 
-#define ARCH_HAS_DMA_GET_REQUIRED_MASK
-
 #define DMA_ERROR_CODE (~(dma_addr_t)0)
 extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops coherent_swiotlb_dma_ops;
index 993bce5..902eb70 100644 (file)
@@ -56,6 +56,8 @@
 #define TASK_SIZE_32           UL(0x100000000)
 #define TASK_SIZE              (test_thread_flag(TIF_32BIT) ? \
                                TASK_SIZE_32 : TASK_SIZE_64)
+#define TASK_SIZE_OF(tsk)      (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
+                               TASK_SIZE_32 : TASK_SIZE_64)
 #else
 #define TASK_SIZE              TASK_SIZE_64
 #endif /* CONFIG_COMPAT */
index 598cc38..e0ccceb 100644 (file)
@@ -246,7 +246,7 @@ static inline pmd_t pte_pmd(pte_t pte)
 #define pmd_mkwrite(pmd)       pte_pmd(pte_mkwrite(pmd_pte(pmd)))
 #define pmd_mkdirty(pmd)       pte_pmd(pte_mkdirty(pmd_pte(pmd)))
 #define pmd_mkyoung(pmd)       pte_pmd(pte_mkyoung(pmd_pte(pmd)))
-#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK))
+#define pmd_mknotpresent(pmd)  (__pmd(pmd_val(pmd) & ~PMD_TYPE_MASK))
 
 #define __HAVE_ARCH_PMD_WRITE
 #define pmd_write(pmd)         pte_write(pmd_pte(pmd))
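
The one-character fix removes a side effect: pmd_val() expands to an lvalue, so the '&=' form modified its argument in addition to producing the result. A minimal demonstration:

	pmd_t pmd = *pmdp;
	pmd_t not_present;

	not_present = __pmd(pmd_val(pmd) &= ~PMD_TYPE_MASK); /* old: clobbers pmd too */
	not_present = __pmd(pmd_val(pmd) &  ~PMD_TYPE_MASK); /* new: pmd left intact  */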
@@ -292,7 +292,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
 #define pmd_sect(pmd)          ((pmd_val(pmd) & PMD_TYPE_MASK) == \
                                 PMD_TYPE_SECT)
 
-#ifdef ARM64_64K_PAGES
+#ifdef CONFIG_ARM64_64K_PAGES
 #define pud_sect(pud)          (0)
 #else
 #define pud_sect(pud)          ((pud_val(pud) & PUD_TYPE_MASK) == \
index a429b59..501000f 100644 (file)
 
 #include <uapi/asm/ptrace.h>
 
+/* Current Exception Level values, as contained in CurrentEL */
+#define CurrentEL_EL1          (1 << 2)
+#define CurrentEL_EL2          (2 << 2)
+
 /* AArch32-specific ptrace requests */
 #define COMPAT_PTRACE_GETREGS          12
 #define COMPAT_PTRACE_SETREGS          13
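
These constants replace comparisons against PSR_MODE_EL2t/EL2h in head.S and efi-entry.S: CurrentEL reports only the exception level, in bits [3:2], with no stack-pointer-selection bit, so a single compare against 2 << 2 is both sufficient and correct. Sketched in C:

	/* CurrentEL layout (ARMv8): EL in bits [3:2], all other bits RES0 */
	static inline unsigned int current_el(unsigned long currentel)
	{
		return (currentel >> 2) & 3;	/* 0..3 */
	}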
diff --git a/arch/arm64/include/uapi/asm/posix_types.h b/arch/arm64/include/uapi/asm/posix_types.h
new file mode 100644 (file)
index 0000000..7985ff6
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __ASM_POSIX_TYPES_H
+#define __ASM_POSIX_TYPES_H
+
+typedef unsigned short __kernel_old_uid_t;
+typedef unsigned short __kernel_old_gid_t;
+#define __kernel_old_uid_t __kernel_old_uid_t
+
+#include <asm-generic/posix_types.h>
+
+#endif /*  __ASM_POSIX_TYPES_H */
index b72cf40..ee469be 100644 (file)
@@ -58,7 +58,7 @@ struct fpsimd_context {
 
 struct esr_context {
        struct _aarch64_ctx head;
-       u64 esr;
+       __u64 esr;
 };
 
 #endif /* _UAPI__ASM_SIGCONTEXT_H */
index 66716c9..619b1dd 100644 (file)
@@ -78,8 +78,7 @@ ENTRY(efi_stub_entry)
 
        /* Turn off Dcache and MMU */
        mrs     x0, CurrentEL
-       cmp     x0, #PSR_MODE_EL2t
-       ccmp    x0, #PSR_MODE_EL2h, #0x4, ne
+       cmp     x0, #CurrentEL_EL2
        b.ne    1f
        mrs     x0, sctlr_el2
        bic     x0, x0, #1 << 0 // clear SCTLR.M
index 60e98a6..e786e6c 100644 (file)
@@ -12,8 +12,6 @@
 #include <linux/efi.h>
 #include <linux/libfdt.h>
 #include <asm/sections.h>
-#include <generated/compile.h>
-#include <generated/utsrelease.h>
 
 /*
  * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
index b051871..aa5f9fc 100644 (file)
@@ -205,7 +205,7 @@ ENDPROC(ftrace_graph_caller)
  *
  * Run ftrace_return_to_handler() before going back to parent.
  * @fp is checked against the value passed by ftrace_graph_caller()
- * only when CONFIG_FUNCTION_GRAPH_FP_TEST is enabled.
+ * only when CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST is enabled.
  */
 ENTRY(return_to_handler)
        str     x0, [sp, #-16]!
index bf017f4..9ce04ba 100644 (file)
@@ -279,7 +279,6 @@ el1_sp_pc:
         */
        mrs     x0, far_el1
        enable_dbg
-       mov     x1, x25
        mov     x2, sp
        b       do_sp_pc_abort
 el1_undef:
index a96d3a6..a2c1195 100644 (file)
@@ -270,8 +270,7 @@ ENDPROC(stext)
  */
 ENTRY(el2_setup)
        mrs     x0, CurrentEL
-       cmp     x0, #PSR_MODE_EL2t
-       ccmp    x0, #PSR_MODE_EL2h, #0x4, ne
+       cmp     x0, #CurrentEL_EL2
        b.ne    1f
        mrs     x0, sctlr_el2
 CPU_BE(        orr     x0, x0, #(1 << 25)      )       // Set the EE bit for EL2
index 3e926b9..9fde010 100644 (file)
@@ -655,11 +655,16 @@ static int compat_gpr_get(struct task_struct *target,
                        reg = task_pt_regs(target)->regs[idx];
                }
 
-               ret = copy_to_user(ubuf, &reg, sizeof(reg));
-               if (ret)
-                       break;
-
-               ubuf += sizeof(reg);
+               if (kbuf) {
+                       memcpy(kbuf, &reg, sizeof(reg));
+                       kbuf += sizeof(reg);
+               } else {
+                       ret = copy_to_user(ubuf, &reg, sizeof(reg));
+                       if (ret)
+                               break;
+
+                       ubuf += sizeof(reg);
+               }
        }
 
        return ret;
@@ -689,11 +694,16 @@ static int compat_gpr_set(struct task_struct *target,
                unsigned int idx = start + i;
                compat_ulong_t reg;
 
-               ret = copy_from_user(&reg, ubuf, sizeof(reg));
-               if (ret)
-                       return ret;
+               if (kbuf) {
+                       memcpy(&reg, kbuf, sizeof(reg));
+                       kbuf += sizeof(reg);
+               } else {
+                       ret = copy_from_user(&reg, ubuf, sizeof(reg));
+                       if (ret)
+                               return ret;
 
-               ubuf += sizeof(reg);
+                       ubuf += sizeof(reg);
+               }
 
                switch (idx) {
                case 15:
@@ -827,6 +837,7 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
                                    compat_ulong_t val)
 {
        int ret;
+       mm_segment_t old_fs = get_fs();
 
        if (off & 3 || off >= COMPAT_USER_SZ)
                return -EIO;
@@ -834,10 +845,13 @@ static int compat_ptrace_write_user(struct task_struct *tsk, compat_ulong_t off,
        if (off >= sizeof(compat_elf_gregset_t))
                return 0;
 
+       set_fs(KERNEL_DS);
        ret = copy_regset_from_user(tsk, &user_aarch32_view,
                                    REGSET_COMPAT_GPR, off,
                                    sizeof(compat_ulong_t),
                                    &val);
+       set_fs(old_fs);
+
        return ret;
 }
 
index 9aecbac..13bbc3b 100644 (file)
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
        copy_page(kto, kfrom);
        __flush_dcache_area(kto, PAGE_SIZE);
 }
+EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
 
 void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
 {
        clear_page(kaddr);
 }
+EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
index e4193e3..0d64089 100644 (file)
@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
                return;
 
        if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
-               __flush_dcache_area(page_address(page), PAGE_SIZE);
+               __flush_dcache_area(page_address(page),
+                               PAGE_SIZE << compound_order(page));
                __flush_icache_all();
        } else if (icache_is_aivivt()) {
                __flush_icache_all();
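
Previously only the head page of a compound page was flushed. A worked example, assuming 4 KiB base pages: a 2 MiB transparent huge page has compound_order(page) == 9, so the flush now covers the whole mapping:

	size_t len = PAGE_SIZE << compound_order(page);	/* 4 KiB << 9 == 2 MiB */

	__flush_dcache_area(page_address(page), len);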
index 091d428..e90c542 100644 (file)
@@ -60,6 +60,17 @@ static int __init early_initrd(char *p)
 early_param("initrd", early_initrd);
 #endif
 
+/*
+ * Return the maximum physical address for ZONE_DMA (DMA_BIT_MASK(32)). It
+ * currently assumes that for memory starting above 4G, 32-bit devices will
+ * use a DMA offset.
+ */
+static phys_addr_t max_zone_dma_phys(void)
+{
+       phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
+       return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+}
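
A worked example with a hypothetical memory map where DRAM starts above 4 GiB, spanning 0x8000000000 to 0x8200000000:

	phys_addr_t offset = 0x8000000000ULL & GENMASK_ULL(63, 32);
						/* = 0x8000000000 */
	phys_addr_t limit  = min(offset + (1ULL << 32),
				 0x8200000000ULL);
						/* = 0x8100000000 */

For DRAM starting below 4 GiB the offset is 0 and the limit is simply min(4 GiB, end of DRAM), matching the old behaviour without needing dma_to_phys() this early in boot.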
+
 static void __init zone_sizes_init(unsigned long min, unsigned long max)
 {
        struct memblock_region *reg;
@@ -70,9 +81,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
 
        /* 4GB maximum for 32-bit only capable devices */
        if (IS_ENABLED(CONFIG_ZONE_DMA)) {
-               unsigned long max_dma_phys =
-                       (unsigned long)dma_to_phys(NULL, DMA_BIT_MASK(32) + 1);
-               max_dma = max(min, min(max, max_dma_phys >> PAGE_SHIFT));
+               max_dma = PFN_DOWN(max_zone_dma_phys());
                zone_size[ZONE_DMA] = max_dma - min;
        }
        zone_size[ZONE_NORMAL] = max - max_dma;
@@ -126,6 +135,8 @@ static void arm64_memory_present(void)
 
 void __init arm64_memblock_init(void)
 {
+       phys_addr_t dma_phys_limit = 0;
+
        /* Register the kernel text, kernel data and initrd with memblock */
        memblock_reserve(__pa(_text), _end - _text);
 #ifdef CONFIG_BLK_DEV_INITRD
@@ -141,7 +152,11 @@ void __init arm64_memblock_init(void)
        memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);
 
        early_init_fdt_scan_reserved_mem();
-       dma_contiguous_reserve(0);
+
+       /* 4GB maximum for 32-bit only capable devices */
+       if (IS_ENABLED(CONFIG_ZONE_DMA))
+               dma_phys_limit = max_zone_dma_phys();
+       dma_contiguous_reserve(dma_phys_limit);
 
        memblock_allow_resize();
        memblock_dump_all();
index a7e9bfd..fcec5ce 100644 (file)
@@ -102,7 +102,7 @@ CONFIG_I2C_CHARDEV=y
 CONFIG_I2C_BLACKFIN_TWI=y
 CONFIG_I2C_BLACKFIN_TWI_CLK_KHZ=100
 CONFIG_SPI=y
-CONFIG_SPI_BFIN_V3=y
+CONFIG_SPI_ADI_V3=y
 CONFIG_GPIOLIB=y
 CONFIG_GPIO_SYSFS=y
 # CONFIG_HWMON is not set
index ba35864..c9eec84 100644 (file)
@@ -145,7 +145,7 @@ SECTIONS
 
        .text_l1 L1_CODE_START : AT(LOADADDR(.exit.data) + SIZEOF(.exit.data))
 #else
-       .init.data : AT(__data_lma + __data_len)
+       .init.data : AT(__data_lma + __data_len + 32)
        {
                __sinitdata = .;
                INIT_DATA
index 63b0e4f..0ccf0cf 100644 (file)
@@ -20,6 +20,7 @@
 #include <linux/spi/spi.h>
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <linux/i2c.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index c65c6db..1e7290e 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index af58454..c7495dc 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index a021122..6b988ad 100644 (file)
@@ -21,6 +21,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 90138e6..1fe7ff2 100644 (file)
@@ -2118,7 +2118,7 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.1",  "pinctrl-adi2.0", NULL, "can1"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-lq043",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-ac97.0",  "pinctrl-adi2.0", NULL, "sport0"),
@@ -2140,7 +2140,9 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("pata-bf54x",  "pinctrl-adi2.0", NULL, "atapi_alter"),
 #endif
        PIN_MAP_MUX_GROUP_DEFAULT("bf5xx-nand.0",  "pinctrl-adi2.0", NULL, "nfc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", NULL, "keys_4x4"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bf54x-keys",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "4bit",  "pinctrl-adi2.0", "keys_4x4grp", "keys"),
+       PIN_MAP_MUX_GROUP("bf54x-keys", "8bit",  "pinctrl-adi2.0", "keys_8x8grp", "keys"),
 };
 
 static int __init ezkit_init(void)
index 430b16d..6ab9515 100644 (file)
@@ -44,6 +44,7 @@
 #include <linux/spi/flash.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/jiffies.h>
 #include <linux/i2c-pca-platform.h>
 #include <linux/delay.h>
index 9f777df..e862f78 100644 (file)
@@ -18,6 +18,7 @@
 #endif
 #include <linux/ata_platform.h>
 #include <linux/irq.h>
+#include <linux/gpio.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
 #include <asm/portmux.h>
index 88dee43..2de71e8 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/spi/spi.h>
 #include <linux/irq.h>
 #include <linux/interrupt.h>
+#include <linux/gpio.h>
 #include <linux/delay.h>
 #include <asm/dma.h>
 #include <asm/bfin5xx_spi.h>
index 1ba4600..e2c0b02 100644 (file)
@@ -698,8 +698,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 {
 #define CONFIG_SMC_GCTL_VAL     0x00000010
 
-       if (!devm_pinctrl_get_select_default(&pdev->dev))
-               return -EBUSY;
        bfin_write32(SMC_GCTL, CONFIG_SMC_GCTL_VAL);
        bfin_write32(SMC_B0CTL, 0x01002011);
        bfin_write32(SMC_B0TIM, 0x08170977);
@@ -709,7 +707,6 @@ int bf609_nor_flash_init(struct platform_device *pdev)
 
 void bf609_nor_flash_exit(struct platform_device *pdev)
 {
-       devm_pinctrl_put(pdev->dev.pins->p);
        bfin_write32(SMC_GCTL, 0);
 }
 
@@ -2058,15 +2055,14 @@ static struct pinctrl_map __initdata bfin_pinmux_map[] = {
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-rotary",  "pinctrl-adi2.0", NULL, "rotary"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin_can.0",  "pinctrl-adi2.0", NULL, "can0"),
        PIN_MAP_MUX_GROUP_DEFAULT("physmap-flash.0",  "pinctrl-adi2.0", NULL, "smc0"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", NULL, "ppi2_16b"),
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#if IS_ENABLED(CONFIG_VIDEO_MT9M114)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_8b"),
-#elif IS_ENABLED(CONFIG_VIDEO_VS6624)
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_16b"),
-#else
-       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", NULL, "ppi0_24b"),
-#endif
+       PIN_MAP_MUX_GROUP_DEFAULT("bf609_nl8048.2",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "8bit",  "pinctrl-adi2.0", "ppi2_8bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_display.0",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_display.0", "16bit",  "pinctrl-adi2.0", "ppi2_16bgrp", "ppi2"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "8bit",  "pinctrl-adi2.0", "ppi0_8bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP_DEFAULT("bfin_capture.0",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "16bit",  "pinctrl-adi2.0", "ppi0_16bgrp", "ppi0"),
+       PIN_MAP_MUX_GROUP("bfin_capture.0", "24bit",  "pinctrl-adi2.0", "ppi0_24bgrp", "ppi0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-tdm.0",  "pinctrl-adi2.0", NULL, "sport0"),
        PIN_MAP_MUX_GROUP_DEFAULT("bfin-i2s.1",  "pinctrl-adi2.0", NULL, "sport1"),
index 3ca0fb9..a1efd93 100644 (file)
@@ -10,6 +10,7 @@
 #define __MACH_BF609_PM_H__
 
 #include <linux/suspend.h>
+#include <linux/platform_device.h>
 
 extern int bfin609_pm_enter(suspend_state_t state);
 extern int bf609_pm_prepare(void);
@@ -19,6 +20,6 @@ void bf609_hibernate(void);
 void bfin_sec_raise_irq(unsigned int sid);
 void coreb_enable(void);
 
-int bf609_nor_flash_init(void);
-void bf609_nor_flash_exit(void);
+int bf609_nor_flash_init(struct platform_device *pdev);
+void bf609_nor_flash_exit(struct platform_device *pdev);
 #endif
index 0cdd695..b1bfcf4 100644 (file)
@@ -291,13 +291,13 @@ static struct bfin_cpu_pm_fns bf609_cpu_pm = {
 #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE)
 static int smc_pm_syscore_suspend(void)
 {
-       bf609_nor_flash_exit();
+       bf609_nor_flash_exit(NULL);
        return 0;
 }
 
 static void smc_pm_syscore_resume(void)
 {
-       bf609_nor_flash_init();
+       bf609_nor_flash_init(NULL);
 }
 
 static struct syscore_ops smc_pm_syscore_ops = {
index 867b7ce..1f94784 100644 (file)
@@ -1208,8 +1208,6 @@ int __init init_arch_irq(void)
 
        bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
 
-       bfin_sec_set_priority(CONFIG_SEC_IRQ_PRIORITY_LEVELS, sec_int_priority);
-
        /* Enable interrupts IVG7-15 */
        bfin_irq_flags |= IMASK_IVG15 |
            IMASK_IVG14 | IMASK_IVG13 | IMASK_IVG12 | IMASK_IVG11 |
index 1a871b7..344387a 100644 (file)
@@ -242,7 +242,7 @@ struct ioc {
        struct pci_dev  *sac_only_dev;
 };
 
-static struct ioc *ioc_list;
+static struct ioc *ioc_list, *ioc_found;
 static int reserve_sba_gart = 1;
 
 static SBA_INLINE void sba_mark_invalid(struct ioc *, dma_addr_t, size_t);
@@ -1809,20 +1809,13 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
        { SX2000_IOC_ID, "sx2000", NULL },
 };
 
-static struct ioc *
-ioc_init(unsigned long hpa, void *handle)
+static void ioc_init(unsigned long hpa, struct ioc *ioc)
 {
-       struct ioc *ioc;
        struct ioc_iommu *info;
 
-       ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
-       if (!ioc)
-               return NULL;
-
        ioc->next = ioc_list;
        ioc_list = ioc;
 
-       ioc->handle = handle;
        ioc->ioc_hpa = ioremap(hpa, 0x1000);
 
        ioc->func_id = READ_REG(ioc->ioc_hpa + IOC_FUNC_ID);
@@ -1863,8 +1856,6 @@ ioc_init(unsigned long hpa, void *handle)
                "%s %d.%d HPA 0x%lx IOVA space %dMb at 0x%lx\n",
                ioc->name, (ioc->rev >> 4) & 0xF, ioc->rev & 0xF,
                hpa, ioc->iov_size >> 20, ioc->ibase);
-
-       return ioc;
 }
 
 
@@ -2031,22 +2022,21 @@ sba_map_ioc_to_node(struct ioc *ioc, acpi_handle handle)
 #endif
 }
 
-static int
-acpi_sba_ioc_add(struct acpi_device *device,
-                const struct acpi_device_id *not_used)
+static void acpi_sba_ioc_add(struct ioc *ioc)
 {
-       struct ioc *ioc;
+       acpi_handle handle = ioc->handle;
        acpi_status status;
        u64 hpa, length;
        struct acpi_device_info *adi;
 
-       status = hp_acpi_csr_space(device->handle, &hpa, &length);
+       ioc_found = ioc->next;
+       status = hp_acpi_csr_space(handle, &hpa, &length);
        if (ACPI_FAILURE(status))
-               return 1;
+               goto err;
 
-       status = acpi_get_object_info(device->handle, &adi);
+       status = acpi_get_object_info(handle, &adi);
        if (ACPI_FAILURE(status))
-               return 1;
+               goto err;
 
        /*
         * For HWP0001, only SBA appears in ACPI namespace.  It encloses the PCI
@@ -2067,13 +2057,13 @@ acpi_sba_ioc_add(struct acpi_device *device,
        if (!iovp_shift)
                iovp_shift = 12;
 
-       ioc = ioc_init(hpa, device->handle);
-       if (!ioc)
-               return 1;
-
+       ioc_init(hpa, ioc);
        /* setup NUMA node association */
-       sba_map_ioc_to_node(ioc, device->handle);
-       return 0;
+       sba_map_ioc_to_node(ioc, handle);
+       return;
+
+ err:
+       kfree(ioc);
 }
 
 static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
@@ -2081,9 +2071,26 @@ static const struct acpi_device_id hp_ioc_iommu_device_ids[] = {
        {"HWP0004", 0},
        {"", 0},
 };
+
+static int acpi_sba_ioc_attach(struct acpi_device *device,
+                              const struct acpi_device_id *not_used)
+{
+       struct ioc *ioc;
+
+       ioc = kzalloc(sizeof(*ioc), GFP_KERNEL);
+       if (!ioc)
+               return -ENOMEM;
+
+       ioc->next = ioc_found;
+       ioc_found = ioc;
+       ioc->handle = device->handle;
+       return 1;
+}
+
+
 static struct acpi_scan_handler acpi_sba_ioc_handler = {
        .ids    = hp_ioc_iommu_device_ids,
-       .attach = acpi_sba_ioc_add,
+       .attach = acpi_sba_ioc_attach,
 };
 
 static int __init acpi_sba_ioc_init_acpi(void)
@@ -2118,9 +2125,12 @@ sba_init(void)
 #endif
 
        /*
-        * ioc_list should be populated by the acpi_sba_ioc_handler's .attach()
+        * ioc_found should be populated by the acpi_sba_ioc_handler's .attach()
         * routine, but that only happens if acpi_scan_init() has already run.
         */
+       while (ioc_found)
+               acpi_sba_ioc_add(ioc_found);
+
        if (!ioc_list) {
 #ifdef CONFIG_IA64_GENERIC
                /*
index 1dd275d..7b48587 100644 (file)
@@ -8,6 +8,7 @@
 #define force_o_largefile()    \
                (personality(current->personality) != PER_LINUX32)
 
+#include <linux/personality.h>
 #include <asm-generic/fcntl.h>
 
 #endif /* _ASM_IA64_FCNTL_H */
index dbb118e..a547884 100644 (file)
@@ -921,7 +921,8 @@ L(nocon):
        jls     1f
        lsrl    #1,%d1
 1:
-       movel   %d1,m68k_init_mapped_size
+       lea     %pc@(m68k_init_mapped_size),%a0
+       movel   %d1,%a0@
        mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
                %pc@(m68k_supervisor_cachemode)
 
index 958f1ad..3857737 100644 (file)
@@ -11,6 +11,7 @@
  */
 
 #include <linux/errno.h>
+#include <linux/export.h>
 #include <linux/module.h>
 #include <linux/sched.h>
 #include <linux/kernel.h>
@@ -30,6 +31,7 @@
 
 
 unsigned long (*mach_random_get_entropy)(void);
+EXPORT_SYMBOL_GPL(mach_random_get_entropy);
 
 
 /*
index 7a469ac..4e238e6 100644 (file)
@@ -269,6 +269,7 @@ config LANTIQ
 config LASAT
        bool "LASAT Networks platforms"
        select CEVT_R4K
+       select CRC32
        select CSRC_R4K
        select DMA_NONCOHERENT
        select SYS_HAS_EARLY_PRINTK
index f54bdbe..eeeb0f4 100644 (file)
@@ -32,8 +32,6 @@ struct sigcontext32 {
        __u32           sc_lo2;
        __u32           sc_hi3;
        __u32           sc_lo3;
-       __u64           sc_msaregs[32]; /* Most significant 64 bits */
-       __u32           sc_msa_csr;
 };
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
 #endif /* _ASM_SIGCONTEXT_H */
index f8d63b3..708c5d4 100644 (file)
@@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 #define Ip_u2s3u1(op)                                                  \
 void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
 
+#define Ip_s3s1s2(op)                                                  \
+void ISAOPC(op)(u32 **buf, int a, int b, int c)
+
 #define Ip_u2u1s3(op)                                                  \
 void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
 
@@ -147,6 +150,7 @@ Ip_u2s3u1(_scd);
 Ip_u2s3u1(_sd);
 Ip_u2u1u3(_sll);
 Ip_u3u2u1(_sllv);
+Ip_s3s1s2(_slt);
 Ip_u2u1s3(_sltiu);
 Ip_u3u1u2(_sltu);
 Ip_u2u1u3(_sra);
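
Ip_s3s1s2 is a new operand-signature class (three plain ints) added so uasm can emit slt, the signed counterpart of the existing _sltu. A usage sketch, assuming the usual uasm_i_* wrapper is generated for it (register numbers illustrative):

	u32 *p = buf;

	/* emit: slt $v0, $a0, $a1   ->   v0 = (s32)a0 < (s32)a1 */
	uasm_i_slt(&p, 2, 4, 5);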
index 4b71602..4bfdb9d 100644 (file)
@@ -273,6 +273,7 @@ enum mm_32a_minor_op {
        mm_and_op = 0x250,
        mm_or32_op = 0x290,
        mm_xor32_op = 0x310,
+       mm_slt_op = 0x350,
        mm_sltu_op = 0x390,
 };
 
index 681c176..6c9906f 100644 (file)
 #include <linux/types.h>
 #include <asm/sgidefs.h>
 
-/* Bits which may be set in sc_used_math */
-#define USEDMATH_FP    (1 << 0)
-#define USEDMATH_MSA   (1 << 1)
-
 #if _MIPS_SIM == _MIPS_SIM_ABI32
 
 /*
@@ -41,8 +37,6 @@ struct sigcontext {
        unsigned long           sc_lo2;
        unsigned long           sc_hi3;
        unsigned long           sc_lo3;
-       unsigned long long      sc_msaregs[32]; /* Most significant 64 bits */
-       unsigned long           sc_msa_csr;
 };
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
        __u32   sc_used_math;
        __u32   sc_dsp;
        __u32   sc_reserved;
-       __u64   sc_msaregs[32];
-       __u32   sc_msa_csr;
 };
 
 
index 02f075d..4bb5107 100644 (file)
@@ -293,7 +293,6 @@ void output_sc_defines(void)
        OFFSET(SC_LO2, sigcontext, sc_lo2);
        OFFSET(SC_HI3, sigcontext, sc_hi3);
        OFFSET(SC_LO3, sigcontext, sc_lo3);
-       OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
        BLANK();
 }
 #endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
        OFFSET(SC_MDLO, sigcontext, sc_mdlo);
        OFFSET(SC_PC, sigcontext, sc_pc);
        OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
-       OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
        BLANK();
 }
 #endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
        OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
        OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
        OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
-       OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
        BLANK();
 }
 #endif
index 4858642..a734b2c 100644 (file)
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
 
        board_bind_eic_interrupt = &msc_bind_eic_interrupt;
 
-       for (; nirq >= 0; nirq--, imp++) {
+       for (; nirq > 0; nirq--, imp++) {
                int n = imp->im_irq;
 
                switch (imp->im_type) {
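
An off-by-one fix: with nirq entries in the table, the old '>= 0' test ran the body nirq + 1 times and dereferenced one imp[] entry past the end. The corrected trip count, written as an equivalent forward loop (setup_one() is a hypothetical stand-in for the switch body):

	for (i = 0; i < nirq; i++, imp++)
		setup_one(imp);		/* runs exactly nirq times */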
index 5aa4c6f..c4c2069 100644 (file)
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
        if (!coupled_coherence)
                return;
 
-       smp_mb__before_atomic_inc();
+       smp_mb__before_atomic();
        atomic_inc(a);
 
        while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
 
        /* Indicate that this CPU might not be coherent */
        cpumask_clear_cpu(cpu, &cpu_coherent_mask);
-       smp_mb__after_clear_bit();
+       smp_mb__after_atomic();
 
        /* Create a non-coherent mapping of the core ready_count */
        core_ready_count = per_cpu(ready_count, core);
index 7181427..8352523 100644 (file)
@@ -13,7 +13,6 @@
  * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
  */
 #include <asm/asm.h>
-#include <asm/asmmacro.h>
 #include <asm/errno.h>
 #include <asm/fpregdef.h>
 #include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
        END(_restore_fp_context32)
 #endif
 
-#ifdef CONFIG_CPU_HAS_MSA
-
-       .macro  save_sc_msareg  wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-       copy_u_d \tmp, \wr, 1
-       EX sd   \tmp, (\off+(\wr*8))(\sc)
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-       copy_u_w \tmp, \wr, 2
-       EX sw   \tmp, (\off+(\wr*8)+0)(\sc)
-       copy_u_w \tmp, \wr, 3
-       EX sw   \tmp, (\off+(\wr*8)+4)(\sc)
-#else /* CONFIG_CPU_BIG_ENDIAN */
-       copy_u_w \tmp, \wr, 2
-       EX sw   \tmp, (\off+(\wr*8)+4)(\sc)
-       copy_u_w \tmp, \wr, 3
-       EX sw   \tmp, (\off+(\wr*8)+0)(\sc)
-#endif
-       .endm
-
-/*
- * int _save_msa_context(struct sigcontext *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context)
-       save_sc_msareg  0, SC_MSAREGS, a0, t0
-       save_sc_msareg  1, SC_MSAREGS, a0, t0
-       save_sc_msareg  2, SC_MSAREGS, a0, t0
-       save_sc_msareg  3, SC_MSAREGS, a0, t0
-       save_sc_msareg  4, SC_MSAREGS, a0, t0
-       save_sc_msareg  5, SC_MSAREGS, a0, t0
-       save_sc_msareg  6, SC_MSAREGS, a0, t0
-       save_sc_msareg  7, SC_MSAREGS, a0, t0
-       save_sc_msareg  8, SC_MSAREGS, a0, t0
-       save_sc_msareg  9, SC_MSAREGS, a0, t0
-       save_sc_msareg  10, SC_MSAREGS, a0, t0
-       save_sc_msareg  11, SC_MSAREGS, a0, t0
-       save_sc_msareg  12, SC_MSAREGS, a0, t0
-       save_sc_msareg  13, SC_MSAREGS, a0, t0
-       save_sc_msareg  14, SC_MSAREGS, a0, t0
-       save_sc_msareg  15, SC_MSAREGS, a0, t0
-       save_sc_msareg  16, SC_MSAREGS, a0, t0
-       save_sc_msareg  17, SC_MSAREGS, a0, t0
-       save_sc_msareg  18, SC_MSAREGS, a0, t0
-       save_sc_msareg  19, SC_MSAREGS, a0, t0
-       save_sc_msareg  20, SC_MSAREGS, a0, t0
-       save_sc_msareg  21, SC_MSAREGS, a0, t0
-       save_sc_msareg  22, SC_MSAREGS, a0, t0
-       save_sc_msareg  23, SC_MSAREGS, a0, t0
-       save_sc_msareg  24, SC_MSAREGS, a0, t0
-       save_sc_msareg  25, SC_MSAREGS, a0, t0
-       save_sc_msareg  26, SC_MSAREGS, a0, t0
-       save_sc_msareg  27, SC_MSAREGS, a0, t0
-       save_sc_msareg  28, SC_MSAREGS, a0, t0
-       save_sc_msareg  29, SC_MSAREGS, a0, t0
-       save_sc_msareg  30, SC_MSAREGS, a0, t0
-       save_sc_msareg  31, SC_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_save_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _save_msa_context32(struct sigcontext32 *sc)
- *
- * Save the upper 64 bits of each vector register along with the MSA_CSR
- * register into sc. Returns zero on success, else non-zero.
- */
-LEAF(_save_msa_context32)
-       save_sc_msareg  0, SC32_MSAREGS, a0, t0
-       save_sc_msareg  1, SC32_MSAREGS, a0, t0
-       save_sc_msareg  2, SC32_MSAREGS, a0, t0
-       save_sc_msareg  3, SC32_MSAREGS, a0, t0
-       save_sc_msareg  4, SC32_MSAREGS, a0, t0
-       save_sc_msareg  5, SC32_MSAREGS, a0, t0
-       save_sc_msareg  6, SC32_MSAREGS, a0, t0
-       save_sc_msareg  7, SC32_MSAREGS, a0, t0
-       save_sc_msareg  8, SC32_MSAREGS, a0, t0
-       save_sc_msareg  9, SC32_MSAREGS, a0, t0
-       save_sc_msareg  10, SC32_MSAREGS, a0, t0
-       save_sc_msareg  11, SC32_MSAREGS, a0, t0
-       save_sc_msareg  12, SC32_MSAREGS, a0, t0
-       save_sc_msareg  13, SC32_MSAREGS, a0, t0
-       save_sc_msareg  14, SC32_MSAREGS, a0, t0
-       save_sc_msareg  15, SC32_MSAREGS, a0, t0
-       save_sc_msareg  16, SC32_MSAREGS, a0, t0
-       save_sc_msareg  17, SC32_MSAREGS, a0, t0
-       save_sc_msareg  18, SC32_MSAREGS, a0, t0
-       save_sc_msareg  19, SC32_MSAREGS, a0, t0
-       save_sc_msareg  20, SC32_MSAREGS, a0, t0
-       save_sc_msareg  21, SC32_MSAREGS, a0, t0
-       save_sc_msareg  22, SC32_MSAREGS, a0, t0
-       save_sc_msareg  23, SC32_MSAREGS, a0, t0
-       save_sc_msareg  24, SC32_MSAREGS, a0, t0
-       save_sc_msareg  25, SC32_MSAREGS, a0, t0
-       save_sc_msareg  26, SC32_MSAREGS, a0, t0
-       save_sc_msareg  27, SC32_MSAREGS, a0, t0
-       save_sc_msareg  28, SC32_MSAREGS, a0, t0
-       save_sc_msareg  29, SC32_MSAREGS, a0, t0
-       save_sc_msareg  30, SC32_MSAREGS, a0, t0
-       save_sc_msareg  31, SC32_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_save_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-       .macro restore_sc_msareg        wr, off, sc, tmp
-#ifdef CONFIG_64BIT
-       EX ld   \tmp, (\off+(\wr*8))(\sc)
-       insert_d \wr, 1, \tmp
-#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
-       EX lw   \tmp, (\off+(\wr*8)+0)(\sc)
-       insert_w \wr, 2, \tmp
-       EX lw   \tmp, (\off+(\wr*8)+4)(\sc)
-       insert_w \wr, 3, \tmp
-#else /* CONFIG_CPU_BIG_ENDIAN */
-       EX lw   \tmp, (\off+(\wr*8)+4)(\sc)
-       insert_w \wr, 2, \tmp
-       EX lw   \tmp, (\off+(\wr*8)+0)(\sc)
-       insert_w \wr, 3, \tmp
-#endif
-       .endm
-
-/*
- * int _restore_msa_context(struct sigcontext *sc)
- */
-LEAF(_restore_msa_context)
-       restore_sc_msareg       0, SC_MSAREGS, a0, t0
-       restore_sc_msareg       1, SC_MSAREGS, a0, t0
-       restore_sc_msareg       2, SC_MSAREGS, a0, t0
-       restore_sc_msareg       3, SC_MSAREGS, a0, t0
-       restore_sc_msareg       4, SC_MSAREGS, a0, t0
-       restore_sc_msareg       5, SC_MSAREGS, a0, t0
-       restore_sc_msareg       6, SC_MSAREGS, a0, t0
-       restore_sc_msareg       7, SC_MSAREGS, a0, t0
-       restore_sc_msareg       8, SC_MSAREGS, a0, t0
-       restore_sc_msareg       9, SC_MSAREGS, a0, t0
-       restore_sc_msareg       10, SC_MSAREGS, a0, t0
-       restore_sc_msareg       11, SC_MSAREGS, a0, t0
-       restore_sc_msareg       12, SC_MSAREGS, a0, t0
-       restore_sc_msareg       13, SC_MSAREGS, a0, t0
-       restore_sc_msareg       14, SC_MSAREGS, a0, t0
-       restore_sc_msareg       15, SC_MSAREGS, a0, t0
-       restore_sc_msareg       16, SC_MSAREGS, a0, t0
-       restore_sc_msareg       17, SC_MSAREGS, a0, t0
-       restore_sc_msareg       18, SC_MSAREGS, a0, t0
-       restore_sc_msareg       19, SC_MSAREGS, a0, t0
-       restore_sc_msareg       20, SC_MSAREGS, a0, t0
-       restore_sc_msareg       21, SC_MSAREGS, a0, t0
-       restore_sc_msareg       22, SC_MSAREGS, a0, t0
-       restore_sc_msareg       23, SC_MSAREGS, a0, t0
-       restore_sc_msareg       24, SC_MSAREGS, a0, t0
-       restore_sc_msareg       25, SC_MSAREGS, a0, t0
-       restore_sc_msareg       26, SC_MSAREGS, a0, t0
-       restore_sc_msareg       27, SC_MSAREGS, a0, t0
-       restore_sc_msareg       28, SC_MSAREGS, a0, t0
-       restore_sc_msareg       29, SC_MSAREGS, a0, t0
-       restore_sc_msareg       30, SC_MSAREGS, a0, t0
-       restore_sc_msareg       31, SC_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_restore_msa_context)
-
-#ifdef CONFIG_MIPS32_COMPAT
-
-/*
- * int _restore_msa_context32(struct sigcontext32 *sc)
- */
-LEAF(_restore_msa_context32)
-       restore_sc_msareg       0, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       1, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       2, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       3, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       4, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       5, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       6, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       7, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       8, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       9, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       10, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       11, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       12, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       13, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       14, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       15, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       16, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       17, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       18, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       19, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       20, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       21, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       22, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       23, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       24, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       25, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       26, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       27, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       28, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       29, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       30, SC32_MSAREGS, a0, t0
-       restore_sc_msareg       31, SC32_MSAREGS, a0, t0
-       jr      ra
-        li     v0, 0
-       END(_restore_msa_context32)
-
-#endif /* CONFIG_MIPS32_COMPAT */
-
-#endif /* CONFIG_CPU_HAS_MSA */
-
        .set    reorder
 
        .type   fault@function
index 33133d3..9e60d11 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/bitops.h>
 #include <asm/cacheflush.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
 extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
 extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
 
-extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
-extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
-
 struct sigframe {
        u32 sf_ass[4];          /* argument save space for o32 */
        u32 sf_pad[2];          /* Was: signal trampoline */
@@ -99,61 +95,21 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
        return err;
 }
 
-/*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
-{
-       int i;
-       int err = 0;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |=
-                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-                              &sc->sc_msaregs[i]);
-       }
-       err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
-static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
-{
-       int i;
-       int err = 0;
-       u64 val;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |= __get_user(val, &sc->sc_msaregs[i]);
-               set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-       }
-       err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
 /*
  * Helper routines
  */
-static int protected_save_fp_context(struct sigcontext __user *sc,
-                                    unsigned used_math)
+static int protected_save_fp_context(struct sigcontext __user *sc)
 {
        int err;
-       bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = save_fp_context(sc);
-                       if (save_msa && !err)
-                               err = _save_msa_context(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_to_sigcontext(sc);
-                       if (save_msa && !err)
-                               err = copy_msa_to_sigcontext(sc);
                }
                if (likely(!err))
                        break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
         * EVA does not have FPU EVA instructions so saving fpu context directly
         * does not work.
         */
-       disable_msa();
        lose_fpu(1);
        err = save_fp_context(sc); /* this might fail */
-       if (save_msa && !err)
-               err = copy_msa_to_sigcontext(sc);
 #endif
        return err;
 }
 
-static int protected_restore_fp_context(struct sigcontext __user *sc,
-                                       unsigned used_math)
+static int protected_restore_fp_context(struct sigcontext __user *sc)
 {
        int err, tmp __maybe_unused;
-       bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
 #ifndef CONFIG_EVA
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = restore_fp_context(sc);
-                       if (restore_msa && !err) {
-                               enable_msa();
-                               err = _restore_msa_context(sc);
-                       } else {
-                               /* signal handler may have used MSA */
-                               disable_msa();
-                       }
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_from_sigcontext(sc);
-                       if (!err && (used_math & USEDMATH_MSA))
-                               err = copy_msa_from_sigcontext(sc);
                }
                if (likely(!err))
                        break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
         * EVA does not have FPU EVA instructions so restoring fpu context
         * directly does not work.
         */
-       enable_msa();
        lose_fpu(0);
        err = restore_fp_context(sc); /* this might fail */
-       if (restore_msa && !err)
-               err = copy_msa_from_sigcontext(sc);
 #endif
        return err;
 }
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
        }
 
-       used_math = used_math() ? USEDMATH_FP : 0;
-       used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+       used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);
 
        if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                 * Save FPU state to signal context. Signal handler
                 * will "inherit" current FPU state.
                 */
-               err |= protected_save_fp_context(sc, used_math);
+               err |= protected_save_fp_context(sc);
        }
        return err;
 }
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
 }
 
 static int
-check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math)
+check_and_restore_fp_context(struct sigcontext __user *sc)
 {
        int err, sig;
 
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= protected_restore_fp_context(sc, used_math);
+       err |= protected_restore_fp_context(sc);
        return err ?: sig;
 }
 
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
-                       err = check_and_restore_fp_context(sc, used_math);
+                       err = check_and_restore_fp_context(sc);
        } else {
-               /* signal handler may have used FPU or MSA. Disable them. */
-               disable_msa();
+               /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
        }
 
index 299f956..bae2e6e 100644 (file)
@@ -30,7 +30,6 @@
 #include <asm/sim.h>
 #include <asm/ucontext.h>
 #include <asm/fpu.h>
-#include <asm/msa.h>
 #include <asm/war.h>
 #include <asm/vdso.h>
 #include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
 extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
 extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
 
-extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
-extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
-
 /*
  * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
  */
@@ -114,60 +110,20 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
        return err;
 }
 
-/*
- * These functions will save only the upper 64 bits of the vector registers,
- * since the lower 64 bits have already been saved as the scalar FP context.
- */
-static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
-{
-       int i;
-       int err = 0;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |=
-                   __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
-                              &sc->sc_msaregs[i]);
-       }
-       err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
-static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
-{
-       int i;
-       int err = 0;
-       u64 val;
-
-       for (i = 0; i < NUM_FPU_REGS; i++) {
-               err |= __get_user(val, &sc->sc_msaregs[i]);
-               set_fpr64(&current->thread.fpu.fpr[i], 1, val);
-       }
-       err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
-
-       return err;
-}
-
 /*
  * sigcontext handlers
  */
-static int protected_save_fp_context32(struct sigcontext32 __user *sc,
-                                      unsigned used_math)
+static int protected_save_fp_context32(struct sigcontext32 __user *sc)
 {
        int err;
-       bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = save_fp_context32(sc);
-                       if (save_msa && !err)
-                               err = _save_msa_context32(sc);
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_to_sigcontext32(sc);
-                       if (save_msa && !err)
-                               err = copy_msa_to_sigcontext32(sc);
                }
                if (likely(!err))
                        break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
        return err;
 }
 
-static int protected_restore_fp_context32(struct sigcontext32 __user *sc,
-                                         unsigned used_math)
+static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
 {
        int err, tmp __maybe_unused;
-       bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
        while (1) {
                lock_fpu_owner();
                if (is_fpu_owner()) {
                        err = restore_fp_context32(sc);
-                       if (restore_msa && !err) {
-                               enable_msa();
-                               err = _restore_msa_context32(sc);
-                       } else {
-                               /* signal handler may have used MSA */
-                               disable_msa();
-                       }
                        unlock_fpu_owner();
                } else {
                        unlock_fpu_owner();
                        err = copy_fp_from_sigcontext32(sc);
-                       if (restore_msa && !err)
-                               err = copy_msa_from_sigcontext32(sc);
                }
                if (likely(!err))
                        break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
                err |= __put_user(mflo3(), &sc->sc_lo3);
        }
 
-       used_math = used_math() ? USEDMATH_FP : 0;
-       used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
+       used_math = !!used_math();
        err |= __put_user(used_math, &sc->sc_used_math);
 
        if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
                 * Save FPU state to signal context.  Signal handler
                 * will "inherit" current FPU state.
                 */
-               err |= protected_save_fp_context32(sc, used_math);
+               err |= protected_save_fp_context32(sc);
        }
        return err;
 }
 
 static int
-check_and_restore_fp_context32(struct sigcontext32 __user *sc,
-                              unsigned used_math)
+check_and_restore_fp_context32(struct sigcontext32 __user *sc)
 {
        int err, sig;
 
        err = sig = fpcsr_pending(&sc->sc_fpc_csr);
        if (err > 0)
                err = 0;
-       err |= protected_restore_fp_context32(sc, used_math);
+       err |= protected_restore_fp_context32(sc);
        return err ?: sig;
 }
 
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
        if (used_math) {
                /* restore fpu context if we have used it before */
                if (!err)
-                       err = check_and_restore_fp_context32(sc, used_math);
+                       err = check_and_restore_fp_context32(sc);
        } else {
-               /* signal handler may have used FPU or MSA. Disable them. */
-               disable_msa();
+               /* signal handler may have used FPU.  Give it up. */
                lose_fpu(0);
        }
 
index df0598d..949f2c6 100644 (file)
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
 
        core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
-       smp_mb__after_atomic_dec();
+       smp_mb__after_atomic();
        set_cpu_online(cpu, false);
        cpu_clear(cpu, cpu_callin_map);
 
index cd5e4f5..f3c56a1 100644 (file)
@@ -384,6 +384,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
 
        kfree(vcpu->arch.guest_ebase);
        kfree(vcpu->arch.kseg0_commpage);
+       kfree(vcpu);
 }
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
index 53f1d22..8e97acb 100644 (file)
  * Special constants
  */
 
-#define DPCNST(s, b, m)                                                        \
+/*
+ * Older GCC requires the inner braces for initialization of union ieee754dp's
+ * anonymous struct member.  Without them, an error will result.
+ */
+#define xPCNST(s, b, m, ebias)                                         \
 {                                                                      \
-       .sign   = (s),                                                  \
-       .bexp   = (b) + DP_EBIAS,                                       \
-       .mant   = (m)                                                   \
+       {                                                               \
+               .sign   = (s),                                          \
+               .bexp   = (b) + ebias,                                  \
+               .mant   = (m)                                           \
+       }                                                               \
 }
 
+#define DPCNST(s, b, m)                                                        \
+       xPCNST(s, b, m, DP_EBIAS)
+
 const union ieee754dp __ieee754dp_spcvals[] = {
        DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL),     /* + zero   */
        DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL),     /* - zero   */
@@ -62,11 +71,7 @@ const union ieee754dp __ieee754dp_spcvals[] = {
 };
 
 #define SPCNST(s, b, m)                                                        \
-{                                                                      \
-       .sign   = (s),                                                  \
-       .bexp   = (b) + SP_EBIAS,                                       \
-       .mant   = (m)                                                   \
-}
+       xPCNST(s, b, m, SP_EBIAS)
 
 const union ieee754sp __ieee754sp_spcvals[] = {
        SPCNST(0, SP_EMIN - 1, 0x000000),       /* + zero   */
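
A note on the xPCNST change above: when a union's first member is an
anonymous struct, some older GCC releases reject designated initializers
that omit the inner brace level, which is exactly what the new macro's
extra braces provide. A minimal stand-alone sketch, with illustrative
names rather than the kernel's union ieee754dp:

union demo {
	struct {			/* anonymous struct member */
		unsigned sign;
		unsigned bexp;
		unsigned long long mant;
	};
	unsigned long long bits[2];
};

/*
 * Older GCC rejects the flat form
 *	union demo d = { .sign = 1, .bexp = 2047, .mant = 0 };
 * while the inner braces initialize the anonymous member explicitly:
 */
union demo d = { { .sign = 1, .bexp = 2047, .mant = 0 } };
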
index 775c280..8399ddf 100644 (file)
@@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = {
        { insn_sd, 0, 0 },
        { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
        { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
+       { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
        { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
        { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
        { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
index 38792c2..6708a2d 100644 (file)
@@ -89,7 +89,7 @@ static struct insn insn_table[] = {
        { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_ld,  M(ld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
-       { insn_lh,  M(lw_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
+       { insn_lh,  M(lh_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lld,  M(lld_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_ll,  M(ll_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_lui,  M(lui_op, 0, 0, 0, 0, 0),  RT | SIMM },
@@ -110,6 +110,7 @@ static struct insn insn_table[] = {
        { insn_sd,  M(sd_op, 0, 0, 0, 0, 0),  RS | RT | SIMM },
        { insn_sll,  M(spec_op, 0, 0, 0, 0, sll_op),  RT | RD | RE },
        { insn_sllv,  M(spec_op, 0, 0, 0, 0, sllv_op),  RS | RT | RD },
+       { insn_slt,  M(spec_op, 0, 0, 0, 0, slt_op),  RS | RT | RD },
        { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
        { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
        { insn_sra,  M(spec_op, 0, 0, 0, 0, sra_op),  RT | RD | RE },
index 0051580..a01b0d6 100644 (file)
@@ -53,7 +53,7 @@ enum opcode {
        insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
        insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
        insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
-       insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra,
+       insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
        insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
        insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
        insn_xor, insn_xori, insn_yield,
@@ -139,6 +139,13 @@ Ip_u1u2u3(op)                                              \
 }                                                      \
 UASM_EXPORT_SYMBOL(uasm_i##op);
 
+#define I_s3s1s2(op)                                   \
+Ip_s3s1s2(op)                                          \
+{                                                      \
+       build_insn(buf, insn##op, b, c, a);             \
+}                                                      \
+UASM_EXPORT_SYMBOL(uasm_i##op);
+
 #define I_u2u1u3(op)                                   \
 Ip_u2u1u3(op)                                          \
 {                                                      \
@@ -289,6 +296,7 @@ I_u2s3u1(_scd)
 I_u2s3u1(_sd)
 I_u2u1u3(_sll)
 I_u3u2u1(_sllv)
+I_s3s1s2(_slt)
 I_u2u1s3(_sltiu)
 I_u3u1u2(_sltu)
 I_u2u1u3(_sra)
index a67b975..05a5661 100644 (file)
 /* Arguments used by JIT */
 #define ARGS_USED_BY_JIT       2 /* only applicable to 64-bit */
 
-#define FLAG_NEED_X_RESET      (1 << 0)
-
 #define SBIT(x)                        (1 << (x)) /* Signed version of BIT() */
 
 /**
  * @target:            Memory location for the compiled filter
  */
 struct jit_ctx {
-       const struct sk_filter *skf;
+       const struct bpf_prog *skf;
        unsigned int prologue_bytes;
        u32 idx;
        u32 flags;
@@ -153,6 +151,8 @@ static inline int optimize_div(u32 *k)
        return 0;
 }
 
+static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
+
 /* Simply emit the instruction if the JIT memory space has been allocated */
 #define emit_instr(ctx, func, ...)                     \
 do {                                                   \
@@ -166,9 +166,7 @@ do {                                                        \
 /* Determine if immediate is within the 16-bit signed range */
 static inline bool is_range16(s32 imm)
 {
-       if (imm >= SBIT(15) || imm < -SBIT(15))
-               return true;
-       return false;
+       return !(imm >= SBIT(15) || imm < -SBIT(15));
 }
 
 static inline void emit_addu(unsigned int dst, unsigned int src1,
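
A note on the is_range16() fix above: the helper previously returned
true when the immediate was outside the signed 16-bit range, the
opposite of what its name suggests, so the change inverts the helper
and flips every call site to match. A stand-alone sketch of the
corrected predicate (SBIT(15) is 1 << 15, so the range is
-32768..32767):

/* True iff imm fits a signed 16-bit immediate field. */
static int fits_s16(int imm)
{
	return imm >= -32768 && imm <= 32767;
}

/* fits_s16(32767) == 1, fits_s16(32768) == 0, fits_s16(-32769) == 0 */
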
@@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
 {
        if (ctx->target != NULL) {
                /* addiu can only handle s16 */
-               if (is_range16(imm)) {
+               if (!is_range16(imm)) {
                        u32 *p = &ctx->target[ctx->idx];
                        uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
                        p = &ctx->target[ctx->idx + 1];
@@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
        }
        ctx->idx++;
 
-       if (is_range16(imm))
+       if (!is_range16(imm))
                ctx->idx++;
 }
 
@@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src,
 static inline void emit_addiu(unsigned int dst, unsigned int src,
                              u32 imm, struct jit_ctx *ctx)
 {
-       if (is_range16(imm)) {
+       if (!is_range16(imm)) {
                emit_load_imm(r_tmp, imm, ctx);
                emit_addu(dst, r_tmp, src, ctx);
        } else {
@@ -313,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src,
                            unsigned int sa, struct jit_ctx *ctx)
 {
        /* sa is 5-bits long */
-       BUG_ON(sa >= BIT(5));
-       emit_instr(ctx, sll, dst, src, sa);
+       if (sa >= BIT(5))
+               /* Shifting >= 32 results in zero */
+               emit_jit_reg_move(dst, r_zero, ctx);
+       else
+               emit_instr(ctx, sll, dst, src, sa);
 }
 
 static inline void emit_srlv(unsigned int dst, unsigned int src,
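
The emit_sll() change above (and the matching emit_srl() change below)
trades a BUG_ON() for the semantics classic BPF requires: the MIPS
sll/srl encodings carry only a 5-bit shift amount, yet a filter may
legitimately request A <<= K with K >= 32, which must yield zero rather
than crash the kernel. A sketch of the behavior being preserved, with a
made-up helper name:

/* BPF left shift with K >= 32 folded to zero; the guard also avoids
 * the C-level undefined behavior of shifting a 32-bit value by >= 32. */
static unsigned int bpf_lsh(unsigned int a, unsigned int k)
{
	return (k >= 32) ? 0 : (a << k);
}
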
@@ -327,8 +328,17 @@ static inline void emit_srl(unsigned int dst, unsigned int src,
                            unsigned int sa, struct jit_ctx *ctx)
 {
        /* sa is 5-bits long */
-       BUG_ON(sa >= BIT(5));
-       emit_instr(ctx, srl, dst, src, sa);
+       if (sa >= BIT(5))
+               /* Shifting >= 32 results in zero */
+               emit_jit_reg_move(dst, r_zero, ctx);
+       else
+               emit_instr(ctx, srl, dst, src, sa);
+}
+
+static inline void emit_slt(unsigned int dst, unsigned int src1,
+                           unsigned int src2, struct jit_ctx *ctx)
+{
+       emit_instr(ctx, slt, dst, src1, src2);
 }
 
 static inline void emit_sltu(unsigned int dst, unsigned int src1,
@@ -341,7 +351,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src,
                              unsigned int imm, struct jit_ctx *ctx)
 {
        /* 16 bit immediate */
-       if (is_range16((s32)imm)) {
+       if (!is_range16((s32)imm)) {
                emit_load_imm(r_tmp, imm, ctx);
                emit_sltu(dst, src, r_tmp, ctx);
        } else {
@@ -408,7 +418,7 @@ static inline void emit_div(unsigned int dst, unsigned int src,
                u32 *p = &ctx->target[ctx->idx];
                uasm_i_divu(&p, dst, src);
                p = &ctx->target[ctx->idx + 1];
-               uasm_i_mfhi(&p, dst);
+               uasm_i_mflo(&p, dst);
        }
        ctx->idx += 2; /* 2 insts */
 }
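
The one-line emit_div() fix above is worth spelling out: MIPS divu
leaves the quotient in the LO register and the remainder in HI, so
reading HI via mfhi silently computed A % X where A / X was intended.
A small C model of the convention, with illustrative names and a
non-zero divisor assumed (the JIT branches out on a zero divisor before
dividing):

/* After "divu a, b": LO = a / b (read with mflo), HI = a % b (mfhi). */
struct hilo { unsigned int hi, lo; };

static struct hilo mips_divu(unsigned int a, unsigned int b)
{
	struct hilo r = { .hi = a % b, .lo = a / b };

	return r;	/* BPF_DIV wants r.lo; the old code read r.hi */
}
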
@@ -443,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src,
        emit_instr(ctx, wsbh, dst, src);
 }
 
+/* load pointer to register */
+static inline void emit_load_ptr(unsigned int dst, unsigned int src,
+                                int imm, struct jit_ctx *ctx)
+{
+       /* src contains the base address of the 32/64-bit pointer */
+       if (config_enabled(CONFIG_64BIT))
+               emit_instr(ctx, ld, dst, imm, src);
+       else
+               emit_instr(ctx, lw, dst, imm, src);
+}
+
 /* load a function pointer to register */
 static inline void emit_load_func(unsigned int reg, ptr imm,
                                  struct jit_ctx *ctx)
@@ -545,29 +566,13 @@ static inline u16 align_sp(unsigned int num)
        return num;
 }
 
-static inline void update_on_xread(struct jit_ctx *ctx)
-{
-       if (!(ctx->flags & SEEN_X))
-               ctx->flags |= FLAG_NEED_X_RESET;
-
-       ctx->flags |= SEEN_X;
-}
-
 static bool is_load_to_a(u16 inst)
 {
        switch (inst) {
-       case BPF_S_LD_W_LEN:
-       case BPF_S_LD_W_ABS:
-       case BPF_S_LD_H_ABS:
-       case BPF_S_LD_B_ABS:
-       case BPF_S_ANC_CPU:
-       case BPF_S_ANC_IFINDEX:
-       case BPF_S_ANC_MARK:
-       case BPF_S_ANC_PROTOCOL:
-       case BPF_S_ANC_RXHASH:
-       case BPF_S_ANC_VLAN_TAG:
-       case BPF_S_ANC_VLAN_TAG_PRESENT:
-       case BPF_S_ANC_QUEUE:
+       case BPF_LD | BPF_W | BPF_LEN:
+       case BPF_LD | BPF_W | BPF_ABS:
+       case BPF_LD | BPF_H | BPF_ABS:
+       case BPF_LD | BPF_B | BPF_ABS:
                return true;
        default:
                return false;
@@ -618,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
        if (ctx->flags & SEEN_MEM) {
                if (real_off % (RSIZE * 2))
                        real_off += RSIZE;
-               emit_addiu(r_M, r_sp, real_off, ctx);
+               if (config_enabled(CONFIG_64BIT))
+                       emit_daddiu(r_M, r_sp, real_off, ctx);
+               else
+                       emit_addiu(r_M, r_sp, real_off, ctx);
        }
 }
 
@@ -705,11 +713,11 @@ static void build_prologue(struct jit_ctx *ctx)
        if (ctx->flags & SEEN_SKB)
                emit_reg_move(r_skb, MIPS_R_A0, ctx);
 
-       if (ctx->flags & FLAG_NEED_X_RESET)
+       if (ctx->flags & SEEN_X)
                emit_jit_reg_move(r_X, r_zero, ctx);
 
        /* Do not leak kernel data to userspace */
-       if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst)))
+       if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
                emit_jit_reg_move(r_A, r_zero, ctx);
 }
 
@@ -757,13 +765,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
        return (u64)err << 32 | ntohl(ret);
 }
 
-#define PKT_TYPE_MAX 7
+#ifdef __BIG_ENDIAN_BITFIELD
+#define PKT_TYPE_MAX   (7 << 5)
+#else
+#define PKT_TYPE_MAX   7
+#endif
 static int pkt_type_offset(void)
 {
        struct sk_buff skb_probe = {
                .pkt_type = ~0,
        };
-       char *ct = (char *)&skb_probe;
+       u8 *ct = (u8 *)&skb_probe;
        unsigned int off;
 
        for (off = 0; off < sizeof(struct sk_buff); off++) {
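
pkt_type_offset() deserves a note: skb->pkt_type is a bitfield, so
offsetof() cannot name it; instead the function sets the field to
all-ones in an otherwise zeroed dummy sk_buff and scans for the byte
that became non-zero. The PKT_TYPE_MAX change accounts for big-endian
bitfield layout, where the 3-bit field occupies the top bits of that
byte, hence the 7 << 5 mask and the srl by 5 further down. A sketch of
the probe against a hypothetical stand-in struct:

struct probe_demo {
	unsigned long pad[2];
	unsigned char pkt_type:3;
};

static int demo_pkt_type_offset(void)
{
	struct probe_demo d = { .pkt_type = ~0 };	/* rest zeroed */
	unsigned char *p = (unsigned char *)&d;
	unsigned int off;

	for (off = 0; off < sizeof(d); off++)
		if (p[off])
			return off;	/* byte holding the bitfield */
	return -1;
}
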
@@ -777,52 +789,68 @@ static int pkt_type_offset(void)
 static int build_body(struct jit_ctx *ctx)
 {
        void *load_func[] = {jit_get_skb_b, jit_get_skb_h, jit_get_skb_w};
-       const struct sk_filter *prog = ctx->skf;
+       const struct bpf_prog *prog = ctx->skf;
        const struct sock_filter *inst;
        unsigned int i, off, load_order, condt;
        u32 k, b_off __maybe_unused;
 
        for (i = 0; i < prog->len; i++) {
+               u16 code;
+
                inst = &(prog->insns[i]);
                pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
                         __func__, inst->code, inst->jt, inst->jf, inst->k);
                k = inst->k;
+               code = bpf_anc_helper(inst);
 
                if (ctx->target == NULL)
                        ctx->offsets[i] = ctx->idx * 4;
 
-               switch (inst->code) {
-               case BPF_S_LD_IMM:
+               switch (code) {
+               case BPF_LD | BPF_IMM:
                        /* A <- k ==> li r_A, k */
                        ctx->flags |= SEEN_A;
                        emit_load_imm(r_A, k, ctx);
                        break;
-               case BPF_S_LD_W_LEN:
+               case BPF_LD | BPF_W | BPF_LEN:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
                        /* A <- len ==> lw r_A, offset(skb) */
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        off = offsetof(struct sk_buff, len);
                        emit_load(r_A, r_skb, off, ctx);
                        break;
-               case BPF_S_LD_MEM:
+               case BPF_LD | BPF_MEM:
                        /* A <- M[k] ==> lw r_A, offset(M) */
                        ctx->flags |= SEEN_MEM | SEEN_A;
                        emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_LD_W_ABS:
+               case BPF_LD | BPF_W | BPF_ABS:
                        /* A <- P[k:4] */
                        load_order = 2;
                        goto load;
-               case BPF_S_LD_H_ABS:
+               case BPF_LD | BPF_H | BPF_ABS:
                        /* A <- P[k:2] */
                        load_order = 1;
                        goto load;
-               case BPF_S_LD_B_ABS:
+               case BPF_LD | BPF_B | BPF_ABS:
                        /* A <- P[k:1] */
                        load_order = 0;
 load:
+                       /* the interpreter will deal with a negative K */
+                       if ((int)k < 0)
+                               return -ENOTSUPP;
+
                        emit_load_imm(r_off, k, ctx);
 load_common:
+                       /*
+                        * We may get here from the indirect loads, so
+                        * return if the offset is negative.
+                        */
+                       emit_slt(r_s0, r_off, r_zero, ctx);
+                       emit_bcond(MIPS_COND_NE, r_s0, r_zero,
+                                  b_imm(prog->len, ctx), ctx);
+                       emit_reg_move(r_ret, r_zero, ctx);
+
                        ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
                                SEEN_SKB | SEEN_A;
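
The two negative-offset guards above work together: a negative constant
K denotes an ancillary load this JIT does not implement, so it bails
out with -ENOTSUPP and the filter falls back to the interpreter, while
an indirect load's offset X + k is only known at run time, so
load_common now emits an slt/branch pair that makes the filter return 0
when the computed offset turns out negative. A sketch of what the
emitted guard does, with the corresponding instructions noted:

static int guarded_load(int off)
{
	if (off < 0)		/* slt r_s0, r_off, $zero	*/
		return 0;	/* bne r_s0, $zero, exit	*/
	/* ... fall through to the actual packet access ... */
	return 1;		/* placeholder for the load	*/
}
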
 
@@ -852,39 +880,42 @@ load_common:
                        emit_b(b_imm(prog->len, ctx), ctx);
                        emit_reg_move(r_ret, r_zero, ctx);
                        break;
-               case BPF_S_LD_W_IND:
+               case BPF_LD | BPF_W | BPF_IND:
                        /* A <- P[X + k:4] */
                        load_order = 2;
                        goto load_ind;
-               case BPF_S_LD_H_IND:
+               case BPF_LD | BPF_H | BPF_IND:
                        /* A <- P[X + k:2] */
                        load_order = 1;
                        goto load_ind;
-               case BPF_S_LD_B_IND:
+               case BPF_LD | BPF_B | BPF_IND:
                        /* A <- P[X + k:1] */
                        load_order = 0;
 load_ind:
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_OFF | SEEN_X;
                        emit_addiu(r_off, r_X, k, ctx);
                        goto load_common;
-               case BPF_S_LDX_IMM:
+               case BPF_LDX | BPF_IMM:
                        /* X <- k */
                        ctx->flags |= SEEN_X;
                        emit_load_imm(r_X, k, ctx);
                        break;
-               case BPF_S_LDX_MEM:
+               case BPF_LDX | BPF_MEM:
                        /* X <- M[k] */
                        ctx->flags |= SEEN_X | SEEN_MEM;
                        emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_LDX_W_LEN:
+               case BPF_LDX | BPF_W | BPF_LEN:
                        /* X <- len */
                        ctx->flags |= SEEN_X | SEEN_SKB;
                        off = offsetof(struct sk_buff, len);
                        emit_load(r_X, r_skb, off, ctx);
                        break;
-               case BPF_S_LDX_B_MSH:
+               case BPF_LDX | BPF_B | BPF_MSH:
+                       /* the interpreter will deal with a negative K */
+                       if ((int)k < 0)
+                               return -ENOTSUPP;
+
                        /* X <- 4 * (P[k:1] & 0xf) */
                        ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
                        /* Load offset to a1 */
@@ -917,50 +948,49 @@ load_ind:
                        emit_b(b_imm(prog->len, ctx), ctx);
                        emit_load_imm(r_ret, 0, ctx); /* delay slot */
                        break;
-               case BPF_S_ST:
+               case BPF_ST:
                        /* M[k] <- A */
                        ctx->flags |= SEEN_MEM | SEEN_A;
                        emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_STX:
+               case BPF_STX:
                        /* M[k] <- X */
                        ctx->flags |= SEEN_MEM | SEEN_X;
                        emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
                        break;
-               case BPF_S_ALU_ADD_K:
+               case BPF_ALU | BPF_ADD | BPF_K:
                        /* A += K */
                        ctx->flags |= SEEN_A;
                        emit_addiu(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_ADD_X:
+               case BPF_ALU | BPF_ADD | BPF_X:
                        /* A += X */
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_addu(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_SUB_K:
+               case BPF_ALU | BPF_SUB | BPF_K:
                        /* A -= K */
                        ctx->flags |= SEEN_A;
                        emit_addiu(r_A, r_A, -k, ctx);
                        break;
-               case BPF_S_ALU_SUB_X:
+               case BPF_ALU | BPF_SUB | BPF_X:
                        /* A -= X */
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_subu(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_MUL_K:
+               case BPF_ALU | BPF_MUL | BPF_K:
                        /* A *= K */
                        /* Load K to scratch register before MUL */
                        ctx->flags |= SEEN_A | SEEN_S0;
                        emit_load_imm(r_s0, k, ctx);
                        emit_mul(r_A, r_A, r_s0, ctx);
                        break;
-               case BPF_S_ALU_MUL_X:
+               case BPF_ALU | BPF_MUL | BPF_X:
                        /* A *= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_mul(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_DIV_K:
+               case BPF_ALU | BPF_DIV | BPF_K:
                        /* A /= k */
                        if (k == 1)
                                break;
@@ -973,7 +1003,7 @@ load_ind:
                        emit_load_imm(r_s0, k, ctx);
                        emit_div(r_A, r_s0, ctx);
                        break;
-               case BPF_S_ALU_MOD_K:
+               case BPF_ALU | BPF_MOD | BPF_K:
                        /* A %= k */
                        if (k == 1 || optimize_div(&k)) {
                                ctx->flags |= SEEN_A;
@@ -984,9 +1014,8 @@ load_ind:
                                emit_mod(r_A, r_s0, ctx);
                        }
                        break;
-               case BPF_S_ALU_DIV_X:
+               case BPF_ALU | BPF_DIV | BPF_X:
                        /* A /= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
                        emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -994,9 +1023,8 @@ load_ind:
                        emit_load_imm(r_val, 0, ctx); /* delay slot */
                        emit_div(r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_MOD_X:
+               case BPF_ALU | BPF_MOD | BPF_X:
                        /* A %= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_X | SEEN_A;
                        /* Check if r_X is zero */
                        emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -1004,94 +1032,89 @@ load_ind:
                        emit_load_imm(r_val, 0, ctx); /* delay slot */
                        emit_mod(r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_OR_K:
+               case BPF_ALU | BPF_OR | BPF_K:
                        /* A |= K */
                        ctx->flags |= SEEN_A;
                        emit_ori(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_OR_X:
+               case BPF_ALU | BPF_OR | BPF_X:
                        /* A |= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A;
                        emit_ori(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_XOR_K:
+               case BPF_ALU | BPF_XOR | BPF_K:
                        /* A ^= k */
                        ctx->flags |= SEEN_A;
                        emit_xori(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ANC_ALU_XOR_X:
-               case BPF_S_ALU_XOR_X:
+               case BPF_ANC | SKF_AD_ALU_XOR_X:
+               case BPF_ALU | BPF_XOR | BPF_X:
                        /* A ^= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A;
                        emit_xor(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_AND_K:
+               case BPF_ALU | BPF_AND | BPF_K:
                        /* A &= K */
                        ctx->flags |= SEEN_A;
                        emit_andi(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_AND_X:
+               case BPF_ALU | BPF_AND | BPF_X:
                        /* A &= X */
-                       update_on_xread(ctx);
                        ctx->flags |= SEEN_A | SEEN_X;
                        emit_and(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_LSH_K:
+               case BPF_ALU | BPF_LSH | BPF_K:
                        /* A <<= K */
                        ctx->flags |= SEEN_A;
                        emit_sll(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_LSH_X:
+               case BPF_ALU | BPF_LSH | BPF_X:
                        /* A <<= X */
                        ctx->flags |= SEEN_A | SEEN_X;
-                       update_on_xread(ctx);
                        emit_sllv(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_RSH_K:
+               case BPF_ALU | BPF_RSH | BPF_K:
                        /* A >>= K */
                        ctx->flags |= SEEN_A;
                        emit_srl(r_A, r_A, k, ctx);
                        break;
-               case BPF_S_ALU_RSH_X:
+               case BPF_ALU | BPF_RSH | BPF_X:
                        ctx->flags |= SEEN_A | SEEN_X;
-                       update_on_xread(ctx);
                        emit_srlv(r_A, r_A, r_X, ctx);
                        break;
-               case BPF_S_ALU_NEG:
+               case BPF_ALU | BPF_NEG:
                        /* A = -A */
                        ctx->flags |= SEEN_A;
                        emit_neg(r_A, ctx);
                        break;
-               case BPF_S_JMP_JA:
+               case BPF_JMP | BPF_JA:
                        /* pc += K */
                        emit_b(b_imm(i + k + 1, ctx), ctx);
                        emit_nop(ctx);
                        break;
-               case BPF_S_JMP_JEQ_K:
+               case BPF_JMP | BPF_JEQ | BPF_K:
                        /* pc += ( A == K ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_EQ | MIPS_COND_K;
                        goto jmp_cmp;
-               case BPF_S_JMP_JEQ_X:
+               case BPF_JMP | BPF_JEQ | BPF_X:
                        ctx->flags |= SEEN_X;
                        /* pc += ( A == X ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_EQ | MIPS_COND_X;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGE_K:
+               case BPF_JMP | BPF_JGE | BPF_K:
                        /* pc += ( A >= K ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GE | MIPS_COND_K;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGE_X:
+               case BPF_JMP | BPF_JGE | BPF_X:
                        ctx->flags |= SEEN_X;
                        /* pc += ( A >= X ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GE | MIPS_COND_X;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGT_K:
+               case BPF_JMP | BPF_JGT | BPF_K:
                        /* pc += ( A > K ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GT | MIPS_COND_K;
                        goto jmp_cmp;
-               case BPF_S_JMP_JGT_X:
+               case BPF_JMP | BPF_JGT | BPF_X:
                        ctx->flags |= SEEN_X;
                        /* pc += ( A > X ) ? pc->jt : pc->jf */
                        condt = MIPS_COND_GT | MIPS_COND_X;
@@ -1109,7 +1132,7 @@ jmp_cmp:
                                }
                                /* A < (K|X) ? r_scratch = 1 */
                                b_off = b_imm(i + inst->jf + 1, ctx);
-                               emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off,
+                               emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
                                           ctx);
                                emit_nop(ctx);
                                /* A > (K|X) ? scratch = 0 */
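
The MIPS_COND_GT to MIPS_COND_NE change here is subtle: the preceding
slt/sltu writes exactly 0 or 1 into r_s0, so the consuming branch
should simply test that flag against zero; the signed greater-than form
encoded the wrong intent, even if it happened to behave for the values
0 and 1. A C model of the lowering, taking A >= K as the example with
illustrative names:

/* slt leaves a 0/1 flag; bne tests it against $zero. */
static int jge_false_branch_taken(unsigned int a, unsigned int k)
{
	unsigned int flag = (a < k);	/* sltu r_s0, r_A, r_K */

	return flag != 0;		/* bne r_s0, $zero, jf */
}
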
@@ -1167,7 +1190,7 @@ jmp_cmp:
                                }
                        }
                        break;
-               case BPF_S_JMP_JSET_K:
+               case BPF_JMP | BPF_JSET | BPF_K:
                        ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
                        /* pc += (A & K) ? pc -> jt : pc -> jf */
                        emit_load_imm(r_s1, k, ctx);
@@ -1181,7 +1204,7 @@ jmp_cmp:
                        emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
-               case BPF_S_JMP_JSET_X:
+               case BPF_JMP | BPF_JSET | BPF_X:
                        ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
                        /* pc += (A & X) ? pc -> jt : pc -> jf */
                        emit_and(r_s0, r_A, r_X, ctx);
@@ -1194,7 +1217,7 @@ jmp_cmp:
                        emit_b(b_off, ctx);
                        emit_nop(ctx);
                        break;
-               case BPF_S_RET_A:
+               case BPF_RET | BPF_A:
                        ctx->flags |= SEEN_A;
                        if (i != prog->len - 1)
                                /*
@@ -1204,7 +1227,7 @@ jmp_cmp:
                                emit_b(b_imm(prog->len, ctx), ctx);
                        emit_reg_move(r_ret, r_A, ctx); /* delay slot */
                        break;
-               case BPF_S_RET_K:
+               case BPF_RET | BPF_K:
                        /*
                         * It can emit two instructions, so it does not fit in
                         * the delay slot.
@@ -1219,19 +1242,18 @@ jmp_cmp:
                                emit_nop(ctx);
                        }
                        break;
-               case BPF_S_MISC_TAX:
+               case BPF_MISC | BPF_TAX:
                        /* X = A */
                        ctx->flags |= SEEN_X | SEEN_A;
                        emit_jit_reg_move(r_X, r_A, ctx);
                        break;
-               case BPF_S_MISC_TXA:
+               case BPF_MISC | BPF_TXA:
                        /* A = X */
                        ctx->flags |= SEEN_A | SEEN_X;
-                       update_on_xread(ctx);
                        emit_jit_reg_move(r_A, r_X, ctx);
                        break;
                /* AUX */
-               case BPF_S_ANC_PROTOCOL:
+               case BPF_ANC | SKF_AD_PROTOCOL:
                        /* A = ntohs(skb->protocol) */
                        ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -1256,7 +1278,7 @@ jmp_cmp:
                        }
 #endif
                        break;
-               case BPF_S_ANC_CPU:
+               case BPF_ANC | SKF_AD_CPU:
                        ctx->flags |= SEEN_A | SEEN_OFF;
                        /* A = current_thread_info()->cpu */
                        BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
@@ -1265,11 +1287,12 @@ jmp_cmp:
                        /* $28/gp points to the thread_info struct */
                        emit_load(r_A, 28, off, ctx);
                        break;
-               case BPF_S_ANC_IFINDEX:
+               case BPF_ANC | SKF_AD_IFINDEX:
                        /* A = skb->dev->ifindex */
                        ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
                        off = offsetof(struct sk_buff, dev);
-                       emit_load(r_s0, r_skb, off, ctx);
+                       /* Load *dev pointer */
+                       emit_load_ptr(r_s0, r_skb, off, ctx);
                        /* error (0) in the delay slot */
                        emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
                                   b_imm(prog->len, ctx), ctx);
@@ -1279,31 +1302,36 @@ jmp_cmp:
                        off = offsetof(struct net_device, ifindex);
                        emit_load(r_A, r_s0, off, ctx);
                        break;
-               case BPF_S_ANC_MARK:
+               case BPF_ANC | SKF_AD_MARK:
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
                        off = offsetof(struct sk_buff, mark);
                        emit_load(r_A, r_skb, off, ctx);
                        break;
-               case BPF_S_ANC_RXHASH:
+               case BPF_ANC | SKF_AD_RXHASH:
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
                        off = offsetof(struct sk_buff, hash);
                        emit_load(r_A, r_skb, off, ctx);
                        break;
-               case BPF_S_ANC_VLAN_TAG:
-               case BPF_S_ANC_VLAN_TAG_PRESENT:
+               case BPF_ANC | SKF_AD_VLAN_TAG:
+               case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  vlan_tci) != 2);
                        off = offsetof(struct sk_buff, vlan_tci);
                        emit_half_load(r_s0, r_skb, off, ctx);
-                       if (inst->code == BPF_S_ANC_VLAN_TAG)
-                               emit_and(r_A, r_s0, VLAN_VID_MASK, ctx);
-                       else
-                               emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                               emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
+                       } else {
+                               emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
+                               /* return 1 if present */
+                               emit_sltu(r_A, r_zero, r_A, ctx);
+                       }
                        break;
-               case BPF_S_ANC_PKTTYPE:
+               case BPF_ANC | SKF_AD_PKTTYPE:
+                       ctx->flags |= SEEN_SKB;
+
                        off = pkt_type_offset();
 
                        if (off < 0)
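
The VLAN rework above reflects how skb->vlan_tci packs its contents:
the VLAN ID shares the field with the VLAN_TAG_PRESENT flag bit, so
SKF_AD_VLAN_TAG must mask the flag out of A, while
SKF_AD_VLAN_TAG_PRESENT must reduce it to a clean 0/1, which the
sltu-against-$zero idiom provides. A sketch of the two ancillary loads,
taking VLAN_TAG_PRESENT as 0x1000 per <linux/if_vlan.h> at the time:

#define DEMO_VLAN_TAG_PRESENT	0x1000

static unsigned int anc_vlan_tag(unsigned short vlan_tci)
{
	/* andi r_A, r_s0, (u16)~VLAN_TAG_PRESENT */
	return vlan_tci & (unsigned short)~DEMO_VLAN_TAG_PRESENT;
}

static unsigned int anc_vlan_tag_present(unsigned short vlan_tci)
{
	unsigned int a = vlan_tci & DEMO_VLAN_TAG_PRESENT;	/* andi */

	return 0 < a;	/* sltu r_A, $zero, r_A: 1 iff non-zero */
}
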
@@ -1311,8 +1339,12 @@ jmp_cmp:
                        emit_load_byte(r_tmp, r_skb, off, ctx);
                        /* Keep only the last 3 bits */
                        emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
+#ifdef __BIG_ENDIAN_BITFIELD
+                       /* Get the actual packet type to the lower 3 bits */
+                       emit_srl(r_A, r_A, 5, ctx);
+#endif
                        break;
-               case BPF_S_ANC_QUEUE:
+               case BPF_ANC | SKF_AD_QUEUE:
                        ctx->flags |= SEEN_SKB | SEEN_A;
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
                                                  queue_mapping) != 2);
@@ -1322,8 +1354,8 @@ jmp_cmp:
                        emit_half_load(r_A, r_skb, off, ctx);
                        break;
                default:
-                       pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__,
-                               inst->code);
+                       pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
+                                inst->code);
                        return -1;
                }
        }
@@ -1337,7 +1369,7 @@ jmp_cmp:
 
 int bpf_jit_enable __read_mostly;
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
        struct jit_ctx ctx;
        unsigned int alloc_size, tmp_idx;
@@ -1391,7 +1423,7 @@ out:
        kfree(ctx.offsets);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
                module_free(NULL, fp->bpf_func);
index a2fa297..f5645d6 100644 (file)
@@ -69,8 +69,6 @@
 #define SA_NOMASK      SA_NODEFER
 #define SA_ONESHOT     SA_RESETHAND
 
-#define SA_RESTORER    0x04000000 /* obsolete -- ignored */
-
 #define MINSIGSTKSZ    2048
 #define SIGSTKSZ       8192
 
index 608716f..af3bc35 100644 (file)
@@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = {
        {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 
        {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 
        {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 
-       {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"},
+       {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
+       {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
        {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 
        {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 
        {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 
index bb9f3b6..93c1963 100644 (file)
@@ -4,6 +4,7 @@
  * Copyright (C) 2000-2001 Hewlett Packard Company
  * Copyright (C) 2000 John Marvin
  * Copyright (C) 2001 Matthew Wilcox
+ * Copyright (C) 2014 Helge Deller <deller@gmx.de>
  *
  * These routines maintain argument size conversion between 32bit and 64bit
  * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
 
 #include <linux/compat.h>
 #include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/fs.h> 
-#include <linux/mm.h> 
-#include <linux/file.h> 
-#include <linux/signal.h>
-#include <linux/resource.h>
-#include <linux/times.h>
-#include <linux/time.h>
-#include <linux/smp.h>
-#include <linux/sem.h>
-#include <linux/shm.h>
-#include <linux/slab.h>
-#include <linux/uio.h>
-#include <linux/ncp_fs.h>
-#include <linux/poll.h>
-#include <linux/personality.h>
-#include <linux/stat.h>
-#include <linux/highmem.h>
-#include <linux/highuid.h>
-#include <linux/mman.h>
-#include <linux/binfmts.h>
-#include <linux/namei.h>
-#include <linux/vfs.h>
-#include <linux/ptrace.h>
-#include <linux/swap.h>
 #include <linux/syscalls.h>
 
-#include <asm/types.h>
-#include <asm/uaccess.h>
-#include <asm/mmu_context.h>
-
-#undef DEBUG
-
-#ifdef DEBUG
-#define DBG(x) printk x
-#else
-#define DBG(x)
-#endif
 
 asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        int r22, int r21, int r20)
@@ -57,3 +22,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
        current->comm, current->pid, r20);
     return -ENOSYS;
 }
+
+asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
+       compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
+       const char  __user * pathname)
+{
+       return sys_fanotify_mark(fanotify_fd, flags,
+                       ((__u64)mask1 << 32) | mask0,
+                        dfd, pathname);
+}
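
[Editor's note] A hedged worked example of the mask reassembly (values illustrative): with mask0 = 0x00000001 (FAN_ACCESS, low word) and mask1 = 0x00000002 (high word),

	((__u64)mask1 << 32) | mask0 == 0x0000000200000001ULL

which is the 64-bit mask the native sys_fanotify_mark() expects; the 32-bit compat ABI merely delivers it split across two registers.
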
index c5fa7a6..84c5d3a 100644 (file)
        ENTRY_SAME(accept4)             /* 320 */
        ENTRY_SAME(prlimit64)
        ENTRY_SAME(fanotify_init)
-       ENTRY_COMP(fanotify_mark)
+       ENTRY_DIFF(fanotify_mark)
        ENTRY_COMP(clock_adjtime)
        ENTRY_SAME(name_to_handle_at)   /* 325 */
        ENTRY_COMP(open_by_handle_at)
index ae085ad..0bef864 100644 (file)
@@ -728,7 +728,6 @@ static void __init pagetable_init(void)
 #endif
 
        empty_zero_page = alloc_bootmem_pages(PAGE_SIZE);
-       memset(empty_zero_page, 0, PAGE_SIZE);
 }
 
 static void __init gateway_init(void)
index bd6dd6e..80b94b0 100644 (file)
@@ -145,6 +145,7 @@ config PPC
        select HAVE_IRQ_EXIT_ON_IRQ_STACK
        select ARCH_USE_CMPXCHG_LOCKREF if PPC64
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config GENERIC_CSUM
        def_bool CPU_LITTLE_ENDIAN
@@ -414,7 +415,7 @@ config KEXEC
 config CRASH_DUMP
        bool "Build a kdump crash kernel"
        depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
-       select RELOCATABLE if PPC64 || 44x || FSL_BOOKE
+       select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
        help
          Build a kernel suitable for use as a kdump capture kernel.
          The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1018,7 @@ endmenu
 if PPC64
 config RELOCATABLE
        bool "Build a relocatable kernel"
+       depends on !COMPILE_TEST
        select NONSTATIC_KERNEL
        help
          This builds a kernel image that is capable of running anywhere
index 790352f..35d16bd 100644 (file)
@@ -303,7 +303,6 @@ config PPC_EARLY_DEBUG_OPAL_VTERMNO
          This corresponds to which /dev/hvcN you want to use for early
          debug.
 
-         On OPAL v1 (takeover) this should always be 0
          On OPAL v2, this will be 0 for network console and 1 or 2 for
          the machine built-in serial ports.
 
index 37991e1..840a550 100644 (file)
@@ -88,4 +88,15 @@ static inline unsigned long ppc_function_entry(void *func)
 #endif
 }
 
+static inline unsigned long ppc_global_function_entry(void *func)
+{
+#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
+       /* On PPC64 ABIv2 the global entry point is the function's address */
+       return (unsigned long)func;
+#else
+       /* In all other cases there is no change vs ppc_function_entry() */
+       return ppc_function_entry(func);
+#endif
+}
+
 #endif /* _ASM_POWERPC_CODE_PATCHING_H */
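
[Editor's note] A sketch of why returning the plain address is correct, assuming the usual ELFv2 function prologue (illustrative, not from the patch):

	func:					# global entry, caller set r12 = &func
		addis	r2,r12,(.TOC.-func)@ha
		addi	r2,r2,(.TOC.-func)@l
		.localentry func, .-func	# local entry, caller's r2 already valid

Under ABIv2 the global entry point is the symbol address itself, while ppc_function_entry() may skip ahead to the local entry; ABIv1 keeps using the function-descriptor path.
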
index bc23477..0fdd7ee 100644 (file)
@@ -447,6 +447,7 @@ extern const char *powerpc_base_platform;
            CPU_FTR_DBELL | CPU_FTR_HAS_PPR | CPU_FTR_DAWR | \
            CPU_FTR_ARCH_207S | CPU_FTR_TM_COMP)
 #define CPU_FTRS_POWER8E (CPU_FTRS_POWER8 | CPU_FTR_PMAO_BUG)
+#define CPU_FTRS_POWER8_DD1 (CPU_FTRS_POWER8 & ~CPU_FTR_DBELL)
 #define CPU_FTRS_CELL  (CPU_FTR_USE_TB | CPU_FTR_LWSYNC | \
            CPU_FTR_PPCAS_ARCH_V2 | CPU_FTR_CTRL | \
            CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
index fddb72b..d645428 100644 (file)
@@ -198,8 +198,10 @@ static inline unsigned long compute_tlbie_rb(unsigned long v, unsigned long r,
        return rb;
 }
 
-static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+static inline unsigned long __hpte_page_size(unsigned long h, unsigned long l,
+                                            bool is_base_size)
 {
+
        int size, a_psize;
        /* Look at the 8 bit LP value */
        unsigned int lp = (l >> LP_SHIFT) & ((1 << LP_BITS) - 1);
@@ -214,14 +216,27 @@ static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
                                continue;
 
                        a_psize = __hpte_actual_psize(lp, size);
-                       if (a_psize != -1)
+                       if (a_psize != -1) {
+                               if (is_base_size)
+                                       return 1ul << mmu_psize_defs[size].shift;
                                return 1ul << mmu_psize_defs[a_psize].shift;
+                       }
                }
 
        }
        return 0;
 }
 
+static inline unsigned long hpte_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 0);
+}
+
+static inline unsigned long hpte_base_page_size(unsigned long h, unsigned long l)
+{
+       return __hpte_page_size(h, l, 1);
+}
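
[Editor's note] A hedged example of the new split: for an MPSS mapping with a 64K base page size backed by an actual 16M page (shift values per the usual mmu_psize_defs),

	hpte_base_page_size(v, r) == 1ul << 16	/* 64K: what segment bookkeeping cares about */
	hpte_page_size(v, r)      == 1ul << 24	/* 16M: the real translation size */

Callers that previously compared against hpte_page_size() for SLB/VRMA purposes switch to the base size, as the later KVM hunks show.
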
+
 static inline unsigned long hpte_rpn(unsigned long ptel, unsigned long psize)
 {
        return ((ptel & HPTE_R_RPN) & ~(psize - 1)) >> PAGE_SHIFT;
index 807014d..c2b4dcf 100644 (file)
@@ -22,6 +22,7 @@
  */
 #include <asm/pgtable-ppc64.h>
 #include <asm/bug.h>
+#include <asm/processor.h>
 
 /*
  * Segment table
@@ -496,7 +497,7 @@ extern void slb_set_size(u16 size);
  */
 struct subpage_prot_table {
        unsigned long maxaddr;  /* only addresses < this are protected */
-       unsigned int **protptrs[2];
+       unsigned int **protptrs[(TASK_SIZE_USER64 >> 43)];
        unsigned int *low_prot[4];
 };
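
[Editor's note] Worked arithmetic for the new bound, assuming this era's TASK_SIZE_USER64 of 2^46 (64 TB): each top-level protptrs pointer covers a 2^43-byte slice, so (TASK_SIZE_USER64 >> 43) == 8 entries, where the hard-coded [2] only covered the first 16 TB of the now-larger address space.
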
 
index f8d1d6d..e61f24e 100644 (file)
@@ -19,8 +19,7 @@
 #define MMU_FTR_TYPE_40x               ASM_CONST(0x00000004)
 #define MMU_FTR_TYPE_44x               ASM_CONST(0x00000008)
 #define MMU_FTR_TYPE_FSL_E             ASM_CONST(0x00000010)
-#define MMU_FTR_TYPE_3E                        ASM_CONST(0x00000020)
-#define MMU_FTR_TYPE_47x               ASM_CONST(0x00000040)
+#define MMU_FTR_TYPE_47x               ASM_CONST(0x00000020)
 
 /*
  * This is individual features
                                MMU_FTR_CI_LARGE_PAGE
 #define MMU_FTRS_PA6T          MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
                                MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
-#define MMU_FTRS_A2            MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
-                               MMU_FTR_USE_TLBIVAX_BCAST | \
-                               MMU_FTR_LOCK_BCAST_INVAL | \
-                               MMU_FTR_USE_TLBRSRV | \
-                               MMU_FTR_USE_PAIRED_MAS | \
-                               MMU_FTR_TLBIEL | \
-                               MMU_FTR_16M_PAGE
 #ifndef __ASSEMBLY__
 #include <asm/cputable.h>
 
index 4600188..0da1dbd 100644 (file)
 #ifndef __OPAL_H
 #define __OPAL_H
 
-/****** Takeover interface ********/
-
-/* PAPR H-Call used to querty the HAL existence and/or instanciate
- * it from within pHyp (tech preview only).
- *
- * This is exclusively used in prom_init.c
- */
-
 #ifndef __ASSEMBLY__
-
-struct opal_takeover_args {
-       u64     k_image;                /* r4 */
-       u64     k_size;                 /* r5 */
-       u64     k_entry;                /* r6 */
-       u64     k_entry2;               /* r7 */
-       u64     hal_addr;               /* r8 */
-       u64     rd_image;               /* r9 */
-       u64     rd_size;                /* r10 */
-       u64     rd_loc;                 /* r11 */
-};
-
 /*
  * SG entry
  *
@@ -55,15 +35,6 @@ struct opal_sg_list {
 /* We calculate number of sg entries based on PAGE_SIZE */
 #define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
 
-extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
-
-extern long opal_do_takeover(struct opal_takeover_args *args);
-
-struct rtas_args;
-extern int opal_enter_rtas(struct rtas_args *args,
-                          unsigned long data,
-                          unsigned long entry);
-
 #endif /* __ASSEMBLY__ */
 
 /****** OPAL APIs ******/
index 9ed7371..b3e9360 100644 (file)
@@ -61,8 +61,7 @@ struct power_pmu {
 #define PPMU_SIAR_VALID                0x00000010 /* Processor has SIAR Valid bit */
 #define PPMU_HAS_SSLOT         0x00000020 /* Has sampled slot in MMCRA */
 #define PPMU_HAS_SIER          0x00000040 /* Has SIER */
-#define PPMU_BHRB              0x00000080 /* has BHRB feature enabled */
-#define PPMU_EBB               0x00000100 /* supports event based branch */
+#define PPMU_ARCH_207S         0x00000080 /* PMC is architecture v2.07S */
 
 /*
  * Values for flags to get_alternatives()
index 9ea266e..7e46125 100644 (file)
@@ -277,6 +277,8 @@ n:
        .globl n;       \
 n:
 
+#define _GLOBAL_TOC(name) _GLOBAL(name)
+
 #define _KPROBE(n)     \
        .section ".kprobes.text","a";   \
        .globl  n;      \
index b9bd1ca..96f59de 100644 (file)
@@ -9,10 +9,6 @@
 
 #include <uapi/asm/swab.h>
 
-#ifdef __GNUC__
-#ifndef __powerpc64__
-#endif /* __powerpc64__ */
-
 static __inline__ __u16 ld_le16(const volatile __u16 *addr)
 {
        __u16 val;
@@ -20,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
        __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
        return val;
 }
-#define __arch_swab16p ld_le16
 
 static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
 {
        __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static inline void __arch_swab16s(__u16 *addr)
-{
-       st_le16(addr, *addr);
-}
-#define __arch_swab16s __arch_swab16s
-
 static __inline__ __u32 ld_le32(const volatile __u32 *addr)
 {
        __u32 val;
@@ -40,42 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
        __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
        return val;
 }
-#define __arch_swab32p ld_le32
 
 static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
 {
        __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
 }
 
-static inline void __arch_swab32s(__u32 *addr)
-{
-       st_le32(addr, *addr);
-}
-#define __arch_swab32s __arch_swab32s
-
-static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
-{
-       __u16 result;
-
-       __asm__("rlwimi %0,%1,8,16,23"
-           : "=r" (result)
-           : "r" (value), "0" (value >> 8));
-       return result;
-}
-#define __arch_swab16 __arch_swab16
-
-static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
-{
-       __u32 result;
-
-       __asm__("rlwimi %0,%1,24,16,23\n\t"
-           "rlwimi %0,%1,8,8,15\n\t"
-           "rlwimi %0,%1,24,0,7"
-           : "=r" (result)
-           : "r" (value), "0" (value >> 24));
-       return result;
-}
-#define __arch_swab32 __arch_swab32
-
-#endif /* __GNUC__ */
 #endif /* _ASM_POWERPC_SWAB_H */
index 965291b..0c15764 100644 (file)
@@ -527,6 +527,26 @@ static struct cpu_spec __initdata cpu_specs[] = {
                .machine_check_early    = __machine_check_early_realmode_p8,
                .platform               = "power8",
        },
+       {       /* Power8 DD1: Does not support doorbell IPIs */
+               .pvr_mask               = 0xffffff00,
+               .pvr_value              = 0x004d0100,
+               .cpu_name               = "POWER8 (raw)",
+               .cpu_features           = CPU_FTRS_POWER8_DD1,
+               .cpu_user_features      = COMMON_USER_POWER8,
+               .cpu_user_features2     = COMMON_USER2_POWER8,
+               .mmu_features           = MMU_FTRS_POWER8,
+               .icache_bsize           = 128,
+               .dcache_bsize           = 128,
+               .num_pmcs               = 6,
+               .pmc_type               = PPC_PMC_IBM,
+               .oprofile_cpu_type      = "ppc64/power8",
+               .oprofile_type          = PPC_OPROFILE_INVALID,
+               .cpu_setup              = __setup_cpu_power8,
+               .cpu_restore            = __restore_cpu_power8,
+               .flush_tlb              = __flush_tlb_power8,
+               .machine_check_early    = __machine_check_early_realmode_p8,
+               .platform               = "power8",
+       },
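
[Editor's note] Ordering matters here: cpu_specs[] is matched first-hit by (pvr & pvr_mask) == pvr_value, so the DD1 entry with its finer 0xffffff00 mask (matching revision 1.0 exactly) must sit before the generic POWER8 entry whose 0xffff0000 mask would otherwise swallow it.
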
        {       /* Power8 */
                .pvr_mask               = 0xffff0000,
                .pvr_value              = 0x004d0000,
index f202d07..d178834 100644 (file)
@@ -10,6 +10,8 @@
  *
  */
 
+#define pr_fmt(fmt) "ftrace-powerpc: " fmt
+
 #include <linux/spinlock.h>
 #include <linux/hardirq.h>
 #include <linux/uaccess.h>
@@ -105,7 +107,7 @@ __ftrace_make_nop(struct module *mod,
                  struct dyn_ftrace *rec, unsigned long addr)
 {
        unsigned int op;
-       unsigned long ptr;
+       unsigned long entry, ptr;
        unsigned long ip = rec->ip;
        void *tramp;
 
@@ -115,7 +117,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
-               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }
 
@@ -125,21 +127,21 @@ __ftrace_make_nop(struct module *mod,
        pr_devel("ip:%lx jumps to %p", ip, tramp);
 
        if (!is_module_trampoline(tramp)) {
-               printk(KERN_ERR "Not a trampoline\n");
+               pr_err("Not a trampoline\n");
                return -EINVAL;
        }
 
        if (module_trampoline_target(mod, tramp, &ptr)) {
-               printk(KERN_ERR "Failed to get trampoline target\n");
+               pr_err("Failed to get trampoline target\n");
                return -EFAULT;
        }
 
        pr_devel("trampoline target %lx", ptr);
 
+       entry = ppc_global_function_entry((void *)addr);
        /* This should match what was called */
-       if (ptr != ppc_function_entry((void *)addr)) {
-               printk(KERN_ERR "addr %lx does not match expected %lx\n",
-                       ptr, ppc_function_entry((void *)addr));
+       if (ptr != entry) {
+               pr_err("addr %lx does not match expected %lx\n", ptr, entry);
                return -EINVAL;
        }
 
@@ -179,7 +181,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Make sure that this is still a 24bit jump */
        if (!is_bl_op(op)) {
-               printk(KERN_ERR "Not expected bl: opcode is %x\n", op);
+               pr_err("Not expected bl: opcode is %x\n", op);
                return -EINVAL;
        }
 
@@ -198,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
 
        /* Find where the trampoline jumps to */
        if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
-               printk(KERN_ERR "Failed to read %lx\n", tramp);
+               pr_err("Failed to read %lx\n", tramp);
                return -EFAULT;
        }
 
@@ -209,7 +211,7 @@ __ftrace_make_nop(struct module *mod,
            ((jmp[1] & 0xffff0000) != 0x398c0000) ||
            (jmp[2] != 0x7d8903a6) ||
            (jmp[3] != 0x4e800420)) {
-               printk(KERN_ERR "Not a trampoline\n");
+               pr_err("Not a trampoline\n");
                return -EINVAL;
        }
 
@@ -221,8 +223,7 @@ __ftrace_make_nop(struct module *mod,
        pr_devel(" %lx ", tramp);
 
        if (tramp != addr) {
-               printk(KERN_ERR
-                      "Trampoline location %08lx does not match addr\n",
+               pr_err("Trampoline location %08lx does not match addr\n",
                       tramp);
                return -EINVAL;
        }
@@ -263,15 +264,13 @@ int ftrace_make_nop(struct module *mod,
         */
        if (!rec->arch.mod) {
                if (!mod) {
-                       printk(KERN_ERR "No module loaded addr=%lx\n",
-                              addr);
+                       pr_err("No module loaded addr=%lx\n", addr);
                        return -EFAULT;
                }
                rec->arch.mod = mod;
        } else if (mod) {
                if (mod != rec->arch.mod) {
-                       printk(KERN_ERR
-                              "Record mod %p not equal to passed in mod %p\n",
+                       pr_err("Record mod %p not equal to passed in mod %p\n",
                               rec->arch.mod, mod);
                        return -EINVAL;
                }
@@ -307,26 +306,25 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * The load offset is different depending on the ABI. For simplicity
         * just mask it out when doing the compare.
         */
-       if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) {
-               printk(KERN_ERR "Unexpected call sequence: %x %x\n",
-                       op[0], op[1]);
+       if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
+               pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
                return -EINVAL;
        }
 
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
-               printk(KERN_ERR "No ftrace trampoline\n");
+               pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }
 
        /* Ensure branch is within 24 bits */
-       if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
-               printk(KERN_ERR "Branch out of range");
+       if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
+               pr_err("Branch out of range\n");
                return -EINVAL;
        }
 
        if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
-               printk(KERN_ERR "REL24 out of range!\n");
+               pr_err("REL24 out of range!\n");
                return -EINVAL;
        }
 
@@ -345,13 +343,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
 
        /* It should be pointing to a nop */
        if (op != PPC_INST_NOP) {
-               printk(KERN_ERR "Expected NOP but have %x\n", op);
+               pr_err("Expected NOP but have %x\n", op);
                return -EINVAL;
        }
 
        /* If we never set up a trampoline to ftrace_caller, then bail */
        if (!rec->arch.mod->arch.tramp) {
-               printk(KERN_ERR "No ftrace trampoline\n");
+               pr_err("No ftrace trampoline\n");
                return -EINVAL;
        }
 
@@ -359,7 +357,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
        op = create_branch((unsigned int *)ip,
                           rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
        if (!op) {
-               printk(KERN_ERR "REL24 out of range!\n");
+               pr_err("REL24 out of range!\n");
                return -EINVAL;
        }
 
@@ -397,7 +395,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
         * already have a module defined.
         */
        if (!rec->arch.mod) {
-               printk(KERN_ERR "No module loaded\n");
+               pr_err("No module loaded\n");
                return -EINVAL;
        }
 
index 2480256..5cf3d36 100644 (file)
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
 
 _GLOBAL(power7_sleep)
        li      r3,1
-       li      r4,0
+       li      r4,1
        b       power7_powersave_common
        /* No return */
 
index b82227e..12e48d5 100644 (file)
@@ -23,7 +23,7 @@ unsigned int ioread16(void __iomem *addr)
 }
 unsigned int ioread16be(void __iomem *addr)
 {
-       return in_be16(addr);
+       return readw_be(addr);
 }
 unsigned int ioread32(void __iomem *addr)
 {
@@ -31,7 +31,7 @@ unsigned int ioread32(void __iomem *addr)
 }
 unsigned int ioread32be(void __iomem *addr)
 {
-       return in_be32(addr);
+       return readl_be(addr);
 }
 EXPORT_SYMBOL(ioread8);
 EXPORT_SYMBOL(ioread16);
@@ -49,7 +49,7 @@ void iowrite16(u16 val, void __iomem *addr)
 }
 void iowrite16be(u16 val, void __iomem *addr)
 {
-       out_be16(addr, val);
+       writew_be(val, addr);
 }
 void iowrite32(u32 val, void __iomem *addr)
 {
@@ -57,7 +57,7 @@ void iowrite32(u32 val, void __iomem *addr)
 }
 void iowrite32be(u32 val, void __iomem *addr)
 {
-       out_be32(addr, val);
+       writel_be(val, addr);
 }
 EXPORT_SYMBOL(iowrite8);
 EXPORT_SYMBOL(iowrite16);
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(iowrite32be);
  */
 void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insb((u8 __iomem *) addr, dst, count);
+       readsb(addr, dst, count);
 }
 void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insw_ns((u16 __iomem *) addr, dst, count);
+       readsw(addr, dst, count);
 }
 void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
 {
-       _insl_ns((u32 __iomem *) addr, dst, count);
+       readsl(addr, dst, count);
 }
 EXPORT_SYMBOL(ioread8_rep);
 EXPORT_SYMBOL(ioread16_rep);
@@ -91,15 +91,15 @@ EXPORT_SYMBOL(ioread32_rep);
 
 void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsb((u8 __iomem *) addr, src, count);
+       writesb(addr, src, count);
 }
 void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsw_ns((u16 __iomem *) addr, src, count);
+       writesw(addr, src, count);
 }
 void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
 {
-       _outsl_ns((u32 __iomem *) addr, src, count);
+       writesl(addr, src, count);
 }
 EXPORT_SYMBOL(iowrite8_rep);
 EXPORT_SYMBOL(iowrite16_rep);
index 90fab64..2f72af8 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/module.h>
 #include <linux/kdebug.h>
 #include <linux/slab.h>
+#include <asm/code-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
 #include <asm/uaccess.h>
@@ -491,12 +492,10 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
        return ret;
 }
 
-#ifdef CONFIG_PPC64
 unsigned long arch_deref_entry_point(void *entry)
 {
-       return ((func_descr_t *)entry)->entry;
+       return ppc_global_function_entry(entry);
 }
-#endif
 
 int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
 {
@@ -508,7 +507,11 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
        /* setup return addr to the jprobe handler routine */
        regs->nip = arch_deref_entry_point(jp->entry);
 #ifdef CONFIG_PPC64
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+       regs->gpr[12] = (unsigned long)jp->entry;
+#else
        regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
+#endif
 #endif
 
        return 1;
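
[Editor's note] The new r12 branch follows from the ABIv2 entry convention sketched earlier: the global entry point derives its TOC from r12, so the jprobe seeds r12 with the target address, whereas ABIv1 loads r2 straight from the function descriptor's TOC field; ABIv2 no longer has descriptors.
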
index 077d2ce..d807ee6 100644 (file)
@@ -315,8 +315,17 @@ static void dedotify_versions(struct modversion_info *vers,
        struct modversion_info *end;
 
        for (end = (void *)vers + size; vers < end; vers++)
-               if (vers->name[0] == '.')
+               if (vers->name[0] == '.') {
                        memmove(vers->name, vers->name+1, strlen(vers->name));
+#ifdef ARCH_RELOCATES_KCRCTAB
+                       /* The TOC symbol has no CRC computed. To avoid CRC
+                        * check failing, we must force it to the expected
+                        * value (see CRC check in module.c).
+                        */
+                       if (!strcmp(vers->name, "TOC."))
+                               vers->crc = -(unsigned long)reloc_start;
+#endif
+               }
 }
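
[Editor's note] A hedged reading of the forced value, paraphrasing the 3.16-era CRC check in kernel/module.c (an assumption, check the source):

	/* with ARCH_RELOCATES_KCRCTAB, kernel-exported symbols compare as */
	expected = *kernel_crc - (unsigned long)reloc_start;

The kernel carries no computed CRC for "TOC." (its entry is 0), so the module-side entry must be forced to 0 - reloc_start, i.e. -(unsigned long)reloc_start, for the comparison to pass on a relocated kernel.
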
 
 /* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
index 613a860..b694b07 100644 (file)
@@ -662,13 +662,6 @@ void __init early_init_devtree(void *params)
        of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
 #endif
 
-       /* Pre-initialize the cmd_line with the content of boot_commmand_line,
-        * which will be empty except when the content of the variable has
-        * been overriden by a bootloading mechanism. This happens typically
-        * with HAL takeover
-        */
-       strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
-
        /* Retrieve various information from the /chosen node of the
         * device-tree, including the platform type, initrd location and
         * size, TCE reserve, and more ...
index 078145a..1a85d8f 100644 (file)
@@ -1268,201 +1268,6 @@ static u64 __initdata prom_opal_base;
 static u64 __initdata prom_opal_entry;
 #endif
 
-#ifdef __BIG_ENDIAN__
-/* XXX Don't change this structure without updating opal-takeover.S */
-static struct opal_secondary_data {
-       s64                             ack;    /*  0 */
-       u64                             go;     /*  8 */
-       struct opal_takeover_args       args;   /* 16 */
-} opal_secondary_data;
-
-static u64 __initdata prom_opal_align;
-static u64 __initdata prom_opal_size;
-static int __initdata prom_rtas_start_cpu;
-static u64 __initdata prom_rtas_data;
-static u64 __initdata prom_rtas_entry;
-
-extern char opal_secondary_entry;
-
-static void __init prom_query_opal(void)
-{
-       long rc;
-
-       /* We must not query for OPAL presence on a machine that
-        * supports TNK takeover (970 blades), as this uses the same
-        * h-call with different arguments and will crash
-        */
-       if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
-                                   ADDR("/tnk-memory-map")))) {
-               prom_printf("TNK takeover detected, skipping OPAL check\n");
-               return;
-       }
-
-       prom_printf("Querying for OPAL presence... ");
-
-       rc = opal_query_takeover(&prom_opal_size,
-                                &prom_opal_align);
-       prom_debug("(rc = %ld) ", rc);
-       if (rc != 0) {
-               prom_printf("not there.\n");
-               return;
-       }
-       of_platform = PLATFORM_OPAL;
-       prom_printf(" there !\n");
-       prom_debug("  opal_size  = 0x%lx\n", prom_opal_size);
-       prom_debug("  opal_align = 0x%lx\n", prom_opal_align);
-       if (prom_opal_align < 0x10000)
-               prom_opal_align = 0x10000;
-}
-
-static int __init prom_rtas_call(int token, int nargs, int nret,
-                                int *outputs, ...)
-{
-       struct rtas_args rtas_args;
-       va_list list;
-       int i;
-
-       rtas_args.token = token;
-       rtas_args.nargs = nargs;
-       rtas_args.nret  = nret;
-       rtas_args.rets  = (rtas_arg_t *)&(rtas_args.args[nargs]);
-       va_start(list, outputs);
-       for (i = 0; i < nargs; ++i)
-               rtas_args.args[i] = va_arg(list, rtas_arg_t);
-       va_end(list);
-
-       for (i = 0; i < nret; ++i)
-               rtas_args.rets[i] = 0;
-
-       opal_enter_rtas(&rtas_args, prom_rtas_data,
-                       prom_rtas_entry);
-
-       if (nret > 1 && outputs != NULL)
-               for (i = 0; i < nret-1; ++i)
-                       outputs[i] = rtas_args.rets[i+1];
-       return (nret > 0)? rtas_args.rets[0]: 0;
-}
-
-static void __init prom_opal_hold_cpus(void)
-{
-       int i, cnt, cpu, rc;
-       long j;
-       phandle node;
-       char type[64];
-       u32 servers[8];
-       void *entry = (unsigned long *)&opal_secondary_entry;
-       struct opal_secondary_data *data = &opal_secondary_data;
-
-       prom_debug("prom_opal_hold_cpus: start...\n");
-       prom_debug("    - entry       = 0x%x\n", entry);
-       prom_debug("    - data        = 0x%x\n", data);
-
-       data->ack = -1;
-       data->go = 0;
-
-       /* look for cpus */
-       for (node = 0; prom_next_node(&node); ) {
-               type[0] = 0;
-               prom_getprop(node, "device_type", type, sizeof(type));
-               if (strcmp(type, "cpu") != 0)
-                       continue;
-
-               /* Skip non-configured cpus. */
-               if (prom_getprop(node, "status", type, sizeof(type)) > 0)
-                       if (strcmp(type, "okay") != 0)
-                               continue;
-
-               cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
-                            sizeof(servers));
-               if (cnt == PROM_ERROR)
-                       break;
-               cnt >>= 2;
-               for (i = 0; i < cnt; i++) {
-                       cpu = servers[i];
-                       prom_debug("CPU %d ... ", cpu);
-                       if (cpu == prom.cpu) {
-                               prom_debug("booted !\n");
-                               continue;
-                       }
-                       prom_debug("starting ... ");
-
-                       /* Init the acknowledge var which will be reset by
-                        * the secondary cpu when it awakens from its OF
-                        * spinloop.
-                        */
-                       data->ack = -1;
-                       rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
-                                           NULL, cpu, entry, data);
-                       prom_debug("rtas rc=%d ...", rc);
-
-                       for (j = 0; j < 100000000 && data->ack == -1; j++) {
-                               HMT_low();
-                               mb();
-                       }
-                       HMT_medium();
-                       if (data->ack != -1)
-                               prom_debug("done, PIR=0x%x\n", data->ack);
-                       else
-                               prom_debug("timeout !\n");
-               }
-       }
-       prom_debug("prom_opal_hold_cpus: end...\n");
-}
-
-static void __init prom_opal_takeover(void)
-{
-       struct opal_secondary_data *data = &opal_secondary_data;
-       struct opal_takeover_args *args = &data->args;
-       u64 align = prom_opal_align;
-       u64 top_addr, opal_addr;
-
-       args->k_image   = (u64)_stext;
-       args->k_size    = _end - _stext;
-       args->k_entry   = 0;
-       args->k_entry2  = 0x60;
-
-       top_addr = _ALIGN_UP(args->k_size, align);
-
-       if (prom_initrd_start != 0) {
-               args->rd_image = prom_initrd_start;
-               args->rd_size = prom_initrd_end - args->rd_image;
-               args->rd_loc = top_addr;
-               top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
-       }
-
-       /* Pickup an address for the HAL. We want to go really high
-        * up to avoid problem with future kexecs. On the other hand
-        * we don't want to be all over the TCEs on P5IOC2 machines
-        * which are going to be up there too. We assume the machine
-        * has plenty of memory, and we ask for the HAL for now to
-        * be just below the 1G point, or above the initrd
-        */
-       opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
-       if (opal_addr < top_addr)
-               opal_addr = top_addr;
-       args->hal_addr = opal_addr;
-
-       /* Copy the command line to the kernel image */
-       strlcpy(boot_command_line, prom_cmd_line,
-               COMMAND_LINE_SIZE);
-
-       prom_debug("  k_image    = 0x%lx\n", args->k_image);
-       prom_debug("  k_size     = 0x%lx\n", args->k_size);
-       prom_debug("  k_entry    = 0x%lx\n", args->k_entry);
-       prom_debug("  k_entry2   = 0x%lx\n", args->k_entry2);
-       prom_debug("  hal_addr   = 0x%lx\n", args->hal_addr);
-       prom_debug("  rd_image   = 0x%lx\n", args->rd_image);
-       prom_debug("  rd_size    = 0x%lx\n", args->rd_size);
-       prom_debug("  rd_loc     = 0x%lx\n", args->rd_loc);
-       prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
-       prom_close_stdin();
-       mb();
-       data->go = 1;
-       for (;;)
-               opal_do_takeover(args);
-}
-#endif /* __BIG_ENDIAN__ */
-
 /*
  * Allocate room for and instantiate OPAL
  */
@@ -1597,12 +1402,6 @@ static void __init prom_instantiate_rtas(void)
                         &val, sizeof(val)) != PROM_ERROR)
                rtas_has_query_cpu_stopped = true;
 
-#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
-       /* PowerVN takeover hack */
-       prom_rtas_data = base;
-       prom_rtas_entry = entry;
-       prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
-#endif
        prom_debug("rtas base     = 0x%x\n", base);
        prom_debug("rtas entry    = 0x%x\n", entry);
        prom_debug("rtas size     = 0x%x\n", (long)size);
@@ -3027,16 +2826,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
                prom_instantiate_rtas();
 
 #ifdef CONFIG_PPC_POWERNV
-#ifdef __BIG_ENDIAN__
-       /* Detect HAL and try instanciating it & doing takeover */
-       if (of_platform == PLATFORM_PSERIES_LPAR) {
-               prom_query_opal();
-               if (of_platform == PLATFORM_OPAL) {
-                       prom_opal_hold_cpus();
-                       prom_opal_takeover();
-               }
-       } else
-#endif /* __BIG_ENDIAN__ */
        if (of_platform == PLATFORM_OPAL)
                prom_instantiate_opal();
 #endif /* CONFIG_PPC_POWERNV */
index 77aa1e9..fe8e54b 100644 (file)
@@ -21,9 +21,7 @@ _end enter_prom memcpy memset reloc_offset __secondary_hold
 __secondary_hold_acknowledge __secondary_hold_spinloop __start
 strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
 reloc_got2 kernstart_addr memstart_addr linux_banner _stext
-opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry
-boot_command_line __prom_init_toc_start __prom_init_toc_end
-btext_setup_display TOC."
+__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
 
 NM="$1"
 OBJ="$2"
index 658e89d..db2b482 100644 (file)
@@ -611,17 +611,19 @@ static void rtas_flash_firmware(int reboot_type)
        for (f = flist; f; f = next) {
                /* Translate data addrs to absolute */
                for (i = 0; i < f->num_blocks; i++) {
-                       f->blocks[i].data = (char *)__pa(f->blocks[i].data);
+                       f->blocks[i].data = (char *)cpu_to_be64(__pa(f->blocks[i].data));
                        image_size += f->blocks[i].length;
+                       f->blocks[i].length = cpu_to_be64(f->blocks[i].length);
                }
                next = f->next;
                /* Don't translate NULL pointer for last entry */
                if (f->next)
-                       f->next = (struct flash_block_list *)__pa(f->next);
+                       f->next = (struct flash_block_list *)cpu_to_be64(__pa(f->next));
                else
                        f->next = NULL;
                /* make num_blocks into the version/length field */
                f->num_blocks = (FLASH_BLOCK_LIST_VERSION << 56) | ((f->num_blocks+1)*16);
+               f->num_blocks = cpu_to_be64(f->num_blocks);
        }
 
        printk(KERN_ALERT "FLASH: flash image is %ld bytes\n", image_size);
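
[Editor's note] RTAS parses the block list in big-endian, so on a little-endian kernel every pointer, length and version/length header word is byte-swapped with cpu_to_be64() before the list is handed to firmware; on big-endian builds cpu_to_be64() is the identity, so one code path serves both. Note the version/length packing is still computed in CPU byte order first and converted as the last step.
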
index e239df3..e5b022c 100644 (file)
@@ -469,9 +469,17 @@ void __init smp_setup_cpu_maps(void)
                }
 
                for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
+                       bool avail;
+
                        DBG("    thread %d -> cpu %d (hard id %d)\n",
                            j, cpu, be32_to_cpu(intserv[j]));
-                       set_cpu_present(cpu, of_device_is_available(dn));
+
+                       avail = of_device_is_available(dn);
+                       if (!avail)
+                               avail = !of_property_match_string(dn,
+                                               "enable-method", "spin-table");
+
+                       set_cpu_present(cpu, avail);
                        set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
                        set_cpu_possible(cpu, true);
                        cpu++;
index 4e47db6..1bc5a17 100644 (file)
@@ -54,7 +54,6 @@
 
 #include "signal.h"
 
-#undef DEBUG_SIG
 
 #ifdef CONFIG_PPC64
 #define sys_rt_sigreturn       compat_sys_rt_sigreturn
@@ -1063,10 +1062,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
        return 1;
 
 badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_rt_signal32: "
@@ -1484,10 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
        return 1;
 
 badframe:
-#ifdef DEBUG_SIG
-       printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_signal32: "
index d501dc4..97c1e4b 100644 (file)
@@ -38,7 +38,6 @@
 
 #include "signal.h"
 
-#define DEBUG_SIG 0
 
 #define GP_REGS_SIZE   min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
 #define FP_REGS_SIZE   sizeof(elf_fpregset_t)
@@ -700,10 +699,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
        return 0;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
-              regs, uc, &uc->uc_mcontext);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "rt_sigreturn",
@@ -809,10 +804,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
        return 1;
 
 badframe:
-#if DEBUG_SIG
-       printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
-              regs, frame, newsp);
-#endif
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "setup_rt_frame",
index 51a3ff7..1007fb8 100644 (file)
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
 
 #ifdef CONFIG_SCHED_SMT
 /* cpumask of CPUs with asymmetric SMT dependency */
-static const int powerpc_smt_flags(void)
+static int powerpc_smt_flags(void)
 {
        int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 
index 8056107..68468d6 100644 (file)
@@ -1562,7 +1562,7 @@ static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
                                goto out;
                        }
                        if (!rma_setup && is_vrma_hpte(v)) {
-                               unsigned long psize = hpte_page_size(v, r);
+                               unsigned long psize = hpte_base_page_size(v, r);
                                unsigned long senc = slb_pgsize_encoding(psize);
                                unsigned long lpcr;
 
index 8c86422..731be74 100644 (file)
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
        stw     r10, HSTATE_PMC + 24(r13)
        stw     r11, HSTATE_PMC + 28(r13)
 END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
-BEGIN_FTR_SECTION
-       mfspr   r9, SPRN_SIER
-       std     r8, HSTATE_MMCR + 40(r13)
-       std     r9, HSTATE_MMCR + 48(r13)
-END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 31:
 
        /*
index 6e62243..5a24d3c 100644 (file)
@@ -814,13 +814,10 @@ long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                        r = hpte[i+1];
 
                        /*
-                        * Check the HPTE again, including large page size
-                        * Since we don't currently allow any MPSS (mixed
-                        * page-size segment) page sizes, it is sufficient
-                        * to check against the actual page size.
+                        * Check the HPTE again, including base page size
                         */
                        if ((v & valid) && (v & mask) == val &&
-                           hpte_page_size(v, r) == (1ul << pshift))
+                           hpte_base_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);
 
index 868347e..558a67d 100644 (file)
@@ -48,7 +48,7 @@
  *
  * LR = return address to continue at after eventually re-enabling MMU
  */
-_GLOBAL(kvmppc_hv_entry_trampoline)
+_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
        mflr    r0
        std     r0, PPC_LR_STKOFF(r1)
        stdu    r1, -112(r1)
index e2c29e3..d044b8b 100644 (file)
 #include <asm/exception-64s.h>
 
 #if defined(CONFIG_PPC_BOOK3S_64)
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 #define GET_SHADOW_VCPU(reg)    addi   reg, r13, PACA_SVCPU
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
index 9eec675..16c4d88 100644 (file)
 
 #if defined(CONFIG_PPC_BOOK3S_64)
 
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+#define FUNC(name)             name
+#else
 #define FUNC(name)             GLUE(.,name)
+#endif
 
 #elif defined(CONFIG_PPC_BOOK3S_32)
 
@@ -146,7 +150,7 @@ kvmppc_handler_skip_ins:
  * On entry, r4 contains the guest shadow MSR
  * MSR.EE has to be 0 when calling this function
  */
-_GLOBAL(kvmppc_entry_trampoline)
+_GLOBAL_TOC(kvmppc_entry_trampoline)
        mfmsr   r5
        LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
        toreal(r7)
index edb14ba..ef27fbd 100644 (file)
@@ -23,20 +23,20 @@ static void kvm_rtas_set_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 3 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 3 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
-       server = args->args[1];
-       priority = args->args[2];
+       irq = be32_to_cpu(args->args[0]);
+       server = be32_to_cpu(args->args[1]);
+       priority = be32_to_cpu(args->args[2]);
 
        rc = kvmppc_xics_set_xive(vcpu->kvm, irq, server, priority);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -44,12 +44,12 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq, server, priority;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 3) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 3) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        server = priority = 0;
        rc = kvmppc_xics_get_xive(vcpu->kvm, irq, &server, &priority);
@@ -58,10 +58,10 @@ static void kvm_rtas_get_xive(struct kvm_vcpu *vcpu, struct rtas_args *args)
                goto out;
        }
 
-       args->rets[1] = server;
-       args->rets[2] = priority;
+       args->rets[1] = cpu_to_be32(server);
+       args->rets[2] = cpu_to_be32(priority);
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -69,18 +69,18 @@ static void kvm_rtas_int_off(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_off(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 
 static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
@@ -88,18 +88,18 @@ static void kvm_rtas_int_on(struct kvm_vcpu *vcpu, struct rtas_args *args)
        u32 irq;
        int rc;
 
-       if (args->nargs != 1 || args->nret != 1) {
+       if (be32_to_cpu(args->nargs) != 1 || be32_to_cpu(args->nret) != 1) {
                rc = -3;
                goto out;
        }
 
-       irq = args->args[0];
+       irq = be32_to_cpu(args->args[0]);
 
        rc = kvmppc_xics_int_on(vcpu->kvm, irq);
        if (rc)
                rc = -3;
 out:
-       args->rets[0] = rc;
+       args->rets[0] = cpu_to_be32(rc);
 }
 #endif /* CONFIG_KVM_XICS */
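
[Editor's note] The pattern above replaces the bulk swap (removed further down): the rtas_args buffer now stays in guest big-endian byte order end to end, and each handler converts per field on access,

	irq = be32_to_cpu(args->args[0]);	/* read  */
	args->rets[0] = cpu_to_be32(rc);	/* write */

which also keeps the handlers correct regardless of host endianness.
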
 
@@ -205,32 +205,6 @@ int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp)
        return rc;
 }
 
-static void kvmppc_rtas_swap_endian_in(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       args->token = be32_to_cpu(args->token);
-       args->nargs = be32_to_cpu(args->nargs);
-       args->nret = be32_to_cpu(args->nret);
-       for (i = 0; i < args->nargs; i++)
-               args->args[i] = be32_to_cpu(args->args[i]);
-#endif
-}
-
-static void kvmppc_rtas_swap_endian_out(struct rtas_args *args)
-{
-#ifdef __LITTLE_ENDIAN__
-       int i;
-
-       for (i = 0; i < args->nret; i++)
-               args->args[i] = cpu_to_be32(args->args[i]);
-       args->token = cpu_to_be32(args->token);
-       args->nargs = cpu_to_be32(args->nargs);
-       args->nret = cpu_to_be32(args->nret);
-#endif
-}
-
 int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 {
        struct rtas_token_definition *d;
@@ -249,8 +223,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
        if (rc)
                goto fail;
 
-       kvmppc_rtas_swap_endian_in(&args);
-
        /*
         * args->rets is a pointer into args->args. Now that we've
         * copied args we need to fix it up to point into our copy,
@@ -258,13 +230,13 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
         * value so we can restore it on the way out.
         */
        orig_rets = args.rets;
-       args.rets = &args.args[args.nargs];
+       args.rets = &args.args[be32_to_cpu(args.nargs)];
 
        mutex_lock(&vcpu->kvm->lock);
 
        rc = -ENOENT;
        list_for_each_entry(d, &vcpu->kvm->arch.rtas_tokens, list) {
-               if (d->token == args.token) {
+               if (d->token == be32_to_cpu(args.token)) {
                        d->handler->handler(vcpu, &args);
                        rc = 0;
                        break;
@@ -275,7 +247,6 @@ int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu)
 
        if (rc == 0) {
                args.rets = orig_rets;
-               kvmppc_rtas_swap_endian_out(&args);
                rc = kvm_write_guest(vcpu->kvm, args_phys, &args, sizeof(args));
                if (rc)
                        goto fail;
index dd2cc03..86903d3 100644 (file)
@@ -473,7 +473,8 @@ static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                if (printk_ratelimit())
                        pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
                                __func__, (long)gfn, pfn);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);
 
index 0738f96..43435c6 100644 (file)
@@ -77,7 +77,7 @@ _GLOBAL(memset)
        stb     r4,0(r6)
        blr
 
-_GLOBAL(memmove)
+_GLOBAL_TOC(memmove)
        cmplw   0,r3,r4
        bgt     backwards_memcpy
        b       memcpy
index 412dd46..5c09f36 100644 (file)
@@ -1198,7 +1198,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x3f;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 32 ? sh : 31);
-                       if (ival < 0 && (sh >= 32 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 32 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1208,7 +1208,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb;
                        ival = (signed int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1216,7 +1216,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
 
 #ifdef __powerpc64__
                case 27:        /* sld */
-                       sh = regs->gpr[rd] & 0x7f;
+                       sh = regs->gpr[rb] & 0x7f;
                        if (sh < 64)
                                regs->gpr[ra] = regs->gpr[rd] << sh;
                        else
@@ -1235,7 +1235,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = regs->gpr[rb] & 0x7f;
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> (sh < 64 ? sh : 63);
-                       if (ival < 0 && (sh >= 64 || (ival & ((1 << sh) - 1)) != 0))
+                       if (ival < 0 && (sh >= 64 || (ival & ((1ul << sh) - 1)) != 0))
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
@@ -1246,7 +1246,7 @@ int __kprobes emulate_step(struct pt_regs *regs, unsigned int instr)
                        sh = rb | ((instr & 2) << 4);
                        ival = (signed long int) regs->gpr[rd];
                        regs->gpr[ra] = ival >> sh;
-                       if (ival < 0 && (ival & ((1 << sh) - 1)) != 0)
+                       if (ival < 0 && (ival & ((1ul << sh) - 1)) != 0)
                                regs->xer |= XER_CA;
                        else
                                regs->xer &= ~XER_CA;
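
[Editor's note] Why the 1ul matters, worked through for srad with sh = 40: (1 << 40) shifts a 32-bit int, which is undefined behaviour in C (and commonly wraps the shift count in practice), so the carry mask would test the wrong bits; (1ul << 40) - 1 == 0xffffffffff, the low 40 bits that decide whether any ones were shifted out and hence whether XER_CA must be set.
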
index af3d78e..928ebe7 100644 (file)
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
        } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
                first_context = 1;
                last_context = 65535;
-       } else
-#ifdef CONFIG_PPC_BOOK3E_MMU
-       if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
-               u32 mmucfg = mfspr(SPRN_MMUCFG);
-               u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
-                               >> MMUCFG_PIDSIZE_SHIFT;
-               first_context = 1;
-               last_context = (1UL << (pid_bits + 1)) - 1;
-       } else
-#endif
-       {
+       } else {
                first_context = 1;
                last_context = 255;
        }
index 6dcdade..3afa6f4 100644 (file)
@@ -25,7 +25,7 @@ static inline void bpf_flush_icache(void *start, void *end)
        flush_icache_range((unsigned long)start, (unsigned long)end);
 }
 
-static void bpf_jit_build_prologue(struct sk_filter *fp, u32 *image,
+static void bpf_jit_build_prologue(struct bpf_prog *fp, u32 *image,
                                   struct codegen_context *ctx)
 {
        int i;
@@ -121,7 +121,7 @@ static void bpf_jit_build_epilogue(u32 *image, struct codegen_context *ctx)
        ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
 
 /* Assemble the body code between the prologue & epilogue. */
-static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
+static int bpf_jit_build_body(struct bpf_prog *fp, u32 *image,
                              struct codegen_context *ctx,
                              unsigned int *addrs)
 {
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
                case BPF_ANC | SKF_AD_VLAN_TAG:
                case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
+                       BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
+
                        PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
                                                          vlan_tci));
-                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
-                               PPC_ANDI(r_A, r_A, VLAN_VID_MASK);
-                       else
+                       if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
+                               PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
+                       } else {
                                PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
+                               PPC_SRWI(r_A, r_A, 12);
+                       }
                        break;
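
[Editor's note] The new BUILD_BUG_ON pins VLAN_TAG_PRESENT to 0x1000 (bit 12), so "andi; srwi r_A,r_A,12" leaves exactly 0 or 1 in r_A, the same normalisation the MIPS JIT achieves with sltu earlier in this series.
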
                case BPF_ANC | SKF_AD_QUEUE:
                        BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -565,7 +569,7 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
        return 0;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
        unsigned int proglen;
        unsigned int alloclen;
@@ -689,7 +693,7 @@ out:
        return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
                module_free(NULL, fp->bpf_func);
index 4520c93..fe52db2 100644 (file)
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
         * check that the PMU supports EBB, meaning those that don't can still
         * use bit 63 of the event code for something else if they wish.
         */
-       return (ppmu->flags & PPMU_EBB) &&
+       return (ppmu->flags & PPMU_ARCH_207S) &&
               ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
 }
 
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
        if (ppmu->flags & PPMU_HAS_SIER)
                sier = mfspr(SPRN_SIER);
 
-       if (ppmu->flags & PPMU_EBB) {
+       if (ppmu->flags & PPMU_ARCH_207S) {
                pr_info("MMCR2: %016lx EBBHR: %016lx\n",
                        mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
                pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
        } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
 
        local64_add(delta, &event->count);
-       local64_sub(delta, &event->hw.period_left);
+
+       /*
+        * A number of places program the PMC with (0x80000000 - period_left).
+        * We never want period_left to be less than 1 because we will program
+        * the PMC with a value >= 0x80000000 and an edge-detected PMC will
+        * roll around to 0 before taking an exception. We have seen this
+        * on POWER8.
+        *
+        * To fix this, clamp the minimum value of period_left to 1.
+        */
+       do {
+               prev = local64_read(&event->hw.period_left);
+               val = prev - delta;
+               if (val < 1)
+                       val = 1;
+       } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
 }
 
 /*
@@ -1292,6 +1307,9 @@ static void power_pmu_enable(struct pmu *pmu)
  out_enable:
        pmao_restore_workaround(ebb);
 
+       if (ppmu->flags & PPMU_ARCH_207S)
+               mtspr(SPRN_MMCR2, 0);
+
        mmcr0 = ebb_switch_in(ebb, cpuhw->mmcr[0]);
 
        mb();
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
 
        if (has_branch_stack(event)) {
                /* PMU has BHRB enabled */
-               if (!(ppmu->flags & PPMU_BHRB))
+               if (!(ppmu->flags & PPMU_ARCH_207S))
                        return -EOPNOTSUPP;
        }
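
The power_pmu_read() hunk above replaces a plain local64_sub() on
period_left with a cmpxchg loop that never lets the value drop below 1,
so (0x80000000 - period_left) can never reach 0x80000000 when it is later
programmed into a PMC. A user-space sketch of the same lock-free clamp,
using C11 atomics in place of local64_t (illustrative only):

#include <stdatomic.h>
#include <stdio.h>

static void period_left_sub_clamped(atomic_llong *period_left, long long delta)
{
        long long prev, val;

        do {
                prev = atomic_load(period_left);
                val = prev - delta;
                if (val < 1)
                        val = 1;    /* keeps the later PMC value < 0x80000000 */
        } while (!atomic_compare_exchange_weak(period_left, &prev, val));
}

int main(void)
{
        atomic_llong left = 3;

        period_left_sub_clamped(&left, 10);     /* would go to -7; clamps to 1 */
        printf("%lld\n", (long long)atomic_load(&left));
        return 0;
}
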
 
index fe2763b..639cd91 100644 (file)
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
        .get_constraint         = power8_get_constraint,
        .get_alternatives       = power8_get_alternatives,
        .disable_pmc            = power8_disable_pmc,
-       .flags                  = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB,
+       .flags                  = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
        .n_generic              = ARRAY_SIZE(power8_generic_events),
        .generic_events         = power8_generic_events,
        .cache_events           = &power8_cache_events,
index 94560db..2c15ff0 100644 (file)
@@ -125,7 +125,7 @@ static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, i
 static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
 {
        u64 reg_value;
-       int temp;
+       unsigned int temp;
        u64 new_value;
        int ret;
 
index 38e0a1a..5e6e0ba 100644 (file)
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
        return ret;
 }
 
+#ifdef CONFIG_COREDUMP
 int elf_coredump_extra_notes_size(void)
 {
        struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
 
        return ret;
 }
+#endif
 
 void notify_spus_active(void)
 {
index b9d5d67..52a7d25 100644 (file)
@@ -1,8 +1,9 @@
 
 obj-$(CONFIG_SPU_FS) += spufs.o
-spufs-y += inode.o file.o context.o syscalls.o coredump.o
+spufs-y += inode.o file.o context.o syscalls.o
 spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
 spufs-y += switch.o fault.o lscsa_alloc.o
+spufs-$(CONFIG_COREDUMP) += coredump.o
 
 # magic for the trace events
 CFLAGS_sched.o := -I$(src)
index b045fdd..a87200a 100644 (file)
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
 struct spufs_calls spufs_calls = {
        .create_thread = do_spu_create,
        .spu_run = do_spu_run,
-       .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
-       .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
        .notify_spus_active = do_notify_spus_active,
        .owner = THIS_MODULE,
+#ifdef CONFIG_COREDUMP
+       .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
+       .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
+#endif
 };
index d55891f..4ad227d 100644 (file)
@@ -1,4 +1,4 @@
-obj-y                  += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o
+obj-y                  += setup.o opal-wrappers.o opal.o opal-async.o
 obj-y                  += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
 obj-y                  += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
 obj-y                  += opal-msglog.o
index 10268c4..0ad533b 100644 (file)
@@ -249,7 +249,7 @@ static void elog_work_fn(struct work_struct *work)
 
        rc = opal_get_elog_size(&id, &size, &type);
        if (rc != OPAL_SUCCESS) {
-               pr_err("ELOG: Opal log read failed\n");
+               pr_err("ELOG: OPAL log info read failed\n");
                return;
        }
 
@@ -257,7 +257,7 @@ static void elog_work_fn(struct work_struct *work)
        log_id = be64_to_cpu(id);
        elog_type = be64_to_cpu(type);
 
-       BUG_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
+       WARN_ON(elog_size > OPAL_MAX_ERRLOG_SIZE);
 
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE)
                elog_size  =  OPAL_MAX_ERRLOG_SIZE;
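
The elog_work_fn() hunk above demotes an oversized error log from a fatal
BUG_ON to a WARN_ON and relies on the existing clamp to read on safely. A
sketch of that warn-and-clamp policy (the OPAL_MAX_ERRLOG_SIZE value is
assumed here, for illustration):

#include <stdio.h>

#define OPAL_MAX_ERRLOG_SIZE 16384      /* assumed value, for illustration */

static unsigned long clamp_elog_size(unsigned long elog_size)
{
        if (elog_size >= OPAL_MAX_ERRLOG_SIZE) {
                fprintf(stderr, "warning: oversized error log (%lu bytes)\n",
                        elog_size);
                elog_size = OPAL_MAX_ERRLOG_SIZE;   /* recover, don't crash */
        }
        return elog_size;
}

int main(void)
{
        printf("%lu\n", clamp_elog_size(1UL << 20));
        return 0;
}
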
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
deleted file mode 100644 (file)
index 11a3169..0000000
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * PowerNV OPAL takeover assembly code, for use by prom_init.c
- *
- * Copyright 2011 IBM Corp.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <asm/ppc_asm.h>
-#include <asm/hvcall.h>
-#include <asm/asm-offsets.h>
-#include <asm/opal.h>
-
-#define H_HAL_TAKEOVER                 0x5124
-#define H_HAL_TAKEOVER_QUERY_MAGIC     -1
-
-       .text
-_GLOBAL(opal_query_takeover)
-       mfcr    r0
-       stw     r0,8(r1)
-       stdu    r1,-STACKFRAMESIZE(r1)
-       std     r3,STK_PARAM(R3)(r1)
-       std     r4,STK_PARAM(R4)(r1)
-       li      r3,H_HAL_TAKEOVER
-       li      r4,H_HAL_TAKEOVER_QUERY_MAGIC
-       HVSC
-       addi    r1,r1,STACKFRAMESIZE
-       ld      r10,STK_PARAM(R3)(r1)
-       std     r4,0(r10)
-       ld      r10,STK_PARAM(R4)(r1)
-       std     r5,0(r10)
-       lwz     r0,8(r1)
-       mtcrf   0xff,r0
-       blr
-
-_GLOBAL(opal_do_takeover)
-       mfcr    r0
-       stw     r0,8(r1)
-       mflr    r0
-       std     r0,16(r1)
-       bl      __opal_do_takeover
-       ld      r0,16(r1)
-       mtlr    r0
-       lwz     r0,8(r1)
-       mtcrf   0xff,r0
-       blr
-
-__opal_do_takeover:
-       ld      r4,0(r3)
-       ld      r5,0x8(r3)
-       ld      r6,0x10(r3)
-       ld      r7,0x18(r3)
-       ld      r8,0x20(r3)
-       ld      r9,0x28(r3)
-       ld      r10,0x30(r3)
-       ld      r11,0x38(r3)
-       li      r3,H_HAL_TAKEOVER
-       HVSC
-       blr
-
-       .globl opal_secondary_entry
-opal_secondary_entry:
-       mr      r31,r3
-       mfmsr   r11
-       li      r12,(MSR_SF | MSR_ISF)@highest
-       sldi    r12,r12,48
-       or      r11,r11,r12
-       mtmsrd  r11
-       isync
-       mfspr   r4,SPRN_PIR
-       std     r4,0(r3)
-1:     HMT_LOW
-       ld      r4,8(r3)
-       cmpli   cr0,r4,0
-       beq     1b
-       HMT_MEDIUM
-1:     addi    r3,r31,16
-       bl      __opal_do_takeover
-       b       1b
-
-_GLOBAL(opal_enter_rtas)
-       mflr    r0
-       std     r0,16(r1)
-        stdu   r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
-
-       /* Because PROM is running in 32b mode, it clobbers the high order half
-        * of all registers that it saves.  We therefore save those registers
-        * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
-       */
-       SAVE_GPR(2, r1)
-       SAVE_GPR(13, r1)
-       SAVE_8GPRS(14, r1)
-       SAVE_10GPRS(22, r1)
-       mfcr    r10
-       mfmsr   r11
-       std     r10,_CCR(r1)
-       std     r11,_MSR(r1)
-
-       /* Get the PROM entrypoint */
-       mtlr    r5
-
-       /* Switch MSR to 32 bits mode
-        */
-        li      r12,1
-        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
-        andc    r11,r11,r12
-        li      r12,1
-        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
-        andc    r11,r11,r12
-        mtmsrd  r11
-        isync
-
-       /* Enter RTAS here... */
-       blrl
-
-       /* Just make sure that r1 top 32 bits didn't get
-        * corrupt by OF
-        */
-       rldicl  r1,r1,0,32
-
-       /* Restore the MSR (back to 64 bits) */
-       ld      r0,_MSR(r1)
-       MTMSRD(r0)
-        isync
-
-       /* Restore other registers */
-       REST_GPR(2, r1)
-       REST_GPR(13, r1)
-       REST_8GPRS(14, r1)
-       REST_10GPRS(22, r1)
-       ld      r4,_CCR(r1)
-       mtcr    r4
-
-        addi   r1,r1,PROM_FRAME_SIZE
-       ld      r0,16(r1)
-       mtlr    r0
-       blr
index 022b38e..2d0b4d6 100644 (file)
@@ -86,6 +86,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
        }
 
        of_node_set_flag(dn, OF_DYNAMIC);
+       of_node_init(dn);
 
        return dn;
 }
index 0435bb6..1c0a60d 100644 (file)
@@ -69,6 +69,7 @@ static int pSeries_reconfig_add_node(const char *path, struct property *proplist
 
        np->properties = proplist;
        of_node_set_flag(np, OF_DYNAMIC);
+       of_node_init(np);
 
        np->parent = derive_parent(path);
        if (IS_ERR(np->parent)) {
index 62c47bb..9e5353f 100644 (file)
@@ -476,6 +476,11 @@ void __init alloc_dart_table(void)
         */
        dart_tablebase = (unsigned long)
                __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
+       /*
+        * The DART space is later unmapped from the kernel linear mapping and
+        * accessing dart_tablebase during kmemleak scanning will fault.
+        */
+       kmemleak_no_scan((void *)dart_tablebase);
 
        printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
 }
index 8df022c..fd09a10 100644 (file)
@@ -45,7 +45,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_PREEMPT=y
 CONFIG_HZ_100=y
@@ -240,7 +241,6 @@ CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -456,6 +456,7 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
+CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
index c81a74e..b061180 100644 (file)
@@ -45,7 +45,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -238,7 +239,6 @@ CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -453,6 +453,7 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
+CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
index b5ba8fe..d279baa 100644 (file)
@@ -43,7 +43,8 @@ CONFIG_SOLARIS_X86_PARTITION=y
 CONFIG_UNIXWARE_DISKLABEL=y
 CONFIG_CFQ_GROUP_IOSCHED=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 CONFIG_NR_CPUS=256
 CONFIG_HZ_100=y
 CONFIG_MEMORY_HOTPLUG=y
@@ -236,7 +237,6 @@ CONFIG_IP_VS_PE_SIP=m
 CONFIG_NF_CONNTRACK_IPV4=m
 # CONFIG_NF_CONNTRACK_PROC_COMPAT is not set
 CONFIG_NF_TABLES_IPV4=m
-CONFIG_NFT_REJECT_IPV4=m
 CONFIG_NFT_CHAIN_ROUTE_IPV4=m
 CONFIG_NFT_CHAIN_NAT_IPV4=m
 CONFIG_NF_TABLES_ARP=m
@@ -451,6 +451,7 @@ CONFIG_TN3270_FS=y
 CONFIG_WATCHDOG=y
 CONFIG_WATCHDOG_NOWAYOUT=y
 CONFIG_SOFT_WATCHDOG=m
+CONFIG_DIAG288_WATCHDOG=m
 # CONFIG_HID is not set
 # CONFIG_USB_SUPPORT is not set
 CONFIG_INFINIBAND=m
index cef073c..948e0e0 100644 (file)
@@ -8,7 +8,8 @@ CONFIG_CC_OPTIMIZE_FOR_SIZE=y
 CONFIG_PARTITION_ADVANCED=y
 CONFIG_IBM_PARTITION=y
 CONFIG_DEFAULT_DEADLINE=y
-CONFIG_MARCH_Z9_109=y
+CONFIG_MARCH_Z196=y
+CONFIG_TUNE_ZEC12=y
 # CONFIG_COMPAT is not set
 CONFIG_NR_CPUS=2
 # CONFIG_HOTPLUG_CPU is not set
index 4557cb7..2e56498 100644 (file)
@@ -135,8 +135,8 @@ CONFIG_PROVE_LOCKING=y
 CONFIG_LOCK_STAT=y
 CONFIG_DEBUG_LOCKDEP=y
 CONFIG_DEBUG_ATOMIC_SLEEP=y
-CONFIG_DEBUG_WRITECOUNT=y
 CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_PI_LIST=y
 CONFIG_DEBUG_SG=y
 CONFIG_DEBUG_NOTIFIERS=y
 CONFIG_PROVE_RCU=y
@@ -199,4 +199,10 @@ CONFIG_CRYPTO_SHA512_S390=m
 CONFIG_CRYPTO_DES_S390=m
 CONFIG_CRYPTO_AES_S390=m
 CONFIG_CRC7=m
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
 CONFIG_CMM=m
index c28f32a..3815bfe 100644 (file)
@@ -33,10 +33,9 @@ static inline int init_new_context(struct task_struct *tsk,
 
 static inline void set_user_asce(struct mm_struct *mm)
 {
-       pgd_t *pgd = mm->pgd;
-
-       S390_lowcore.user_asce = mm->context.asce_bits | __pa(pgd);
-       set_fs(current->thread.mm_segment);
+       S390_lowcore.user_asce = mm->context.asce_bits | __pa(mm->pgd);
+       if (current->thread.mm_segment.ar4)
+               __ctl_load(S390_lowcore.user_asce, 7, 7);
        set_cpu_flag(CIF_ASCE);
 }
 
@@ -70,12 +69,11 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
        /* Clear old ASCE by loading the kernel ASCE. */
        __ctl_load(S390_lowcore.kernel_asce, 1, 1);
        __ctl_load(S390_lowcore.kernel_asce, 7, 7);
-       /* Delay loading of the new ASCE to control registers CR1 & CR7 */
-       set_cpu_flag(CIF_ASCE);
        atomic_inc(&next->context.attach_count);
        atomic_dec(&prev->context.attach_count);
        if (MACHINE_HAS_TLB_LC)
                cpumask_clear_cpu(cpu, &prev->context.cpu_attach_mask);
+       S390_lowcore.user_asce = next->context.asce_bits | __pa(next->pgd);
 }
 
 #define finish_arch_post_lock_switch finish_arch_post_lock_switch
@@ -84,17 +82,18 @@ static inline void finish_arch_post_lock_switch(void)
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
 
-       if (!mm)
-               return;
-       preempt_disable();
-       while (atomic_read(&mm->context.attach_count) >> 16)
-               cpu_relax();
-
-       cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
-       set_user_asce(mm);
-       if (mm->context.flush_mm)
-               __tlb_flush_mm(mm);
-       preempt_enable();
+       load_kernel_asce();
+       if (mm) {
+               preempt_disable();
+               while (atomic_read(&mm->context.attach_count) >> 16)
+                       cpu_relax();
+
+               cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
+               if (mm->context.flush_mm)
+                       __tlb_flush_mm(mm);
+               preempt_enable();
+       }
+       set_fs(current->thread.mm_segment);
 }
 
 #define enter_lazy_tlb(mm,tsk) do { } while (0)
index 29c81f8..18ea9e3 100644 (file)
@@ -51,8 +51,8 @@ static inline int restore_fp_ctl(u32 *fpc)
                return 0;
 
        asm volatile(
-               "0:     lfpc    %1\n"
-               "       la      %0,0\n"
+               "       lfpc    %1\n"
+               "0:     la      %0,0\n"
                "1:\n"
                EX_TABLE(0b,1b)
                : "=d" (rc) : "Q" (*fpc), "0" (-EINVAL));
@@ -134,8 +134,4 @@ static inline void restore_access_regs(unsigned int *acrs)
        prev = __switch_to(prev,next);                                  \
 } while (0)
 
-#define finish_arch_switch(prev) do {                                       \
-       set_fs(current->thread.mm_segment);                                  \
-} while (0)
-
 #endif /* __ASM_SWITCH_TO_H */
index 6a9a9eb..7366373 100644 (file)
@@ -36,6 +36,7 @@ header-y += signal.h
 header-y += socket.h
 header-y += sockios.h
 header-y += sclp_ctl.h
+header-y += sie.h
 header-y += stat.h
 header-y += statfs.h
 header-y += swab.h
index 3d97f61..5d9cc19 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _UAPI_ASM_S390_SIE_H
 #define _UAPI_ASM_S390_SIE_H
 
-#include <asm/sigp.h>
-
 #define diagnose_codes                                         \
        { 0x10, "DIAG (0x10) release pages" },                  \
        { 0x44, "DIAG (0x44) time slice end" },                 \
        { 0x500, "DIAG (0x500) KVM virtio functions" },         \
        { 0x501, "DIAG (0x501) KVM breakpoint" }
 
-#define sigp_order_codes                                               \
-       { SIGP_SENSE, "SIGP sense" },                                   \
-       { SIGP_EXTERNAL_CALL, "SIGP external call" },                   \
-       { SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" },             \
-       { SIGP_STOP, "SIGP stop" },                                     \
-       { SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" },   \
-       { SIGP_SET_ARCHITECTURE, "SIGP set architecture" },             \
-       { SIGP_SET_PREFIX, "SIGP set prefix" },                         \
-       { SIGP_SENSE_RUNNING, "SIGP sense running" },                   \
-       { SIGP_RESTART, "SIGP restart" },                               \
-       { SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" },           \
-       { SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" }
+#define sigp_order_codes                                       \
+       { 0x01, "SIGP sense" },                                 \
+       { 0x02, "SIGP external call" },                         \
+       { 0x03, "SIGP emergency signal" },                      \
+       { 0x05, "SIGP stop" },                                  \
+       { 0x06, "SIGP restart" },                               \
+       { 0x09, "SIGP stop and store status" },                 \
+       { 0x0b, "SIGP initial cpu reset" },                     \
+       { 0x0d, "SIGP set prefix" },                            \
+       { 0x0e, "SIGP store status at address" },               \
+       { 0x12, "SIGP set architecture" },                      \
+       { 0x15, "SIGP sense running" }
 
 #define icpt_prog_codes                                                \
        { 0x0001, "Prog Operation" },                           \
index 200e063..3e077b2 100644 (file)
@@ -16,7 +16,9 @@ struct ucontext_extended {
        struct ucontext  *uc_link;
        stack_t           uc_stack;
        _sigregs          uc_mcontext;
-       unsigned long     uc_sigmask[2];
+       sigset_t          uc_sigmask;
+       /* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+       unsigned char     __unused[128 - sizeof(sigset_t)];
        unsigned long     uc_gprs_high[16];
 };
 
@@ -27,7 +29,9 @@ struct ucontext {
        struct ucontext  *uc_link;
        stack_t           uc_stack;
        _sigregs          uc_mcontext;
-       sigset_t          uc_sigmask;   /* mask last for extensibility */
+       sigset_t          uc_sigmask;
+       /* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+       unsigned char     __unused[128 - sizeof(sigset_t)];
 };
 
 #endif /* !_ASM_S390_UCONTEXT_H */
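
The padding added above reserves room for uc_sigmask to grow to glibc's
1024-bit (128-byte) sigset_t without moving the fields that follow it; on
64-bit s390 the kernel sigset_t is a single unsigned long, so the pad
works out to 128 - 8 = 120 bytes. A compile-time check of that layout
invariant (illustrative, not part of the patch; the compat ucontext32
hunk below applies the same pattern):

#include <assert.h>
#include <stdio.h>

typedef struct {
        unsigned long sig[1];           /* 64-bit kernel sigset_t: 8 bytes */
} ksigset_t;

struct mask_area {
        ksigset_t     uc_sigmask;
        unsigned char reserved[128 - sizeof(ksigset_t)];    /* 120 bytes */
};

/* glibc reserves 1024 bits = 128 bytes for its sigset_t */
static_assert(sizeof(struct mask_area) == 128, "mask area must stay 128 bytes");

int main(void)
{
        struct mask_area m;

        printf("pad = %zu bytes\n", sizeof(m.reserved));
        return 0;
}
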
index 39ddfdb..70d4b7c 100644 (file)
@@ -69,7 +69,9 @@ struct ucontext32 {
        __u32                   uc_link;        /* pointer */   
        compat_stack_t          uc_stack;
        _sigregs32              uc_mcontext;
-       compat_sigset_t         uc_sigmask;     /* mask last for extensibility */
+       compat_sigset_t         uc_sigmask;
+       /* Allow for uc_sigmask growth.  Glibc uses a 1024-bit sigset_t.  */
+       unsigned char           __unused[128 - sizeof(compat_sigset_t)];
 };
 
 struct stat64_emu31;
index 7ba7d67..e88d35d 100644 (file)
@@ -437,11 +437,11 @@ ENTRY(startup_kdump)
 
 #if defined(CONFIG_64BIT)
 #if defined(CONFIG_MARCH_ZEC12)
-       .long 3, 0xc100efea, 0xf46ce800, 0x00400000
+       .long 3, 0xc100eff2, 0xf46ce800, 0x00400000
 #elif defined(CONFIG_MARCH_Z196)
-       .long 2, 0xc100efea, 0xf46c0000
+       .long 2, 0xc100eff2, 0xf46c0000
 #elif defined(CONFIG_MARCH_Z10)
-       .long 2, 0xc100efea, 0xf0680000
+       .long 2, 0xc100eff2, 0xf0680000
 #elif defined(CONFIG_MARCH_Z9_109)
        .long 1, 0xc100efc2
 #elif defined(CONFIG_MARCH_Z990)
index 2d71673..5dc7ad9 100644 (file)
@@ -334,9 +334,14 @@ static int __poke_user(struct task_struct *child, addr_t addr, addr_t data)
                        unsigned long mask = PSW_MASK_USER;
 
                        mask |= is_ri_task(child) ? PSW_MASK_RI : 0;
-                       if ((data & ~mask) != PSW_USER_BITS)
+                       if ((data ^ PSW_USER_BITS) & ~mask)
+                               /* Invalid psw mask. */
+                               return -EINVAL;
+                       if ((data & PSW_MASK_ASC) == PSW_ASC_HOME)
+                               /* Invalid address-space-control bits */
                                return -EINVAL;
                        if ((data & PSW_MASK_EA) && !(data & PSW_MASK_BA))
+                               /* Invalid addressing mode bits */
                                return -EINVAL;
                }
                *(addr_t *)((addr_t) &task_pt_regs(child)->psw + addr) = data;
@@ -672,9 +677,12 @@ static int __poke_user_compat(struct task_struct *child,
 
                        mask |= is_ri_task(child) ? PSW32_MASK_RI : 0;
                        /* Build a 64 bit psw mask from 31 bit mask. */
-                       if ((tmp & ~mask) != PSW32_USER_BITS)
+                       if ((tmp ^ PSW32_USER_BITS) & ~mask)
                                /* Invalid psw mask. */
                                return -EINVAL;
+                       if ((data & PSW32_MASK_ASC) == PSW32_ASC_HOME)
+                               /* Invalid address-space-control bits */
+                               return -EINVAL;
                        regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) |
                                (regs->psw.mask & PSW_MASK_BA) |
                                (__u64)(tmp & mask) << 32;
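
The reworked mask check above, (data ^ PSW_USER_BITS) & ~mask, demands
that data agree with PSW_USER_BITS on every bit the user may not modify;
the old form, (data & ~mask) != PSW_USER_BITS, misfires whenever
PSW_USER_BITS itself has bits inside mask. A small demonstration with
made-up bit values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t mask      = 0x00ff;    /* user-modifiable bits (made up) */
        uint64_t user_bits = 0x0f01;    /* required bits, one inside mask */
        uint64_t data      = 0x0f42;    /* candidate PSW mask from ptrace */

        /* old test rejects valid input: (data & ~mask) is 0x0f00, not 0x0f01 */
        int old_ok = ((data & ~mask) == user_bits);
        /* new test accepts: all bits outside mask agree with user_bits */
        int new_ok = (((data ^ user_bits) & ~mask) == 0);

        printf("old=%d new=%d\n", old_ok, new_ok);
        return 0;
}
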
index a2cbd87..61e45b7 100644 (file)
@@ -812,7 +812,7 @@ static struct bpf_binary_header *bpf_alloc_binary(unsigned int bpfsize,
        return header;
 }
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
        struct bpf_binary_header *header = NULL;
        unsigned long size, prg_len, lit_len;
@@ -875,7 +875,7 @@ out:
        kfree(addrs);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;
index 9ddc51e..30de427 100644 (file)
 static LIST_HEAD(zpci_list);
 static DEFINE_SPINLOCK(zpci_list_lock);
 
-static void zpci_enable_irq(struct irq_data *data);
-static void zpci_disable_irq(struct irq_data *data);
-
 static struct irq_chip zpci_irq_chip = {
        .name = "zPCI",
-       .irq_unmask = zpci_enable_irq,
-       .irq_mask = zpci_disable_irq,
+       .irq_unmask = unmask_msi_irq,
+       .irq_mask = mask_msi_irq,
 };
 
 static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES);
@@ -244,43 +241,6 @@ static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
        return rc;
 }
 
-static int zpci_msi_set_mask_bits(struct msi_desc *msi, u32 mask, u32 flag)
-{
-       int offset, pos;
-       u32 mask_bits;
-
-       if (msi->msi_attrib.is_msix) {
-               offset = msi->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE +
-                       PCI_MSIX_ENTRY_VECTOR_CTRL;
-               msi->masked = readl(msi->mask_base + offset);
-               writel(flag, msi->mask_base + offset);
-       } else if (msi->msi_attrib.maskbit) {
-               pos = (long) msi->mask_base;
-               pci_read_config_dword(msi->dev, pos, &mask_bits);
-               mask_bits &= ~(mask);
-               mask_bits |= flag & mask;
-               pci_write_config_dword(msi->dev, pos, mask_bits);
-       } else
-               return 0;
-
-       msi->msi_attrib.maskbit = !!flag;
-       return 1;
-}
-
-static void zpci_enable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 0);
-}
-
-static void zpci_disable_irq(struct irq_data *data)
-{
-       struct msi_desc *msi = irq_get_msi_desc(data->irq);
-
-       zpci_msi_set_mask_bits(msi, 1, 1);
-}
-
 void pcibios_fixup_bus(struct pci_bus *bus)
 {
 }
@@ -487,7 +447,10 @@ void arch_teardown_msi_irqs(struct pci_dev *pdev)
 
        /* Release MSI interrupts */
        list_for_each_entry(msi, &pdev->msi_list, list) {
-               zpci_msi_set_mask_bits(msi, 1, 1);
+               if (msi->msi_attrib.is_msix)
+                       default_msix_mask_irq(msi, 1);
+               else
+                       default_msi_mask_irq(msi, 1, 1);
                irq_set_msi_desc(msi->irq, NULL);
                irq_free_desc(msi->irq);
                msi->msg.address_lo = 0;
index d4d16e4..bf5b3f5 100644 (file)
@@ -32,7 +32,8 @@ endif
 
 cflags-$(CONFIG_CPU_SH2)               := $(call cc-option,-m2,)
 cflags-$(CONFIG_CPU_SH2A)              += $(call cc-option,-m2a,) \
-                                          $(call cc-option,-m2a-nofpu,)
+                                          $(call cc-option,-m2a-nofpu,) \
+                                          $(call cc-option,-m4-nofpu,)
 cflags-$(CONFIG_CPU_SH3)               := $(call cc-option,-m3,)
 cflags-$(CONFIG_CPU_SH4)               := $(call cc-option,-m4,) \
        $(call cc-option,-mno-implicit-fp,-m4-nofpu)
index 29f2e98..407c87d 100644 (file)
@@ -78,6 +78,7 @@ config SPARC64
        select HAVE_C_RECORDMCOUNT
        select NO_BOOTMEM
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_DEFCONFIG
        string
index 503e6d9..df922f5 100644 (file)
@@ -124,7 +124,7 @@ extern void aes_sparc64_ctr_crypt_256(const u64 *key, const u64 *input,
                                      u64 *output, unsigned int len,
                                      u64 *iv);
 
-struct aes_ops aes128_ops = {
+static struct aes_ops aes128_ops = {
        .encrypt                = aes_sparc64_encrypt_128,
        .decrypt                = aes_sparc64_decrypt_128,
        .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_128,
@@ -136,7 +136,7 @@ struct aes_ops aes128_ops = {
        .ctr_crypt              = aes_sparc64_ctr_crypt_128,
 };
 
-struct aes_ops aes192_ops = {
+static struct aes_ops aes192_ops = {
        .encrypt                = aes_sparc64_encrypt_192,
        .decrypt                = aes_sparc64_decrypt_192,
        .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_192,
@@ -148,7 +148,7 @@ struct aes_ops aes192_ops = {
        .ctr_crypt              = aes_sparc64_ctr_crypt_192,
 };
 
-struct aes_ops aes256_ops = {
+static struct aes_ops aes256_ops = {
        .encrypt                = aes_sparc64_encrypt_256,
        .decrypt                = aes_sparc64_decrypt_256,
        .load_encrypt_keys      = aes_sparc64_load_encrypt_keys_256,
index f08fe51..7aed2be 100644 (file)
 
 #define ATOMIC_INIT(i)  { (i) }
 
-extern int __atomic_add_return(int, atomic_t *);
-extern int atomic_cmpxchg(atomic_t *, int, int);
+int __atomic_add_return(int, atomic_t *);
+int atomic_cmpxchg(atomic_t *, int, int);
 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
-extern int __atomic_add_unless(atomic_t *, int, int);
-extern void atomic_set(atomic_t *, int);
+int __atomic_add_unless(atomic_t *, int, int);
+void atomic_set(atomic_t *, int);
 
 #define atomic_read(v)          (*(volatile int *)&(v)->counter)
 
index 8b2f1bd..bb894c8 100644 (file)
 #define atomic_set(v, i)       (((v)->counter) = i)
 #define atomic64_set(v, i)     (((v)->counter) = i)
 
-extern void atomic_add(int, atomic_t *);
-extern void atomic64_add(long, atomic64_t *);
-extern void atomic_sub(int, atomic_t *);
-extern void atomic64_sub(long, atomic64_t *);
+void atomic_add(int, atomic_t *);
+void atomic64_add(long, atomic64_t *);
+void atomic_sub(int, atomic_t *);
+void atomic64_sub(long, atomic64_t *);
 
-extern int atomic_add_ret(int, atomic_t *);
-extern long atomic64_add_ret(long, atomic64_t *);
-extern int atomic_sub_ret(int, atomic_t *);
-extern long atomic64_sub_ret(long, atomic64_t *);
+int atomic_add_ret(int, atomic_t *);
+long atomic64_add_ret(long, atomic64_t *);
+int atomic_sub_ret(int, atomic_t *);
+long atomic64_sub_ret(long, atomic64_t *);
 
 #define atomic_dec_return(v) atomic_sub_ret(1, v)
 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
@@ -107,6 +107,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
 
-extern long atomic64_dec_if_positive(atomic64_t *v);
+long atomic64_dec_if_positive(atomic64_t *v);
 
 #endif /* !(__ARCH_SPARC64_ATOMIC__) */
index 13dc67f..3e09a07 100644 (file)
@@ -1,5 +1,12 @@
 #ifndef ___ASM_SPARC_AUXIO_H
 #define ___ASM_SPARC_AUXIO_H
+
+#ifndef __ASSEMBLY__
+
+extern void __iomem *auxio_register;
+
+#endif /* ifndef __ASSEMBLY__ */
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/auxio_64.h>
 #else
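
The auxio.h hunk above hoists the auxio_register declaration into the
common header, fenced with #ifndef __ASSEMBLY__ so the header stays safe
to include from assembly sources that only want its #defines (kbuild
defines __ASSEMBLY__ when assembling). A toy illustration of the guard
pattern, with a hypothetical stand-in variable:

#include <stdio.h>

/* Constants usable from both C and assembly. */
#define AUXIO_DEMO_LED_ON 1

/* C-only declarations are fenced off; when __ASSEMBLY__ is defined these
 * lines vanish and only the #defines above survive preprocessing. */
#ifndef __ASSEMBLY__
extern int auxio_demo_state;    /* hypothetical stand-in for auxio_register */
#endif

int auxio_demo_state = AUXIO_DEMO_LED_ON;

int main(void)
{
        printf("%d\n", auxio_demo_state);
        return 0;
}
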
index 3a31977..5d685df 100644 (file)
@@ -34,8 +34,8 @@
  * NOTE: these routines are implementation dependent--
  * understand the hardware you are querying!
  */
-extern void set_auxio(unsigned char bits_on, unsigned char bits_off);
-extern unsigned char get_auxio(void); /* .../asm/floppy.h */
+void set_auxio(unsigned char bits_on, unsigned char bits_off);
+unsigned char get_auxio(void); /* .../asm/floppy.h */
 
 /*
  * The following routines are provided for driver-compatibility
@@ -78,7 +78,7 @@ do { \
 
 
 /* AUXIO2 (Power Off Control) */
-extern __volatile__ unsigned char * auxio_power_register;
+extern volatile u8 __iomem *auxio_power_register;
 
 #define        AUXIO_POWER_DETECT_FAILURE      32
 #define        AUXIO_POWER_CLEAR_FAILURE       2
index f61cd1e..6079e59 100644 (file)
@@ -75,8 +75,6 @@
 
 #ifndef __ASSEMBLY__
 
-extern void __iomem *auxio_register;
-
 #define AUXIO_LTE_ON   1
 #define AUXIO_LTE_OFF  0
 
@@ -84,7 +82,7 @@ extern void __iomem *auxio_register;
  *
  * on - AUXIO_LTE_ON or AUXIO_LTE_OFF
  */
-extern void auxio_set_lte(int on);
+void auxio_set_lte(int on);
 
 #define AUXIO_LED_ON   1
 #define AUXIO_LED_OFF  0
@@ -93,7 +91,7 @@ extern void auxio_set_lte(int on);
  *
  * on - AUXIO_LED_ON or AUXIO_LED_OFF
  */
-extern void auxio_set_led(int on);
+void auxio_set_led(int on);
 
 #endif /* ifndef __ASSEMBLY__ */
 
index 297b2f2..9c988bf 100644 (file)
@@ -20,8 +20,8 @@ struct bit_map {
        int num_colors;
 };
 
-extern int bit_map_string_get(struct bit_map *t, int len, int align);
-extern void bit_map_clear(struct bit_map *t, int offset, int len);
-extern void bit_map_init(struct bit_map *t, unsigned long *map, int size);
+int bit_map_string_get(struct bit_map *t, int len, int align);
+void bit_map_clear(struct bit_map *t, int offset, int len);
+void bit_map_init(struct bit_map *t, unsigned long *map, int size);
 
 #endif /* defined(_SPARC_BITEXT_H) */
index 88c9a96..600ed1d 100644 (file)
@@ -18,9 +18,9 @@
 #error only <linux/bitops.h> can be included directly
 #endif
 
-extern unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
-extern unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
-extern unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___set_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___clear_bit(unsigned long *addr, unsigned long mask);
+unsigned long ___change_bit(unsigned long *addr, unsigned long mask);
 
 /*
  * Set bit 'nr' in 32-bit quantity at address 'addr' where bit '0'
index f1a051c..2d52240 100644 (file)
 #include <asm/byteorder.h>
 #include <asm/barrier.h>
 
-extern int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
-extern int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
-extern void set_bit(unsigned long nr, volatile unsigned long *addr);
-extern void clear_bit(unsigned long nr, volatile unsigned long *addr);
-extern void change_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_set_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_clear_bit(unsigned long nr, volatile unsigned long *addr);
+int test_and_change_bit(unsigned long nr, volatile unsigned long *addr);
+void set_bit(unsigned long nr, volatile unsigned long *addr);
+void clear_bit(unsigned long nr, volatile unsigned long *addr);
+void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #include <asm-generic/bitops/non-atomic.h>
 
@@ -30,8 +30,8 @@ extern void change_bit(unsigned long nr, volatile unsigned long *addr);
 
 #ifdef __KERNEL__
 
-extern int ffs(int x);
-extern unsigned long __ffs(unsigned long);
+int ffs(int x);
+unsigned long __ffs(unsigned long);
 
 #include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/sched.h>
@@ -41,10 +41,10 @@ extern unsigned long __ffs(unsigned long);
  * of bits set) of a N-bit word
  */
 
-extern unsigned long __arch_hweight64(__u64 w);
-extern unsigned int __arch_hweight32(unsigned int w);
-extern unsigned int __arch_hweight16(unsigned int w);
-extern unsigned int __arch_hweight8(unsigned int w);
+unsigned long __arch_hweight64(__u64 w);
+unsigned int __arch_hweight32(unsigned int w);
+unsigned int __arch_hweight16(unsigned int w);
+unsigned int __arch_hweight8(unsigned int w);
 
 #include <asm-generic/bitops/const_hweight.h>
 #include <asm-generic/bitops/lock.h>
index 9b2bc6b..75a32b1 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _SPARC_BTEXT_H
 #define _SPARC_BTEXT_H
 
-extern int btext_find_display(void);
+int btext_find_display(void);
 
 #endif /* _SPARC_BTEXT_H */
index 6bd9f43..eaa8f8d 100644 (file)
@@ -5,7 +5,7 @@
 #include <linux/compiler.h>
 
 #ifdef CONFIG_DEBUG_BUGVERBOSE
-extern void do_BUG(const char *file, int line);
+void do_BUG(const char *file, int line);
 #define BUG() do {                                     \
        do_BUG(__FILE__, __LINE__);                     \
        __builtin_trap();                               \
@@ -20,6 +20,6 @@ extern void do_BUG(const char *file, int line);
 #include <asm-generic/bug.h>
 
 struct pt_regs;
-extern void die_if_kernel(char *str, struct pt_regs *regs) __attribute__ ((noreturn));
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs);
 
 #endif
index bb014c2..1216400 100644 (file)
@@ -36,7 +36,7 @@
 #define flush_page_for_dma(addr) \
        sparc32_cachetlb_ops->page_for_dma(addr)
 
-extern void sparc_flush_page_to_ram(struct page *page);
+void sparc_flush_page_to_ram(struct page *page);
 
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
 #define flush_dcache_page(page)                        sparc_flush_page_to_ram(page)
@@ -51,8 +51,8 @@ extern void sparc_flush_page_to_ram(struct page *page);
  * way the windows are all clean for the next process and the stack
  * frames are up to date.
  */
-extern void flush_user_windows(void);
-extern void kill_user_windows(void);
-extern void flushw_all(void);
+void flush_user_windows(void);
+void kill_user_windows(void);
+void flushw_all(void);
 
 #endif /* _SPARC_CACHEFLUSH_H */
index 301736d..3896537 100644 (file)
@@ -10,7 +10,7 @@
 /* Cache flush operations. */
 #define flushw_all()   __asm__ __volatile__("flushw")
 
-extern void __flushw_user(void);
+void __flushw_user(void);
 #define flushw_user() __flushw_user()
 
 #define flush_user_windows flushw_user
@@ -30,29 +30,29 @@ extern void __flushw_user(void);
  * use block commit stores (which invalidate icache lines) during
  * module load, so we need this.
  */
-extern void flush_icache_range(unsigned long start, unsigned long end);
-extern void __flush_icache_page(unsigned long);
+void flush_icache_range(unsigned long start, unsigned long end);
+void __flush_icache_page(unsigned long);
 
-extern void __flush_dcache_page(void *addr, int flush_icache);
-extern void flush_dcache_page_impl(struct page *page);
+void __flush_dcache_page(void *addr, int flush_icache);
+void flush_dcache_page_impl(struct page *page);
 #ifdef CONFIG_SMP
-extern void smp_flush_dcache_page_impl(struct page *page, int cpu);
-extern void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
+void smp_flush_dcache_page_impl(struct page *page, int cpu);
+void flush_dcache_page_all(struct mm_struct *mm, struct page *page);
 #else
 #define smp_flush_dcache_page_impl(page,cpu) flush_dcache_page_impl(page)
 #define flush_dcache_page_all(mm,page) flush_dcache_page_impl(page)
 #endif
 
-extern void __flush_dcache_range(unsigned long start, unsigned long end);
+void __flush_dcache_range(unsigned long start, unsigned long end);
 #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-extern void flush_dcache_page(struct page *page);
+void flush_dcache_page(struct page *page);
 
 #define flush_icache_page(vma, pg)     do { } while(0)
 #define flush_icache_user_range(vma,pg,adr,len)        do { } while (0)
 
-extern void flush_ptrace_access(struct vm_area_struct *, struct page *,
-                               unsigned long uaddr, void *kaddr,
-                               unsigned long len, int write);
+void flush_ptrace_access(struct vm_area_struct *, struct page *,
+                        unsigned long uaddr, void *kaddr,
+                        unsigned long len, int write);
 
 #define copy_to_user_page(vma, page, vaddr, dst, src, len)             \
        do {                                                            \
index 04471dc..426b238 100644 (file)
@@ -29,7 +29,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void *buff, int len, __wsum sum);
+__wsum csum_partial(const void *buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from fs:src while it
  * checksums
@@ -38,7 +38,7 @@ extern __wsum csum_partial(const void *buff, int len, __wsum sum);
  * better 64-bit) boundary
  */
 
-extern unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
+unsigned int __csum_partial_copy_sparc_generic (const unsigned char *, unsigned char *);
 
 static inline __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
index 2ff81ae..b8779a6 100644 (file)
@@ -29,7 +29,7 @@
  *
  * it's best to have buff aligned on a 32-bit boundary
  */
-extern __wsum csum_partial(const void * buff, int len, __wsum sum);
+__wsum csum_partial(const void * buff, int len, __wsum sum);
 
 /* the same as csum_partial, but copies from user space while it
  * checksums
@@ -37,12 +37,12 @@ extern __wsum csum_partial(const void * buff, int len, __wsum sum);
  * here even more important to align src and dst on a 32-bit (or even
  * better 64-bit) boundary
  */
-extern __wsum csum_partial_copy_nocheck(const void *src, void *dst,
-                                             int len, __wsum sum);
+__wsum csum_partial_copy_nocheck(const void *src, void *dst,
+                                int len, __wsum sum);
 
-extern long __csum_partial_copy_from_user(const void __user *src,
-                                         void *dst, int len,
-                                         __wsum sum);
+long __csum_partial_copy_from_user(const void __user *src,
+                                  void *dst, int len,
+                                  __wsum sum);
 
 static inline __wsum
 csum_partial_copy_from_user(const void __user *src,
@@ -59,9 +59,9 @@ csum_partial_copy_from_user(const void __user *src,
  *     Copy and checksum to user
  */
 #define HAVE_CSUM_COPY_USER
-extern long __csum_partial_copy_to_user(const void *src,
-                                       void __user *dst, int len,
-                                         __wsum sum);
+long __csum_partial_copy_to_user(const void *src,
+                                void __user *dst, int len,
+                                __wsum sum);
 
 static inline __wsum
 csum_and_copy_to_user(const void *src,
@@ -77,7 +77,7 @@ csum_and_copy_to_user(const void *src,
 /* ihl is always 5 or greater, almost always is 5, and iph is word aligned
  * the majority of the time.
  */
-extern __sum16 ip_fast_csum(const void *iph, unsigned int ihl);
+__sum16 ip_fast_csum(const void *iph, unsigned int ihl);
 
 /* Fold a partial checksum without adding pseudo headers. */
 static inline __sum16 csum_fold(__wsum sum)
@@ -96,9 +96,9 @@ static inline __sum16 csum_fold(__wsum sum)
 }
 
 static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
-                                              unsigned int len,
-                                              unsigned short proto,
-                                              __wsum sum)
+                                       unsigned int len,
+                                       unsigned short proto,
+                                       __wsum sum)
 {
        __asm__ __volatile__(
 "      addcc           %1, %0, %0\n"
@@ -116,9 +116,9 @@ static inline __wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
  * returns a 16-bit checksum, already complemented
  */
 static inline __sum16 csum_tcpudp_magic(__be32 saddr, __be32 daddr,
-                                                  unsigned short len,
-                                                  unsigned short proto,
-                                                  __wsum sum)
+                                       unsigned short len,
+                                       unsigned short proto,
+                                       __wsum sum)
 {
        return csum_fold(csum_tcpudp_nofold(saddr,daddr,len,proto,sum));
 }
index 1fae1a0..32c29a1 100644 (file)
@@ -20,7 +20,7 @@ static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned lon
        return val;
 }
 
-extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
 {
@@ -45,9 +45,9 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
 #define __HAVE_ARCH_CMPXCHG    1
 
 /* bug catcher for when unsupported size is used - won't link */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
 /* we only need to support cmpxchg of a u32 on sparc */
-extern unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
+unsigned long __cmpxchg_u32(volatile u32 *m, u32 old, u32 new_);
 
 /* don't worry...optimizer will get rid of most of this */
 static inline unsigned long
index 4adefe8..0e1ed6c 100644 (file)
@@ -42,7 +42,7 @@ static inline unsigned long xchg64(__volatile__ unsigned long *m, unsigned long
 
 #define xchg(ptr,x) ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
 
-extern void __xchg_called_with_bad_pointer(void);
+void __xchg_called_with_bad_pointer(void);
 
 static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
                                       int size)
@@ -91,7 +91,7 @@ __cmpxchg_u64(volatile long *m, unsigned long old, unsigned long new)
 
 /* This function doesn't exist, so you'll get a linker error
    if something tries to do an invalid cmpxchg().  */
-extern void __cmpxchg_called_with_bad_pointer(void);
+void __cmpxchg_called_with_bad_pointer(void);
 
 static inline unsigned long
 __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new, int size)
index b5976de..128b56b 100644 (file)
@@ -1,5 +1,15 @@
 #ifndef ___ASM_SPARC_CPUDATA_H
 #define ___ASM_SPARC_CPUDATA_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/threads.h>
+#include <linux/percpu.h>
+
+extern const struct seq_operations cpuinfo_op;
+
+#endif /* !(__ASSEMBLY__) */
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/cpudata_64.h>
 #else
index 050ef35..0e59407 100644 (file)
@@ -8,9 +8,6 @@
 
 #ifndef __ASSEMBLY__
 
-#include <linux/percpu.h>
-#include <linux/threads.h>
-
 typedef struct {
        /* Dcache line 1 */
        unsigned int    __softirq_pending; /* must be 1st, see rtrap.S */
@@ -35,8 +32,6 @@ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
 #define cpu_data(__cpu)                per_cpu(__cpu_data, (__cpu))
 #define local_cpu_data()       __get_cpu_var(__cpu_data)
 
-extern const struct seq_operations cpuinfo_op;
-
 #endif /* !(__ASSEMBLY__) */
 
 #include <asm/trap_block.h>
index bc9aba2..3fb8ca1 100644 (file)
@@ -20,8 +20,8 @@ static inline void __delay(unsigned long loops)
 }
 
 /* This is too messy with inline asm on the Sparc. */
-extern void __udelay(unsigned long usecs, unsigned long lpj);
-extern void __ndelay(unsigned long nsecs, unsigned long lpj);
+void __udelay(unsigned long usecs, unsigned long lpj);
+void __ndelay(unsigned long nsecs, unsigned long lpj);
 
 #ifdef CONFIG_SMP
 #define __udelay_val   cpu_data(smp_processor_id()).udelay_val
index a77aa62..0ba5424 100644 (file)
@@ -8,8 +8,8 @@
 
 #ifndef __ASSEMBLY__
 
-extern void __delay(unsigned long loops);
-extern void udelay(unsigned long usecs);
+void __delay(unsigned long loops);
+void udelay(unsigned long usecs);
 #define mdelay(n)      udelay((n) * 1000)
 
 #endif /* !__ASSEMBLY__ */
index daa6a8a..bb3f0b0 100644 (file)
@@ -19,7 +19,7 @@ struct dev_archdata {
        int                     numa_node;
 };
 
-extern void of_propagate_archdata(struct platform_device *bus);
+void of_propagate_archdata(struct platform_device *bus);
 
 struct pdev_archdata {
        struct resource         resource[PROMREG_MAX];
index 05fe53f..1ee0271 100644 (file)
@@ -7,7 +7,7 @@
 
 #define DMA_ERROR_CODE (~(dma_addr_t)0x0)
 
-extern int dma_supported(struct device *dev, u64 mask);
+int dma_supported(struct device *dev, u64 mask);
 
 #define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
index f07a5b5..fcfb494 100644 (file)
@@ -22,14 +22,14 @@ struct ebus_dma_info {
        unsigned char   name[64];
 };
 
-extern int ebus_dma_register(struct ebus_dma_info *p);
-extern int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
-extern void ebus_dma_unregister(struct ebus_dma_info *p);
-extern int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
+int ebus_dma_register(struct ebus_dma_info *p);
+int ebus_dma_irq_enable(struct ebus_dma_info *p, int on);
+void ebus_dma_unregister(struct ebus_dma_info *p);
+int ebus_dma_request(struct ebus_dma_info *p, dma_addr_t bus_addr,
                            size_t len);
-extern void ebus_dma_prepare(struct ebus_dma_info *p, int write);
-extern unsigned int ebus_dma_residue(struct ebus_dma_info *p);
-extern unsigned int ebus_dma_addr(struct ebus_dma_info *p);
-extern void ebus_dma_enable(struct ebus_dma_info *p, int on);
+void ebus_dma_prepare(struct ebus_dma_info *p, int write);
+unsigned int ebus_dma_residue(struct ebus_dma_info *p);
+unsigned int ebus_dma_addr(struct ebus_dma_info *p);
+void ebus_dma_enable(struct ebus_dma_info *p, int on);
 
 #endif /* __ASM_SPARC_EBUS_DMA_H */
index fb3f169..071b83e 100644 (file)
@@ -9,11 +9,12 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 
-#include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/idprom.h>
 #include <asm/oplib.h>
 #include <asm/auxio.h>
+#include <asm/setup.h>
+#include <asm/page.h>
 #include <asm/irq.h>
 
 /* We don't need no stinkin' I/O port allocation crap. */
@@ -49,7 +50,6 @@ struct sun_flpy_controller {
 
 /* You'll only ever find one controller on a SparcStation anyways. */
 static struct sun_flpy_controller *sun_fdc = NULL;
-extern volatile unsigned char *fdc_status;
 
 struct sun_floppy_ops {
        unsigned char (*fd_inb)(int port);
@@ -212,13 +212,6 @@ static void sun_82077_fd_outb(unsigned char value, int port)
  * underruns.  If non-zero, doing_pdma encodes the direction of
  * the transfer for debugging.  1=read 2=write
  */
-extern char *pdma_vaddr;
-extern unsigned long pdma_size;
-extern volatile int doing_pdma;
-
-/* This is software state */
-extern char *pdma_base;
-extern unsigned long pdma_areasize;
 
 /* Common routines to all controller types on the Sparc. */
 static inline void virtual_dma_init(void)
@@ -263,8 +256,7 @@ static inline void sun_fd_enable_dma(void)
        pdma_areasize = pdma_size;
 }
 
-extern int sparc_floppy_request_irq(unsigned int irq,
-                                    irq_handler_t irq_handler);
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
 
 static int sun_fd_request_irq(void)
 {
index 7c90c50..6257564 100644 (file)
@@ -296,7 +296,7 @@ struct sun_pci_dma_op {
 static struct sun_pci_dma_op sun_pci_dma_current = { -1U, 0, 0, NULL};
 static struct sun_pci_dma_op sun_pci_dma_pending = { -1U, 0, 0, NULL};
 
-extern irqreturn_t floppy_interrupt(int irq, void *dev_id);
+irqreturn_t floppy_interrupt(int irq, void *dev_id);
 
 static unsigned char sun_pci_fd_inb(unsigned long port)
 {
index b0f18e9..9ec94ad 100644 (file)
@@ -6,7 +6,7 @@
 #define MCOUNT_INSN_SIZE       4 /* sizeof mcount call */
 
 #ifndef __ASSEMBLY__
-extern void _mcount(void);
+void _mcount(void);
 #endif
 
 #endif
@@ -22,4 +22,8 @@ struct dyn_arch_ftrace {
 };
 #endif /*  CONFIG_DYNAMIC_FTRACE */
 
+unsigned long prepare_ftrace_return(unsigned long parent,
+                                   unsigned long self_addr,
+                                   unsigned long frame_pointer);
+
 #endif /* _ASM_SPARC64_FTRACE */
index 4f9e15c..92ded29 100644 (file)
@@ -31,7 +31,7 @@ extern unsigned long highstart_pfn, highend_pfn;
 extern pgprot_t kmap_prot;
 extern pte_t *pkmap_page_table;
 
-extern void kmap_init(void) __init;
+void kmap_init(void) __init;
 
 /*
  * Right now we initialize only a single pte table. It can be extended
@@ -49,8 +49,8 @@ extern void kmap_init(void) __init;
 
 #define PKMAP_END (PKMAP_ADDR(LAST_PKMAP))
 
-extern void *kmap_high(struct page *page);
-extern void kunmap_high(struct page *page);
+void *kmap_high(struct page *page);
+void kunmap_high(struct page *page);
 
 static inline void *kmap(struct page *page)
 {
@@ -68,8 +68,8 @@ static inline void kunmap(struct page *page)
        kunmap_high(page);
 }
 
-extern void *kmap_atomic(struct page *page);
-extern void __kunmap_atomic(void *kvaddr);
+void *kmap_atomic(struct page *page);
+void __kunmap_atomic(void *kvaddr);
 
 #define flush_cache_kmaps()    flush_cache_all()
 
index b2b9b94..04b56f8 100644 (file)
@@ -19,7 +19,7 @@ struct hvtramp_descr {
        struct hvtramp_mapping  maps[1];
 };
 
-extern void hv_cpu_startup(unsigned long hvdescr_pa);
+void hv_cpu_startup(unsigned long hvdescr_pa);
 
 #endif
 
index ca121f0..94b39ca 100644 (file)
@@ -98,7 +98,7 @@
 #define HV_FAST_MACH_EXIT              0x00
 
 #ifndef __ASSEMBLY__
-extern void sun4v_mach_exit(unsigned long exit_code);
+void sun4v_mach_exit(unsigned long exit_code);
 #endif
 
 /* Domain services.  */
@@ -127,9 +127,9 @@ extern void sun4v_mach_exit(unsigned long exit_code);
 #define HV_FAST_MACH_DESC              0x01
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
-                                    unsigned long buf_len,
-                                    unsigned long *real_buf_len);
+unsigned long sun4v_mach_desc(unsigned long buffer_pa,
+                             unsigned long buf_len,
+                             unsigned long *real_buf_len);
 #endif
 
 /* mach_sir()
@@ -148,7 +148,7 @@ extern unsigned long sun4v_mach_desc(unsigned long buffer_pa,
 #define HV_FAST_MACH_SIR               0x02
 
 #ifndef __ASSEMBLY__
-extern void sun4v_mach_sir(void);
+void sun4v_mach_sir(void);
 #endif
 
 /* mach_set_watchdog()
@@ -204,8 +204,8 @@ extern void sun4v_mach_sir(void);
 #define HV_FAST_MACH_SET_WATCHDOG      0x05
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
-                                            unsigned long *orig_timeout);
+unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
+                                     unsigned long *orig_timeout);
 #endif
 
 /* CPU services.
@@ -250,10 +250,10 @@ extern unsigned long sun4v_mach_set_watchdog(unsigned long timeout,
 #define HV_FAST_CPU_START              0x10
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_start(unsigned long cpuid,
-                                    unsigned long pc,
-                                    unsigned long rtba,
-                                    unsigned long arg0);
+unsigned long sun4v_cpu_start(unsigned long cpuid,
+                             unsigned long pc,
+                             unsigned long rtba,
+                             unsigned long arg0);
 #endif
 
 /* cpu_stop()
@@ -278,7 +278,7 @@ extern unsigned long sun4v_cpu_start(unsigned long cpuid,
 #define HV_FAST_CPU_STOP               0x11
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
+unsigned long sun4v_cpu_stop(unsigned long cpuid);
 #endif
 
 /* cpu_yield()
@@ -295,7 +295,7 @@ extern unsigned long sun4v_cpu_stop(unsigned long cpuid);
 #define HV_FAST_CPU_YIELD              0x12
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_yield(void);
+unsigned long sun4v_cpu_yield(void);
 #endif
 
 /* cpu_qconf()
@@ -341,9 +341,9 @@ extern unsigned long sun4v_cpu_yield(void);
 #define  HV_CPU_QUEUE_NONRES_ERROR      0x3f
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_qconf(unsigned long type,
-                                    unsigned long queue_paddr,
-                                    unsigned long num_queue_entries);
+unsigned long sun4v_cpu_qconf(unsigned long type,
+                             unsigned long queue_paddr,
+                             unsigned long num_queue_entries);
 #endif
 
 /* cpu_qinfo()
@@ -394,7 +394,9 @@ extern unsigned long sun4v_cpu_qconf(unsigned long type,
 #define HV_FAST_CPU_MONDO_SEND         0x42
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long cpu_list_pa, unsigned long mondo_block_pa);
+unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count,
+                                  unsigned long cpu_list_pa,
+                                  unsigned long mondo_block_pa);
 #endif
 
 /* cpu_myid()
@@ -425,7 +427,7 @@ extern unsigned long sun4v_cpu_mondo_send(unsigned long cpu_count, unsigned long
 #define  HV_CPU_STATE_ERROR             0x03
 
 #ifndef __ASSEMBLY__
-extern long sun4v_cpu_state(unsigned long cpuid);
+long sun4v_cpu_state(unsigned long cpuid);
 #endif
 
 /* cpu_set_rtba()
@@ -625,8 +627,8 @@ struct hv_fault_status {
 #define HV_FAST_MMU_TSB_CTX0           0x20
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
-                                       unsigned long tsb_desc_ra);
+unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
+                                unsigned long tsb_desc_ra);
 #endif
 
 /* mmu_tsb_ctxnon0()
@@ -710,7 +712,7 @@ extern unsigned long sun4v_mmu_tsb_ctx0(unsigned long num_descriptions,
 #define HV_FAST_MMU_DEMAP_ALL          0x24
 
 #ifndef __ASSEMBLY__
-extern void sun4v_mmu_demap_all(void);
+void sun4v_mmu_demap_all(void);
 #endif
 
 /* mmu_map_perm_addr()
@@ -740,10 +742,10 @@ extern void sun4v_mmu_demap_all(void);
 #define HV_FAST_MMU_MAP_PERM_ADDR      0x25
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
-                                            unsigned long set_to_zero,
-                                            unsigned long tte,
-                                            unsigned long flags);
+unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
+                                     unsigned long set_to_zero,
+                                     unsigned long tte,
+                                     unsigned long flags);
 #endif
 
 /* mmu_fault_area_conf()
@@ -945,7 +947,7 @@ extern unsigned long sun4v_mmu_map_perm_addr(unsigned long vaddr,
 #define HV_FAST_TOD_GET                        0x50
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_tod_get(unsigned long *time);
+unsigned long sun4v_tod_get(unsigned long *time);
 #endif
 
 /* tod_set()
@@ -962,7 +964,7 @@ extern unsigned long sun4v_tod_get(unsigned long *time);
 #define HV_FAST_TOD_SET                        0x51
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_tod_set(unsigned long time);
+unsigned long sun4v_tod_set(unsigned long time);
 #endif
 
 /* Console services */
@@ -1038,14 +1040,14 @@ extern unsigned long sun4v_tod_set(unsigned long time);
 #define HV_FAST_CONS_WRITE             0x63
 
 #ifndef __ASSEMBLY__
-extern long sun4v_con_getchar(long *status);
-extern long sun4v_con_putchar(long c);
-extern long sun4v_con_read(unsigned long buffer,
-                          unsigned long size,
-                          unsigned long *bytes_read);
-extern unsigned long sun4v_con_write(unsigned long buffer,
-                                    unsigned long size,
-                                    unsigned long *bytes_written);
+long sun4v_con_getchar(long *status);
+long sun4v_con_putchar(long c);
+long sun4v_con_read(unsigned long buffer,
+                   unsigned long size,
+                   unsigned long *bytes_read);
+unsigned long sun4v_con_write(unsigned long buffer,
+                             unsigned long size,
+                             unsigned long *bytes_written);
 #endif
 
 /* mach_set_soft_state()
@@ -1080,8 +1082,8 @@ extern unsigned long sun4v_con_write(unsigned long buffer,
 #define  HV_SOFT_STATE_TRANSITION       0x02
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
-                                              unsigned long msg_string_ra);
+unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
+                                       unsigned long msg_string_ra);
 #endif
 
 /* mach_get_soft_state()
@@ -1159,20 +1161,20 @@ extern unsigned long sun4v_mach_set_soft_state(unsigned long soft_state,
 #define HV_FAST_SVC_CLRSTATUS          0x84
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_svc_send(unsigned long svc_id,
-                                   unsigned long buffer,
-                                   unsigned long buffer_size,
-                                   unsigned long *sent_bytes);
-extern unsigned long sun4v_svc_recv(unsigned long svc_id,
-                                   unsigned long buffer,
-                                   unsigned long buffer_size,
-                                   unsigned long *recv_bytes);
-extern unsigned long sun4v_svc_getstatus(unsigned long svc_id,
-                                        unsigned long *status_bits);
-extern unsigned long sun4v_svc_setstatus(unsigned long svc_id,
-                                        unsigned long status_bits);
-extern unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
-                                        unsigned long status_bits);
+unsigned long sun4v_svc_send(unsigned long svc_id,
+                            unsigned long buffer,
+                            unsigned long buffer_size,
+                            unsigned long *sent_bytes);
+unsigned long sun4v_svc_recv(unsigned long svc_id,
+                            unsigned long buffer,
+                            unsigned long buffer_size,
+                            unsigned long *recv_bytes);
+unsigned long sun4v_svc_getstatus(unsigned long svc_id,
+                                 unsigned long *status_bits);
+unsigned long sun4v_svc_setstatus(unsigned long svc_id,
+                                 unsigned long status_bits);
+unsigned long sun4v_svc_clrstatus(unsigned long svc_id,
+                                 unsigned long status_bits);
 #endif
 
 /* Trap trace services.
@@ -1458,8 +1460,8 @@ struct hv_trap_trace_entry {
 #define HV_FAST_INTR_DEVINO2SYSINO     0xa0
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
-                                           unsigned long devino);
+unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
+                                    unsigned long devino);
 #endif
 
 /* intr_getenabled()
@@ -1476,7 +1478,7 @@ extern unsigned long sun4v_devino_to_sysino(unsigned long devhandle,
 #define HV_FAST_INTR_GETENABLED                0xa1
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
+unsigned long sun4v_intr_getenabled(unsigned long sysino);
 #endif
 
 /* intr_setenabled()
@@ -1492,7 +1494,8 @@ extern unsigned long sun4v_intr_getenabled(unsigned long sysino);
 #define HV_FAST_INTR_SETENABLED                0xa2
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long intr_enabled);
+unsigned long sun4v_intr_setenabled(unsigned long sysino,
+                                   unsigned long intr_enabled);
 #endif
 
 /* intr_getstate()
@@ -1508,7 +1511,7 @@ extern unsigned long sun4v_intr_setenabled(unsigned long sysino, unsigned long i
 #define HV_FAST_INTR_GETSTATE          0xa3
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_getstate(unsigned long sysino);
+unsigned long sun4v_intr_getstate(unsigned long sysino);
 #endif
 
 /* intr_setstate()
@@ -1528,7 +1531,7 @@ extern unsigned long sun4v_intr_getstate(unsigned long sysino);
 #define HV_FAST_INTR_SETSTATE          0xa4
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
+unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long intr_state);
 #endif
 
 /* intr_gettarget()
@@ -1546,7 +1549,7 @@ extern unsigned long sun4v_intr_setstate(unsigned long sysino, unsigned long int
 #define HV_FAST_INTR_GETTARGET         0xa5
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
+unsigned long sun4v_intr_gettarget(unsigned long sysino);
 #endif
 
 /* intr_settarget()
@@ -1563,7 +1566,7 @@ extern unsigned long sun4v_intr_gettarget(unsigned long sysino);
 #define HV_FAST_INTR_SETTARGET         0xa6
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
+unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cpuid);
 #endif
 
 /* vintr_get_cookie()
@@ -1647,30 +1650,30 @@ extern unsigned long sun4v_intr_settarget(unsigned long sysino, unsigned long cp
 #define HV_FAST_VINTR_SET_TARGET       0xae
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long *cookie);
-extern unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long cookie);
-extern unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long *valid);
-extern unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long valid);
-extern unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long *state);
-extern unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
-                                          unsigned long dev_ino,
-                                          unsigned long state);
-extern unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long *cpuid);
-extern unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
-                                           unsigned long dev_ino,
-                                           unsigned long cpuid);
+unsigned long sun4v_vintr_get_cookie(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long *cookie);
+unsigned long sun4v_vintr_set_cookie(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long cookie);
+unsigned long sun4v_vintr_get_valid(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long *valid);
+unsigned long sun4v_vintr_set_valid(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long valid);
+unsigned long sun4v_vintr_get_state(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long *state);
+unsigned long sun4v_vintr_set_state(unsigned long dev_handle,
+                                   unsigned long dev_ino,
+                                   unsigned long state);
+unsigned long sun4v_vintr_get_target(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long *cpuid);
+unsigned long sun4v_vintr_set_target(unsigned long dev_handle,
+                                    unsigned long dev_ino,
+                                    unsigned long cpuid);
 #endif
 
 /* PCI IO services.
@@ -2627,50 +2630,50 @@ struct ldc_mtable_entry {
 #define HV_FAST_LDC_REVOKE             0xef
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
-                                       unsigned long ra,
-                                       unsigned long num_entries);
-extern unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
-                                       unsigned long *ra,
-                                       unsigned long *num_entries);
-extern unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
-                                           unsigned long *head_off,
-                                           unsigned long *tail_off,
-                                           unsigned long *chan_state);
-extern unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
-                                           unsigned long tail_off);
-extern unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
-                                       unsigned long ra,
-                                       unsigned long num_entries);
-extern unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
-                                       unsigned long *ra,
-                                       unsigned long *num_entries);
-extern unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
-                                           unsigned long *head_off,
-                                           unsigned long *tail_off,
-                                           unsigned long *chan_state);
-extern unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
-                                           unsigned long head_off);
-extern unsigned long sun4v_ldc_set_map_table(unsigned long channel,
-                                            unsigned long ra,
-                                            unsigned long num_entries);
-extern unsigned long sun4v_ldc_get_map_table(unsigned long channel,
-                                            unsigned long *ra,
-                                            unsigned long *num_entries);
-extern unsigned long sun4v_ldc_copy(unsigned long channel,
-                                   unsigned long dir_code,
-                                   unsigned long tgt_raddr,
-                                   unsigned long lcl_raddr,
-                                   unsigned long len,
-                                   unsigned long *actual_len);
-extern unsigned long sun4v_ldc_mapin(unsigned long channel,
-                                    unsigned long cookie,
-                                    unsigned long *ra,
-                                    unsigned long *perm);
-extern unsigned long sun4v_ldc_unmap(unsigned long ra);
-extern unsigned long sun4v_ldc_revoke(unsigned long channel,
-                                     unsigned long cookie,
-                                     unsigned long mte_cookie);
+unsigned long sun4v_ldc_tx_qconf(unsigned long channel,
+                                unsigned long ra,
+                                unsigned long num_entries);
+unsigned long sun4v_ldc_tx_qinfo(unsigned long channel,
+                                unsigned long *ra,
+                                unsigned long *num_entries);
+unsigned long sun4v_ldc_tx_get_state(unsigned long channel,
+                                    unsigned long *head_off,
+                                    unsigned long *tail_off,
+                                    unsigned long *chan_state);
+unsigned long sun4v_ldc_tx_set_qtail(unsigned long channel,
+                                    unsigned long tail_off);
+unsigned long sun4v_ldc_rx_qconf(unsigned long channel,
+                                unsigned long ra,
+                                unsigned long num_entries);
+unsigned long sun4v_ldc_rx_qinfo(unsigned long channel,
+                                unsigned long *ra,
+                                unsigned long *num_entries);
+unsigned long sun4v_ldc_rx_get_state(unsigned long channel,
+                                    unsigned long *head_off,
+                                    unsigned long *tail_off,
+                                    unsigned long *chan_state);
+unsigned long sun4v_ldc_rx_set_qhead(unsigned long channel,
+                                    unsigned long head_off);
+unsigned long sun4v_ldc_set_map_table(unsigned long channel,
+                                     unsigned long ra,
+                                     unsigned long num_entries);
+unsigned long sun4v_ldc_get_map_table(unsigned long channel,
+                                     unsigned long *ra,
+                                     unsigned long *num_entries);
+unsigned long sun4v_ldc_copy(unsigned long channel,
+                            unsigned long dir_code,
+                            unsigned long tgt_raddr,
+                            unsigned long lcl_raddr,
+                            unsigned long len,
+                            unsigned long *actual_len);
+unsigned long sun4v_ldc_mapin(unsigned long channel,
+                             unsigned long cookie,
+                             unsigned long *ra,
+                             unsigned long *perm);
+unsigned long sun4v_ldc_unmap(unsigned long ra);
+unsigned long sun4v_ldc_revoke(unsigned long channel,
+                              unsigned long cookie,
+                              unsigned long mte_cookie);
 #endif
 
 /* Performance counter services.  */
@@ -2727,14 +2730,14 @@ extern unsigned long sun4v_ldc_revoke(unsigned long channel,
 #define HV_FAST_N2_SET_PERFREG         0x105
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_niagara_getperf(unsigned long reg,
-                                          unsigned long *val);
-extern unsigned long sun4v_niagara_setperf(unsigned long reg,
-                                          unsigned long val);
-extern unsigned long sun4v_niagara2_getperf(unsigned long reg,
-                                           unsigned long *val);
-extern unsigned long sun4v_niagara2_setperf(unsigned long reg,
-                                           unsigned long val);
+unsigned long sun4v_niagara_getperf(unsigned long reg,
+                                   unsigned long *val);
+unsigned long sun4v_niagara_setperf(unsigned long reg,
+                                   unsigned long val);
+unsigned long sun4v_niagara2_getperf(unsigned long reg,
+                                    unsigned long *val);
+unsigned long sun4v_niagara2_setperf(unsigned long reg,
+                                    unsigned long val);
 #endif
 
 /* MMU statistics services.
@@ -2829,8 +2832,8 @@ struct hv_mmu_statistics {
 #define HV_FAST_MMUSTAT_INFO           0x103
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
-extern unsigned long sun4v_mmustat_info(unsigned long *ra);
+unsigned long sun4v_mmustat_conf(unsigned long ra, unsigned long *orig_ra);
+unsigned long sun4v_mmustat_info(unsigned long *ra);
 #endif
 
 /* NCS crypto services  */
@@ -2919,9 +2922,9 @@ struct hv_ncs_qtail_update_arg {
 #define HV_FAST_NCS_REQUEST            0x110
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_ncs_request(unsigned long request,
-                                      unsigned long arg_ra,
-                                      unsigned long arg_size);
+unsigned long sun4v_ncs_request(unsigned long request,
+                               unsigned long arg_ra,
+                               unsigned long arg_size);
 #endif
 
 #define HV_FAST_FIRE_GET_PERFREG       0x120
@@ -2930,18 +2933,18 @@ extern unsigned long sun4v_ncs_request(unsigned long request,
 #define HV_FAST_REBOOT_DATA_SET                0x172
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_reboot_data_set(unsigned long ra,
-                                          unsigned long len);
+unsigned long sun4v_reboot_data_set(unsigned long ra,
+                                   unsigned long len);
 #endif
 
 #define HV_FAST_VT_GET_PERFREG         0x184
 #define HV_FAST_VT_SET_PERFREG         0x185
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
-                                         unsigned long *reg_val);
-extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
-                                         unsigned long reg_val);
+unsigned long sun4v_vt_get_perfreg(unsigned long reg_num,
+                                  unsigned long *reg_val);
+unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
+                                  unsigned long reg_val);
 #endif
 
 /* Function numbers for HV_CORE_TRAP.  */
@@ -2978,21 +2981,21 @@ extern unsigned long sun4v_vt_set_perfreg(unsigned long reg_num,
 #define HV_GRP_DIAG                    0x0300
 
 #ifndef __ASSEMBLY__
-extern unsigned long sun4v_get_version(unsigned long group,
-                                      unsigned long *major,
-                                      unsigned long *minor);
-extern unsigned long sun4v_set_version(unsigned long group,
-                                      unsigned long major,
-                                      unsigned long minor,
-                                      unsigned long *actual_minor);
-
-extern int sun4v_hvapi_register(unsigned long group, unsigned long major,
-                               unsigned long *minor);
-extern void sun4v_hvapi_unregister(unsigned long group);
-extern int sun4v_hvapi_get(unsigned long group,
-                          unsigned long *major,
-                          unsigned long *minor);
-extern void sun4v_hvapi_init(void);
+unsigned long sun4v_get_version(unsigned long group,
+                               unsigned long *major,
+                               unsigned long *minor);
+unsigned long sun4v_set_version(unsigned long group,
+                               unsigned long major,
+                               unsigned long minor,
+                               unsigned long *actual_minor);
+
+int sun4v_hvapi_register(unsigned long group, unsigned long major,
+                        unsigned long *minor);
+void sun4v_hvapi_unregister(unsigned long group);
+int sun4v_hvapi_get(unsigned long group,
+                   unsigned long *major,
+                   unsigned long *minor);
+void sun4v_hvapi_init(void);
 #endif
 
 #endif /* !(_SPARC64_HYPERVISOR_H) */
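
The hypervisor.h hunks above follow one pattern that repeats through the rest of this merge: `extern' is dropped from file-scope function declarations, where it is implicit, and the continuation lines are re-indented to match the shorter declaration. A minimal sketch of why the change is behavior-neutral, with an illustrative HV_EOK call site added (the wrapper name is taken from the declarations above; the caller is hypothetical):

        /* The two declarations below are equivalent: `extern' is
         * implicit on file-scope function declarations, so removing
         * it changes nothing in the generated code.
         */
        extern unsigned long sun4v_tod_get(unsigned long *time);
        unsigned long sun4v_tod_get(unsigned long *time);

        /* These wrappers return hypervisor status codes; a hedged
         * caller checks against HV_EOK (0) and maps failure to errno.
         */
        static int read_tod(unsigned long *t)  /* name illustrative */
        {
                return sun4v_tod_get(t) == HV_EOK ? 0 : -EIO;
        }
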
index 6976aa2..3793f7f 100644 (file)
@@ -20,6 +20,6 @@ struct idprom {
 };
 
 extern struct idprom *idprom;
-extern void idprom_init(void);
+void idprom_init(void);
 
 #endif /* !(_SPARC_IDPROM_H) */
index 01ab2f6..04a9701 100644 (file)
@@ -43,7 +43,7 @@
 struct iounit_struct {
        unsigned long           bmap[(IOUNIT_DMA_SIZE >> (PAGE_SHIFT + 3)) / sizeof(unsigned long)];
        spinlock_t              lock;
-       iopte_t                 *page_table;
+       iopte_t __iomem         *page_table;
        unsigned long           rotor[3];
        unsigned long           limit[4];
 };
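
Unlike the prototype cleanups, this io-unit hunk is a sparse-annotation fix: `page_table' points at storage reached through I/O space, so it gains `__iomem'. Roughly what the annotation expands to in the kernel's compiler headers (simplified; it is only active when building under sparse):

        #ifdef __CHECKER__
        # define __iomem __attribute__((noderef, address_space(2)))
        #else
        # define __iomem        /* no effect on a normal compile */
        #endif

With `noderef', sparse rejects plain dereferences of such a pointer, forcing accesses through accessors like the sbus_*() helpers that appear later in this patch.
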
index c1acbd8..9f53290 100644 (file)
 #define __SPARC_IO_H
 
 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/ioport.h>  /* struct resource */
 
-#include <asm/page.h>      /* IO address mapping routines need this */
-#include <asm-generic/pci_iomap.h>
-
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
-
-static inline u32 flip_dword (u32 l)
-{
-       return ((l&0xff)<<24) | (((l>>8)&0xff)<<16) | (((l>>16)&0xff)<<8)| ((l>>24)&0xff);
-}
-
-static inline u16 flip_word (u16 w)
-{
-       return ((w&0xff) << 8) | ((w>>8)&0xff);
-}
-
-#define mmiowb()
-
-/*
- * Memory mapped I/O to PCI
- */
-
-static inline u8 __raw_readb(const volatile void __iomem *addr)
-{
-       return *(__force volatile u8 *)addr;
-}
-
-static inline u16 __raw_readw(const volatile void __iomem *addr)
-{
-       return *(__force volatile u16 *)addr;
-}
-
-static inline u32 __raw_readl(const volatile void __iomem *addr)
-{
-       return *(__force volatile u32 *)addr;
-}
+#define readb_relaxed(__addr)  readb(__addr)
+#define readw_relaxed(__addr)  readw(__addr)
+#define readl_relaxed(__addr)  readl(__addr)
 
-static inline void __raw_writeb(u8 b, volatile void __iomem *addr)
-{
-       *(__force volatile u8 *)addr = b;
-}
+#define IO_SPACE_LIMIT 0xffffffff
 
-static inline void __raw_writew(u16 w, volatile void __iomem *addr)
-{
-       *(__force volatile u16 *)addr = w;
-}
+#define memset_io(d,c,sz)     _memset_io(d,c,sz)
+#define memcpy_fromio(d,s,sz) _memcpy_fromio(d,s,sz)
+#define memcpy_toio(d,s,sz)   _memcpy_toio(d,s,sz)
 
-static inline void __raw_writel(u32 l, volatile void __iomem *addr)
-{
-       *(__force volatile u32 *)addr = l;
-}
+#include <asm-generic/io.h>
 
-static inline u8 __readb(const volatile void __iomem *addr)
+static inline void _memset_io(volatile void __iomem *dst,
+                              int c, __kernel_size_t n)
 {
-       return *(__force volatile u8 *)addr;
-}
+       volatile void __iomem *d = dst;
 
-static inline u16 __readw(const volatile void __iomem *addr)
-{
-       return flip_word(*(__force volatile u16 *)addr);
+       while (n--) {
+               writeb(c, d);
+               d++;
+       }
 }
 
-static inline u32 __readl(const volatile void __iomem *addr)
+static inline void _memcpy_fromio(void *dst, const volatile void __iomem *src,
+                                  __kernel_size_t n)
 {
-       return flip_dword(*(__force volatile u32 *)addr);
-}
+       char *d = dst;
 
-static inline void __writeb(u8 b, volatile void __iomem *addr)
-{
-       *(__force volatile u8 *)addr = b;
+       while (n--) {
+               char tmp = readb(src);
+               *d++ = tmp;
+               src++;
+       }
 }
 
-static inline void __writew(u16 w, volatile void __iomem *addr)
+static inline void _memcpy_toio(volatile void __iomem *dst, const void *src,
+                                __kernel_size_t n)
 {
-       *(__force volatile u16 *)addr = flip_word(w);
-}
+       const char *s = src;
+       volatile void __iomem *d = dst;
 
-static inline void __writel(u32 l, volatile void __iomem *addr)
-{
-       *(__force volatile u32 *)addr = flip_dword(l);
+       while (n--) {
+               char tmp = *s++;
+               writeb(tmp, d);
+               d++;
+       }
 }
 
-#define readb(__addr)          __readb(__addr)
-#define readw(__addr)          __readw(__addr)
-#define readl(__addr)          __readl(__addr)
-#define readb_relaxed(__addr)  readb(__addr)
-#define readw_relaxed(__addr)  readw(__addr)
-#define readl_relaxed(__addr)  readl(__addr)
-
-#define writeb(__b, __addr)    __writeb((__b),(__addr))
-#define writew(__w, __addr)    __writew((__w),(__addr))
-#define writel(__l, __addr)    __writel((__l),(__addr))
-
-/*
- * I/O space operations
- *
- * Arrangement on a Sun is somewhat complicated.
- *
- * First of all, we want to use standard Linux drivers
- * for keyboard, PC serial, etc. These drivers think
- * they access I/O space and use inb/outb.
- * On the other hand, EBus bridge accepts PCI *memory*
- * cycles and converts them into ISA *I/O* cycles.
- * Ergo, we want inb & outb to generate PCI memory cycles.
- *
- * If we want to issue PCI *I/O* cycles, we do this
- * with a low 64K fixed window in PCIC. This window gets
- * mapped somewhere into virtual kernel space and we
- * can use inb/outb again.
- */
-#define inb_local(__addr)      __readb((void __iomem *)(unsigned long)(__addr))
-#define inb(__addr)            __readb((void __iomem *)(unsigned long)(__addr))
-#define inw(__addr)            __readw((void __iomem *)(unsigned long)(__addr))
-#define inl(__addr)            __readl((void __iomem *)(unsigned long)(__addr))
-
-#define outb_local(__b, __addr)        __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outb(__b, __addr)      __writeb(__b, (void __iomem *)(unsigned long)(__addr))
-#define outw(__w, __addr)      __writew(__w, (void __iomem *)(unsigned long)(__addr))
-#define outl(__l, __addr)      __writel(__l, (void __iomem *)(unsigned long)(__addr))
-
-#define inb_p(__addr)          inb(__addr)
-#define outb_p(__b, __addr)    outb(__b, __addr)
-#define inw_p(__addr)          inw(__addr)
-#define outw_p(__w, __addr)    outw(__w, __addr)
-#define inl_p(__addr)          inl(__addr)
-#define outl_p(__l, __addr)    outl(__l, __addr)
-
-void outsb(unsigned long addr, const void *src, unsigned long cnt);
-void outsw(unsigned long addr, const void *src, unsigned long cnt);
-void outsl(unsigned long addr, const void *src, unsigned long cnt);
-void insb(unsigned long addr, void *dst, unsigned long count);
-void insw(unsigned long addr, void *dst, unsigned long count);
-void insl(unsigned long addr, void *dst, unsigned long count);
-
-#define IO_SPACE_LIMIT 0xffffffff
-
 /*
  * SBus accessors.
  *
  * SBus has only one, memory mapped, I/O space.
  * We do not need to flip bytes for SBus of course.
  */
-static inline u8 _sbus_readb(const volatile void __iomem *addr)
+static inline u8 sbus_readb(const volatile void __iomem *addr)
 {
        return *(__force volatile u8 *)addr;
 }
 
-static inline u16 _sbus_readw(const volatile void __iomem *addr)
+static inline u16 sbus_readw(const volatile void __iomem *addr)
 {
        return *(__force volatile u16 *)addr;
 }
 
-static inline u32 _sbus_readl(const volatile void __iomem *addr)
+static inline u32 sbus_readl(const volatile void __iomem *addr)
 {
        return *(__force volatile u32 *)addr;
 }
 
-static inline void _sbus_writeb(u8 b, volatile void __iomem *addr)
+static inline void sbus_writeb(u8 b, volatile void __iomem *addr)
 {
        *(__force volatile u8 *)addr = b;
 }
 
-static inline void _sbus_writew(u16 w, volatile void __iomem *addr)
+static inline void sbus_writew(u16 w, volatile void __iomem *addr)
 {
        *(__force volatile u16 *)addr = w;
 }
 
-static inline void _sbus_writel(u32 l, volatile void __iomem *addr)
+static inline void sbus_writel(u32 l, volatile void __iomem *addr)
 {
        *(__force volatile u32 *)addr = l;
 }
 
-/*
- * The only reason for #define's is to hide casts to unsigned long.
- */
-#define sbus_readb(__addr)             _sbus_readb(__addr)
-#define sbus_readw(__addr)             _sbus_readw(__addr)
-#define sbus_readl(__addr)             _sbus_readl(__addr)
-#define sbus_writeb(__b, __addr)       _sbus_writeb(__b, __addr)
-#define sbus_writew(__w, __addr)       _sbus_writew(__w, __addr)
-#define sbus_writel(__l, __addr)       _sbus_writel(__l, __addr)
-
-static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_size_t n)
+static inline void sbus_memset_io(volatile void __iomem *__dst, int c,
+                                  __kernel_size_t n)
 {
        while(n--) {
                sbus_writeb(c, __dst);
@@ -194,22 +97,9 @@ static inline void sbus_memset_io(volatile void __iomem *__dst, int c, __kernel_
        }
 }
 
-static inline void
-_memset_io(volatile void __iomem *dst, int c, __kernel_size_t n)
-{
-       volatile void __iomem *d = dst;
-
-       while (n--) {
-               writeb(c, d);
-               d++;
-       }
-}
-
-#define memset_io(d,c,sz)      _memset_io(d,c,sz)
-
-static inline void
-_sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
-                   __kernel_size_t n)
+static inline void sbus_memcpy_fromio(void *dst,
+                                      const volatile void __iomem *src,
+                                      __kernel_size_t n)
 {
        char *d = dst;
 
@@ -220,25 +110,9 @@ _sbus_memcpy_fromio(void *dst, const volatile void __iomem *src,
        }
 }
 
-#define sbus_memcpy_fromio(d, s, sz)   _sbus_memcpy_fromio(d, s, sz)
-
-static inline void
-_memcpy_fromio(void *dst, const volatile void __iomem *src, __kernel_size_t n)
-{
-       char *d = dst;
-
-       while (n--) {
-               char tmp = readb(src);
-               *d++ = tmp;
-               src++;
-       }
-}
-
-#define memcpy_fromio(d,s,sz)  _memcpy_fromio(d,s,sz)
-
-static inline void
-_sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
-                 __kernel_size_t n)
+static inline void sbus_memcpy_toio(volatile void __iomem *dst,
+                                    const void *src,
+                                    __kernel_size_t n)
 {
        const char *s = src;
        volatile void __iomem *d = dst;
@@ -250,81 +124,26 @@ _sbus_memcpy_toio(volatile void __iomem *dst, const void *src,
        }
 }
 
-#define sbus_memcpy_toio(d, s, sz)     _sbus_memcpy_toio(d, s, sz)
-
-static inline void
-_memcpy_toio(volatile void __iomem *dst, const void *src, __kernel_size_t n)
-{
-       const char *s = src;
-       volatile void __iomem *d = dst;
-
-       while (n--) {
-               char tmp = *s++;
-               writeb(tmp, d);
-               d++;
-       }
-}
-
-#define memcpy_toio(d,s,sz)    _memcpy_toio(d,s,sz)
-
 #ifdef __KERNEL__
 
 /*
  * Bus number may be embedded in the higher bits of the physical address.
  * This is why we have no bus number argument to ioremap().
  */
-extern void __iomem *ioremap(unsigned long offset, unsigned long size);
+void __iomem *ioremap(unsigned long offset, unsigned long size);
 #define ioremap_nocache(X,Y)   ioremap((X),(Y))
 #define ioremap_wc(X,Y)                ioremap((X),(Y))
-extern void iounmap(volatile void __iomem *addr);
-
-#define ioread8(X)                     readb(X)
-#define ioread16(X)                    readw(X)
-#define ioread16be(X)                  __raw_readw(X)
-#define ioread32(X)                    readl(X)
-#define ioread32be(X)                  __raw_readl(X)
-#define iowrite8(val,X)                        writeb(val,X)
-#define iowrite16(val,X)               writew(val,X)
-#define iowrite16be(val,X)             __raw_writew(val,X)
-#define iowrite32(val,X)               writel(val,X)
-#define iowrite32be(val,X)             __raw_writel(val,X)
-
-static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
-{
-       insb((unsigned long __force)port, buf, count);
-}
-static inline void ioread16_rep(void __iomem *port, void *buf, unsigned long count)
-{
-       insw((unsigned long __force)port, buf, count);
-}
-
-static inline void ioread32_rep(void __iomem *port, void *buf, unsigned long count)
-{
-       insl((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite8_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-       outsb((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite16_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-       outsw((unsigned long __force)port, buf, count);
-}
-
-static inline void iowrite32_rep(void __iomem *port, const void *buf, unsigned long count)
-{
-       outsl((unsigned long __force)port, buf, count);
-}
+void iounmap(volatile void __iomem *addr);
 
 /* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
+
+
 
 /*
  * At the moment, we do not use CMOS_READ anywhere outside of rtc.c,
@@ -343,21 +162,11 @@ static inline int sbus_can_burst64(void)
        return 0; /* actually, sparc_cpu_model==sun4d */
 }
 struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);
 
 #endif
 
 #define __ARCH_HAS_NO_PAGE_ZERO_MAPPED         1
 
-/*
- * Convert a physical pointer to a virtual kernel pointer for /dev/mem
- * access
- */
-#define xlate_dev_mem_ptr(p)   __va(p)
-
-/*
- * Convert a virtual cached pointer to an uncached pointer
- */
-#define xlate_dev_kmem_ptr(p)  p
 
 #endif /* !(__SPARC_IO_H) */
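
The io_32.h rewrite above deletes the hand-rolled readb/writeb, inb/outb, and ioreadN/iowriteN implementations in favor of asm-generic/io.h, keeping only the byte-at-a-time memset_io/memcpy_fromio/memcpy_toio helpers and the SBus accessors. A hedged driver-side sketch of the surviving interface; the function name, physical base, and length are illustrative:

        static int probe_one(unsigned long phys_base)
        {
                void __iomem *regs = ioremap(phys_base, 0x100);

                if (!regs)
                        return -ENOMEM;
                memset_io(regs, 0, 0x100);      /* byte-wise writeb() loop */
                iounmap(regs);
                return 0;
        }
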
index 09b0b88..05381c3 100644 (file)
@@ -15,7 +15,6 @@
 
 /* BIO layer definitions. */
 extern unsigned long kern_base, kern_size;
-#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
 
 static inline u8 _inb(unsigned long addr)
 {
@@ -91,12 +90,12 @@ static inline void _outl(u32 l, unsigned long addr)
 #define inl_p(__addr)          inl(__addr)
 #define outl_p(__l, __addr)    outl(__l, __addr)
 
-extern void outsb(unsigned long, const void *, unsigned long);
-extern void outsw(unsigned long, const void *, unsigned long);
-extern void outsl(unsigned long, const void *, unsigned long);
-extern void insb(unsigned long, void *, unsigned long);
-extern void insw(unsigned long, void *, unsigned long);
-extern void insl(unsigned long, void *, unsigned long);
+void outsb(unsigned long, const void *, unsigned long);
+void outsw(unsigned long, const void *, unsigned long);
+void outsl(unsigned long, const void *, unsigned long);
+void insb(unsigned long, void *, unsigned long);
+void insw(unsigned long, void *, unsigned long);
+void insl(unsigned long, void *, unsigned long);
 
 static inline void ioread8_rep(void __iomem *port, void *buf, unsigned long count)
 {
@@ -509,12 +508,12 @@ static inline void iounmap(volatile void __iomem *addr)
 #define iowrite32be(val,X)             __raw_writel(val,X)
 
 /* Create a virtual mapping cookie for an IO port range */
-extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
-extern void ioport_unmap(void __iomem *);
+void __iomem *ioport_map(unsigned long port, unsigned int nr);
+void ioport_unmap(void __iomem *);
 
 /* Create a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
-extern void pci_iounmap(struct pci_dev *dev, void __iomem *);
+void pci_iounmap(struct pci_dev *dev, void __iomem *);
 
 static inline int sbus_can_dma_64bit(void)
 {
@@ -525,7 +524,7 @@ static inline int sbus_can_burst64(void)
        return 1;
 }
 struct device;
-extern void sbus_set_sbus64(struct device *, int);
+void sbus_set_sbus64(struct device *, int);
 
 /*
  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
index 70c589c..f6c066b 100644 (file)
@@ -99,7 +99,7 @@ struct iommu_regs {
 #define IOPTE_WAZ           0x00000001 /* Write as zeros */
 
 struct iommu_struct {
-       struct iommu_regs *regs;
+       struct iommu_regs __iomem *regs;
        iopte_t *page_table;
        /* For convenience */
        unsigned long start; /* First managed virtual address */
@@ -108,14 +108,14 @@ struct iommu_struct {
        struct bit_map usemap;
 };
 
-static inline void iommu_invalidate(struct iommu_regs *regs)
+static inline void iommu_invalidate(struct iommu_regs __iomem *regs)
 {
-       regs->tlbflush = 0;
+       sbus_writel(0, &regs->tlbflush);
 }
 
-static inline void iommu_invalidate_page(struct iommu_regs *regs, unsigned long ba)
+static inline void iommu_invalidate_page(struct iommu_regs __iomem *regs, unsigned long ba)
 {
-       regs->pageflush = (ba & PAGE_MASK);
+       sbus_writel(ba & PAGE_MASK, &regs->pageflush);
 }
 
 #endif /* !(_SPARC_IOMMU_H) */
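
This iommu hunk is the payoff of the `__iomem' annotations: the TLB-flush registers were previously written with plain stores through a non-volatile pointer, which sparse cannot check and which the compiler is free to reorder or merge; routing them through sbus_writel() makes each access an explicit volatile MMIO write. The same pattern in miniature, with hypothetical names:

        struct mmio_regs { u32 ctrl; };         /* hypothetical */

        static void mmio_reset(struct mmio_regs __iomem *regs)
        {
                sbus_writel(0, &regs->ctrl);    /* not: regs->ctrl = 0; */
        }
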
index caf798b..2b9321a 100644 (file)
@@ -58,8 +58,8 @@ struct strbuf {
        volatile unsigned long  __flushflag_buf[(64+(64-1)) / sizeof(long)];
 };
 
-extern int iommu_table_init(struct iommu *iommu, int tsbsize,
-                           u32 dma_offset, u32 dma_addr_mask,
-                           int numa_node);
+int iommu_table_init(struct iommu *iommu, int tsbsize,
+                    u32 dma_offset, u32 dma_addr_mask,
+                    int numa_node);
 
 #endif /* !(_SPARC64_IOMMU_H) */
index 2ae3aca..eecd3d8 100644 (file)
@@ -16,7 +16,8 @@
 
 #define irq_canonicalize(irq)  (irq)
 
-extern void __init init_IRQ(void);
+void __init init_IRQ(void);
+void __init sun4d_init_sbi_irq(void);
 
 #define NO_IRQ         0xffffffff
 
index abf6afe..91d2193 100644 (file)
  */
 #define NR_IRQS    255
 
-extern void irq_install_pre_handler(int irq,
-                                   void (*func)(unsigned int, void *, void *),
-                                   void *arg1, void *arg2);
+void irq_install_pre_handler(int irq,
+                            void (*func)(unsigned int, void *, void *),
+                            void *arg1, void *arg2);
 #define irq_canonicalize(irq)  (irq)
-extern unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
-extern unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
-extern unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
-                                   unsigned int msi_devino_start,
-                                   unsigned int msi_devino_end);
-extern void sun4v_destroy_msi(unsigned int irq);
-extern unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
-                                   unsigned int msi_devino_start,
-                                   unsigned int msi_devino_end,
-                                   unsigned long imap_base,
-                                   unsigned long iclr_base);
-extern void sun4u_destroy_msi(unsigned int irq);
-
-extern unsigned char irq_alloc(unsigned int dev_handle,
-                                   unsigned int dev_ino);
+unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap);
+unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino);
+unsigned int sun4v_build_msi(u32 devhandle, unsigned int *irq_p,
+                            unsigned int msi_devino_start,
+                            unsigned int msi_devino_end);
+void sun4v_destroy_msi(unsigned int irq);
+unsigned int sun4u_build_msi(u32 portid, unsigned int *irq_p,
+                            unsigned int msi_devino_start,
+                            unsigned int msi_devino_end,
+                            unsigned long imap_base,
+                            unsigned long iclr_base);
+void sun4u_destroy_msi(unsigned int irq);
+
+unsigned char irq_alloc(unsigned int dev_handle,
+                       unsigned int dev_ino);
 #ifdef CONFIG_PCI_MSI
-extern void irq_free(unsigned int irq);
+void irq_free(unsigned int irq);
 #endif
 
-extern void __init init_IRQ(void);
-extern void fixup_irqs(void);
+void __init init_IRQ(void);
+void fixup_irqs(void);
 
 static inline void set_softint(unsigned long bits)
 {
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
        return retval;
 }
 
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 
 extern void *hardirq_stack[NR_CPUS];
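
One change in this irq_64.h hunk is functional rather than cosmetic: arch_trigger_all_cpu_backtrace() now takes a bool, matching the cross-architecture interface reworked this cycle (where the parameter is include_self, selecting whether the calling CPU dumps its own stack as well). A hedged call-site sketch; the wrapper name is illustrative:

        static void dump_all_cpus(void)
        {
                arch_trigger_all_cpu_backtrace(true);   /* include this CPU */
        }
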
index e414c06..71cc284 100644 (file)
@@ -15,9 +15,9 @@
 #include <linux/types.h>
 #include <asm/psr.h>
 
-extern void arch_local_irq_restore(unsigned long);
-extern unsigned long arch_local_irq_save(void);
-extern void arch_local_irq_enable(void);
+void arch_local_irq_restore(unsigned long);
+unsigned long arch_local_irq_save(void);
+void arch_local_irq_enable(void);
 
 static inline notrace unsigned long arch_local_save_flags(void)
 {
index feb3578..04465de 100644 (file)
@@ -3,7 +3,7 @@
 
 struct pt_regs;
 
-extern void bad_trap(struct pt_regs *, long);
+void bad_trap(struct pt_regs *, long);
 
 /* Grossly misnamed. */
 enum die_val {
index b6ef301..47366af 100644 (file)
@@ -28,9 +28,12 @@ enum regnames {
 #define NUMREGBYTES            ((GDB_CSR + 1) * 4)
 #else
 #define NUMREGBYTES            ((GDB_Y + 1) * 8)
+
+struct pt_regs;
+asmlinkage void kgdb_trap(unsigned long trap_level, struct pt_regs *regs);
 #endif
 
-extern void arch_kgdb_breakpoint(void);
+void arch_kgdb_breakpoint(void);
 
 #define BREAK_INSTR_SIZE       4
 #define CACHE_FLUSH_IS_SAFE    1
index 5879d71..a145d79 100644 (file)
@@ -43,7 +43,9 @@ struct kprobe_ctlblk {
        struct prev_kprobe prev_kprobe;
 };
 
-extern int kprobe_exceptions_notify(struct notifier_block *self,
-                                   unsigned long val, void *data);
-extern int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+int kprobe_exceptions_notify(struct notifier_block *self,
+                            unsigned long val, void *data);
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr);
+asmlinkage void __kprobes kprobe_trap(unsigned long trap_level,
+                                     struct pt_regs *regs);
 #endif /* _SPARC64_KPROBES_H */
index bdb524a..c8c67f6 100644 (file)
@@ -4,9 +4,9 @@
 #include <asm/hypervisor.h>
 
 extern int ldom_domaining_enabled;
-extern void ldom_set_var(const char *var, const char *value);
-extern void ldom_reboot(const char *boot_command);
-extern void ldom_power_off(void);
+void ldom_set_var(const char *var, const char *value);
+void ldom_reboot(const char *boot_command);
+void ldom_power_off(void);
 
 /* The event handler will be invoked when link state changes
  * or data becomes available on the receive side.
@@ -51,30 +51,30 @@ struct ldc_channel_config {
 struct ldc_channel;
 
 /* Allocate state for a channel.  */
-extern struct ldc_channel *ldc_alloc(unsigned long id,
-                                    const struct ldc_channel_config *cfgp,
-                                    void *event_arg);
+struct ldc_channel *ldc_alloc(unsigned long id,
+                             const struct ldc_channel_config *cfgp,
+                             void *event_arg);
 
 /* Shut down and free state for a channel.  */
-extern void ldc_free(struct ldc_channel *lp);
+void ldc_free(struct ldc_channel *lp);
 
 /* Register TX and RX queues of the link with the hypervisor.  */
-extern int ldc_bind(struct ldc_channel *lp, const char *name);
+int ldc_bind(struct ldc_channel *lp, const char *name);
 
 /* For non-RAW protocols we need to complete a handshake before
  * communication can proceed.  ldc_connect() does that, if the
  * handshake completes successfully, an LDC_EVENT_UP event will
  * be sent up to the driver.
  */
-extern int ldc_connect(struct ldc_channel *lp);
-extern int ldc_disconnect(struct ldc_channel *lp);
+int ldc_connect(struct ldc_channel *lp);
+int ldc_disconnect(struct ldc_channel *lp);
 
-extern int ldc_state(struct ldc_channel *lp);
+int ldc_state(struct ldc_channel *lp);
 
 /* Read and write operations.  Only valid when the link is up.  */
-extern int ldc_write(struct ldc_channel *lp, const void *buf,
-                    unsigned int size);
-extern int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
+int ldc_write(struct ldc_channel *lp, const void *buf,
+             unsigned int size);
+int ldc_read(struct ldc_channel *lp, void *buf, unsigned int size);
 
 #define LDC_MAP_SHADOW 0x01
 #define LDC_MAP_DIRECT 0x02
@@ -92,22 +92,22 @@ struct ldc_trans_cookie {
 };
 
 struct scatterlist;
-extern int ldc_map_sg(struct ldc_channel *lp,
-                     struct scatterlist *sg, int num_sg,
-                     struct ldc_trans_cookie *cookies, int ncookies,
-                     unsigned int map_perm);
+int ldc_map_sg(struct ldc_channel *lp,
+              struct scatterlist *sg, int num_sg,
+              struct ldc_trans_cookie *cookies, int ncookies,
+              unsigned int map_perm);
 
-extern int ldc_map_single(struct ldc_channel *lp,
-                         void *buf, unsigned int len,
-                         struct ldc_trans_cookie *cookies, int ncookies,
-                         unsigned int map_perm);
+int ldc_map_single(struct ldc_channel *lp,
+                  void *buf, unsigned int len,
+                  struct ldc_trans_cookie *cookies, int ncookies,
+                  unsigned int map_perm);
 
-extern void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
-                     int ncookies);
+void ldc_unmap(struct ldc_channel *lp, struct ldc_trans_cookie *cookies,
+              int ncookies);
 
-extern int ldc_copy(struct ldc_channel *lp, int copy_dir,
-                   void *buf, unsigned int len, unsigned long offset,
-                   struct ldc_trans_cookie *cookies, int ncookies);
+int ldc_copy(struct ldc_channel *lp, int copy_dir,
+            void *buf, unsigned int len, unsigned long offset,
+            struct ldc_trans_cookie *cookies, int ncookies);
 
 static inline int ldc_get_dring_entry(struct ldc_channel *lp,
                                      void *buf, unsigned int len,
@@ -127,12 +127,12 @@ static inline int ldc_put_dring_entry(struct ldc_channel *lp,
        return ldc_copy(lp, LDC_COPY_OUT, buf, len, offset, cookies, ncookies);
 }
 
-extern void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
-                                struct ldc_trans_cookie *cookies,
-                                int *ncookies, unsigned int map_perm);
+void *ldc_alloc_exp_dring(struct ldc_channel *lp, unsigned int len,
+                         struct ldc_trans_cookie *cookies,
+                         int *ncookies, unsigned int map_perm);
 
-extern void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
-                              unsigned int len,
-                              struct ldc_trans_cookie *cookies, int ncookies);
+void ldc_free_exp_dring(struct ldc_channel *lp, void *buf,
+                       unsigned int len,
+                       struct ldc_trans_cookie *cookies, int ncookies);
 
 #endif /* _SPARC64_LDC_H */
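
ldc.h above declares the complete Logical Domain Channel client API. Pieced together from those declarations, a hedged sketch of bringing up a non-RAW channel; the function name, channel name string, and error handling are illustrative (ldc_alloc() returning an ERR_PTR on failure is an assumption about the implementation):

        static int my_ldc_start(unsigned long id,
                                const struct ldc_channel_config *cfg)
        {
                struct ldc_channel *lp;

                lp = ldc_alloc(id, cfg, NULL);  /* allocate channel state */
                if (IS_ERR(lp))
                        return PTR_ERR(lp);
                if (ldc_bind(lp, "my-ldc") || ldc_connect(lp)) {
                        ldc_free(lp);
                        return -ENODEV;
                }
                /* After the LDC_EVENT_UP callback fires, data moves with
                 * ldc_write(lp, buf, len) / ldc_read(lp, buf, len).
                 */
                return 0;
        }
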
index c2f6ff6..204771c 100644 (file)
@@ -82,8 +82,8 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
 #define LEON_BYPASS_LOAD_PA(x)      leon_load_reg((unsigned long)(x))
 #define LEON_BYPASS_STORE_PA(x, v)  leon_store_reg((unsigned long)(x), (unsigned long)(v))
 
-extern void leon_switch_mm(void);
-extern void leon_init_IRQ(void);
+void leon_switch_mm(void);
+void leon_init_IRQ(void);
 
 static inline unsigned long sparc_leon3_get_dcachecfg(void)
 {
@@ -196,14 +196,14 @@ static inline int sparc_leon3_cpuid(void)
 #ifndef __ASSEMBLY__
 struct vm_area_struct;
 
-extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
-extern void leon_flush_icache_all(void);
-extern void leon_flush_dcache_all(void);
-extern void leon_flush_cache_all(void);
-extern void leon_flush_tlb_all(void);
+unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
+void leon_flush_icache_all(void);
+void leon_flush_dcache_all(void);
+void leon_flush_cache_all(void);
+void leon_flush_tlb_all(void);
 extern int leon_flush_during_switch;
-extern int leon_flush_needed(void);
-extern void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
+int leon_flush_needed(void);
+void leon_flush_pcache_all(struct vm_area_struct *vma, unsigned long page);
 
 /* struct that hold LEON3 cache configuration registers */
 struct leon3_cacheregs {
@@ -217,29 +217,29 @@ struct leon3_cacheregs {
 
 struct device_node;
 struct task_struct;
-extern unsigned int leon_build_device_irq(unsigned int real_irq,
-                                          irq_flow_handler_t flow_handler,
-                                          const char *name, int do_ack);
-extern void leon_update_virq_handling(unsigned int virq,
-                             irq_flow_handler_t flow_handler,
-                             const char *name, int do_ack);
-extern void leon_init_timers(void);
-extern void leon_trans_init(struct device_node *dp);
-extern void leon_node_init(struct device_node *dp, struct device_node ***nextp);
-extern void init_leon(void);
-extern void poke_leonsparc(void);
-extern void leon3_getCacheRegs(struct leon3_cacheregs *regs);
+unsigned int leon_build_device_irq(unsigned int real_irq,
+                                  irq_flow_handler_t flow_handler,
+                                  const char *name, int do_ack);
+void leon_update_virq_handling(unsigned int virq,
+                              irq_flow_handler_t flow_handler,
+                              const char *name, int do_ack);
+void leon_init_timers(void);
+void leon_trans_init(struct device_node *dp);
+void leon_node_init(struct device_node *dp, struct device_node ***nextp);
+void init_leon(void);
+void poke_leonsparc(void);
+void leon3_getCacheRegs(struct leon3_cacheregs *regs);
 extern int leon3_ticker_irq;
 
 #ifdef CONFIG_SMP
-extern int leon_smp_nrcpus(void);
-extern void leon_clear_profile_irq(int cpu);
-extern void leon_smp_done(void);
-extern void leon_boot_cpus(void);
-extern int leon_boot_one_cpu(int i, struct task_struct *);
+int leon_smp_nrcpus(void);
+void leon_clear_profile_irq(int cpu);
+void leon_smp_done(void);
+void leon_boot_cpus(void);
+int leon_boot_one_cpu(int i, struct task_struct *);
 void leon_init_smp(void);
 void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
-extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
+irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
 
 extern unsigned int smpleon_ipi[];
 extern unsigned int linux_trap_ipi15_leon[];
index bfd3ab3..049d067 100644 (file)
@@ -16,7 +16,7 @@ struct leon_pci_info {
        int (*map_irq)(const struct pci_dev *dev, u8 slot, u8 pin);
 };
 
-extern void leon_pci_init(struct platform_device *ofdev,
-                               struct leon_pci_info *info);
+void leon_pci_init(struct platform_device *ofdev,
+                  struct leon_pci_info *info);
 
 #endif /* _ASM_LEON_PCI_H_ */
index 67ed9e3..d8e72f3 100644 (file)
@@ -1,5 +1,10 @@
 #ifndef ___ASM_SPARC_MC146818RTC_H
 #define ___ASM_SPARC_MC146818RTC_H
+
+#include <linux/spinlock.h>
+
+extern spinlock_t rtc_lock;
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/mc146818rtc_64.h>
 #else
index 139097f..aebeb88 100644 (file)
@@ -12,13 +12,13 @@ struct mdesc_handle;
  * the first argument to all of the operational calls that work
  * on mdescs.
  */
-extern struct mdesc_handle *mdesc_grab(void);
-extern void mdesc_release(struct mdesc_handle *);
+struct mdesc_handle *mdesc_grab(void);
+void mdesc_release(struct mdesc_handle *);
 
 #define MDESC_NODE_NULL                (~(u64)0)
 
-extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
-                             u64 from_node, const char *name);
+u64 mdesc_node_by_name(struct mdesc_handle *handle,
+                      u64 from_node, const char *name);
 #define mdesc_for_each_node_by_name(__hdl, __node, __name) \
        for (__node = mdesc_node_by_name(__hdl, MDESC_NODE_NULL, __name); \
             (__node) != MDESC_NODE_NULL; \
@@ -34,9 +34,9 @@ extern u64 mdesc_node_by_name(struct mdesc_handle *handle,
  *
  * These same rules apply to mdesc_node_name().
  */
-extern const void *mdesc_get_property(struct mdesc_handle *handle,
-                                     u64 node, const char *name, int *lenp);
-extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
+const void *mdesc_get_property(struct mdesc_handle *handle,
+                              u64 node, const char *name, int *lenp);
+const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
 
 /* MD arc iteration, the standard sequence is:
  *
@@ -50,16 +50,16 @@ extern const char *mdesc_node_name(struct mdesc_handle *hp, u64 node);
 #define MDESC_ARC_TYPE_FWD     "fwd"
 #define MDESC_ARC_TYPE_BACK    "back"
 
-extern u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
-                         const char *arc_type);
+u64 mdesc_next_arc(struct mdesc_handle *handle, u64 from,
+                  const char *arc_type);
 #define mdesc_for_each_arc(__arc, __hdl, __node, __type) \
        for (__arc = mdesc_next_arc(__hdl, __node, __type); \
             (__arc) != MDESC_NODE_NULL; \
             __arc = mdesc_next_arc(__hdl, __arc, __type))
 
-extern u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
+u64 mdesc_arc_target(struct mdesc_handle *hp, u64 arc);
 
-extern void mdesc_update(void);
+void mdesc_update(void);
 
 struct mdesc_notifier_client {
        void (*add)(struct mdesc_handle *handle, u64 node);
@@ -69,12 +69,12 @@ struct mdesc_notifier_client {
        struct mdesc_notifier_client    *next;
 };
 
-extern void mdesc_register_notifier(struct mdesc_notifier_client *client);
+void mdesc_register_notifier(struct mdesc_notifier_client *client);
 
-extern void mdesc_fill_in_cpu_data(cpumask_t *mask);
-extern void mdesc_populate_present_mask(cpumask_t *mask);
-extern void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
+void mdesc_fill_in_cpu_data(cpumask_t *mask);
+void mdesc_populate_present_mask(cpumask_t *mask);
+void mdesc_get_page_sizes(cpumask_t *mask, unsigned long *pgsz_mask);
 
-extern void sun4v_mdesc_init(void);
+void sun4v_mdesc_init(void);
 
 #endif
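
mdesc.h is the traversal API for the machine description the hypervisor hands to the guest. The canonical pattern, assembled from the declarations above, is grab/iterate/release; the "cpu" node name, "id" property, and function name in this sketch are illustrative:

        static void list_cpus(void)
        {
                struct mdesc_handle *hp = mdesc_grab();
                u64 node;

                if (!hp)
                        return;
                mdesc_for_each_node_by_name(hp, node, "cpu") {
                        const u64 *id = mdesc_get_property(hp, node,
                                                           "id", NULL);
                        if (id)
                                pr_info("cpu id %llu\n",
                                        (unsigned long long)*id);
                }
                mdesc_release(hp);
        }
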
index f668797..70067ce 100644 (file)
@@ -67,9 +67,9 @@ struct tsb {
        unsigned long pte;
 } __attribute__((aligned(TSB_ENTRY_ALIGNMENT)));
 
-extern void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
-extern void tsb_flush(unsigned long ent, unsigned long tag);
-extern void tsb_init(struct tsb *tsb, unsigned long size);
+void __tsb_insert(unsigned long ent, unsigned long tag, unsigned long pte);
+void tsb_flush(unsigned long ent, unsigned long tag);
+void tsb_init(struct tsb *tsb, unsigned long size);
 
 struct tsb_config {
        struct tsb              *tsb;
index 3d528f0..b84be67 100644 (file)
@@ -17,20 +17,20 @@ extern spinlock_t ctx_alloc_lock;
 extern unsigned long tlb_context_cache;
 extern unsigned long mmu_context_bmap[];
 
-extern void get_new_mmu_context(struct mm_struct *mm);
+void get_new_mmu_context(struct mm_struct *mm);
 #ifdef CONFIG_SMP
-extern void smp_new_mmu_context_version(void);
+void smp_new_mmu_context_version(void);
 #else
 #define smp_new_mmu_context_version() do { } while (0)
 #endif
 
-extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
-extern void destroy_context(struct mm_struct *mm);
+int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
+void destroy_context(struct mm_struct *mm);
 
-extern void __tsb_context_switch(unsigned long pgd_pa,
-                                struct tsb_config *tsb_base,
-                                struct tsb_config *tsb_huge,
-                                unsigned long tsb_descr_pa);
+void __tsb_context_switch(unsigned long pgd_pa,
+                         struct tsb_config *tsb_base,
+                         struct tsb_config *tsb_huge,
+                         unsigned long tsb_descr_pa);
 
 static inline void tsb_context_switch(struct mm_struct *mm)
 {
@@ -46,9 +46,11 @@ static inline void tsb_context_switch(struct mm_struct *mm)
                             , __pa(&mm->context.tsb_descr[0]));
 }
 
-extern void tsb_grow(struct mm_struct *mm, unsigned long tsb_index, unsigned long mm_rss);
+void tsb_grow(struct mm_struct *mm,
+             unsigned long tsb_index,
+             unsigned long mm_rss);
 #ifdef CONFIG_SMP
-extern void smp_tsb_sync(struct mm_struct *mm);
+void smp_tsb_sync(struct mm_struct *mm);
 #else
 #define smp_tsb_sync(__mm) do { } while (0)
 #endif
@@ -66,7 +68,7 @@ extern void smp_tsb_sync(struct mm_struct *mm);
        : "r" (CTX_HWBITS((__mm)->context)), \
          "r" (SECONDARY_CONTEXT), "i" (ASI_DMMU), "i" (ASI_MMU))
 
-extern void __flush_tlb_mm(unsigned long, unsigned long);
+void __flush_tlb_mm(unsigned long, unsigned long);
 
 /* Switch the current MM context. */
 static inline void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm, struct task_struct *tsk)
index 72e6500..26ad2b2 100644 (file)
@@ -1,13 +1,13 @@
 #ifndef __NMI_H
 #define __NMI_H
 
-extern int __init nmi_init(void);
-extern void perfctr_irq(int irq, struct pt_regs *regs);
-extern void nmi_adjust_hz(unsigned int new_hz);
+int __init nmi_init(void);
+void perfctr_irq(int irq, struct pt_regs *regs);
+void nmi_adjust_hz(unsigned int new_hz);
 
 extern atomic_t nmi_active;
 
-extern void start_nmi_watchdog(void *unused);
-extern void stop_nmi_watchdog(void *unused);
+void start_nmi_watchdog(void *unused);
+void stop_nmi_watchdog(void *unused);
 
 #endif /* __NMI_H */
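
The start/stop prototypes in nmi.h keep the (void *unused) shape of an on_each_cpu() callback, which is presumably how the NMI watchdog is kicked on every CPU. A hedged sketch, wrapper name illustrative:

        static void watchdog_enable_all(void)
        {
                /* Run the enable helper on each online CPU, waiting
                 * for completion (assumed call site).
                 */
                on_each_cpu(start_nmi_watchdog, NULL, 1);
        }
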
index c72f304..56a09b9 100644 (file)
@@ -43,28 +43,28 @@ extern struct linux_nodeops *prom_nodeops;
 /* You must call prom_init() before using any of the library services,
  * preferably as early as possible.  Pass it the romvec pointer.
  */
-extern void prom_init(struct linux_romvec *rom_ptr);
+void prom_init(struct linux_romvec *rom_ptr);
 
 /* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);
 
 /* Miscellaneous routines, don't really fit in any category per se. */
 
 /* Reboot the machine with the command line passed. */
-extern void prom_reboot(char *boot_command);
+void prom_reboot(char *boot_command);
 
 /* Evaluate the forth string passed. */
-extern void prom_feval(char *forth_string);
+void prom_feval(char *forth_string);
 
 /* Enter the prom, with possibility of continuation with the 'go'
  * command in newer proms.
  */
-extern void prom_cmdline(void);
+void prom_cmdline(void);
 
 /* Enter the prom, with no chance of continuation for the stand-alone
  * which calls this.
  */
-extern void __noreturn prom_halt(void);
+void __noreturn prom_halt(void);
 
 /* Set the PROM 'sync' callback function to the passed function pointer.
  * When the user gives the 'sync' command at the prom prompt while the
@@ -73,37 +73,37 @@ extern void __noreturn prom_halt(void);
  * XXX The arguments are different on V0 vs. V2->higher proms, grrr! XXX
  */
 typedef void (*sync_func_t)(void);
-extern void prom_setsync(sync_func_t func_ptr);
+void prom_setsync(sync_func_t func_ptr);
 
 /* Acquire the IDPROM of the root node in the prom device tree.  This
  * gets passed a buffer where you would like it stuffed.  The return value
  * is the format type of this idprom or 0xff on error.
  */
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
 
 /* Get the prom major version. */
-extern int prom_version(void);
+int prom_version(void);
 
 /* Get the prom plugin revision. */
-extern int prom_getrev(void);
+int prom_getrev(void);
 
 /* Get the prom firmware revision. */
-extern int prom_getprev(void);
+int prom_getprev(void);
 
 /* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
 
 /* Start the CPU with the given device tree node, context table, and context
  * at the passed program counter.
  */
-extern int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
-                        int context, char *program_counter);
+int prom_startcpu(int cpunode, struct linux_prom_registers *context_table,
+                 int context, char *program_counter);
 
 /* Initialize the memory lists based upon the prom version. */
 void prom_meminit(void);
@@ -111,65 +111,65 @@ void prom_meminit(void);
 /* PROM device tree traversal functions... */
 
 /* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);
 
 /* Get the next sibling node of the given node, or zero if no further
  * siblings exist.
  */
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);
 
 /* Get the length, at the passed node, of the given property type.
  * Returns -1 on error (ie. no such property at this node).
  */
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);
 
 /* Fetch the requested property using the given buffer.  Returns
  * the number of bytes the prom put into your buffer or -1 on error.
  */
-extern int __must_check prom_getproperty(phandle thisnode, const char *property,
-                                        char *prop_buffer, int propbuf_size);
+int __must_check prom_getproperty(phandle thisnode, const char *property,
+                                 char *prop_buffer, int propbuf_size);
 
 /* Acquire an integer property. */
-extern int prom_getint(phandle node, char *property);
+int prom_getint(phandle node, char *property);
 
 /* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, char *property, int defval);
+int prom_getintdefault(phandle node, char *property, int defval);
 
 /* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, char *prop);
+int prom_getbool(phandle node, char *prop);
 
 /* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
+void prom_getstring(phandle node, char *prop, char *buf, int bufsize);
 
 /* Search all siblings starting at the passed node for "name" matching
  * the given string.  Returns the node on success, zero on failure.
  */
-extern phandle prom_searchsiblings(phandle node_start, char *name);
+phandle prom_searchsiblings(phandle node_start, char *name);
 
 /* Returns the next property after the passed property for the given
  * node.  Returns null string on failure.
  */
-extern char *prom_nextprop(phandle node, char *prev_property, char *buffer);
+char *prom_nextprop(phandle node, char *prev_property, char *buffer);
 
 /* Returns phandle of the path specified */
-extern phandle prom_finddevice(char *name);
+phandle prom_finddevice(char *name);
 
 /* Set the indicated property at the given node with the passed value.
  * Returns the number of bytes of your value that the prom took.
  */
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
-                       int value_size);
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+                int value_size);
 
-extern phandle prom_inst2pkg(int);
+phandle prom_inst2pkg(int);
 
 /* Dorking with Bus ranges... */
 
 /* Apply promlib probes OBIO ranges to registers. */
-extern void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
+void prom_apply_obio_ranges(struct linux_prom_registers *obioregs, int nregs);
 
 /* Apply ranges of any prom node (and optionally parent node as well) to registers. */
-extern void prom_apply_generic_ranges(phandle node, phandle parent,
-                                     struct linux_prom_registers *sbusregs, int nregs);
+void prom_apply_generic_ranges(phandle node, phandle parent,
+                              struct linux_prom_registers *sbusregs, int nregs);
 
 void prom_ranges_init(void);
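
The __printf(1, 2) marker retained on prom_printf() above is worth a note: it must stay on the prototype even as "extern" goes, since it is what enables format checking. A sketch, assuming the usual <linux/compiler.h> definition:

	#define __printf(a, b)	__attribute__((format(printf, a, b)))

	__printf(1, 2) void prom_printf_demo(const char *fmt, ...);

	/* gcc can now warn on mismatched calls such as:
	 *	prom_printf_demo("%s\n", 42);	(-Wformat: int passed for %s)
	 */
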
 
index a12dbe3..f346824 100644 (file)
@@ -62,100 +62,100 @@ struct linux_mem_p1275 {
 /* You must call prom_init() before using any of the library services,
  * preferably as early as possible.  Pass it the romvec pointer.
  */
-extern void prom_init(void *cif_handler, void *cif_stack);
+void prom_init(void *cif_handler, void *cif_stack);
 
 /* Boot argument acquisition, returns the boot command line string. */
-extern char *prom_getbootargs(void);
+char *prom_getbootargs(void);
 
 /* Miscellaneous routines, don't really fit in any category per se. */
 
 /* Reboot the machine with the command line passed. */
-extern void prom_reboot(const char *boot_command);
+void prom_reboot(const char *boot_command);
 
 /* Evaluate the forth string passed. */
-extern void prom_feval(const char *forth_string);
+void prom_feval(const char *forth_string);
 
 /* Enter the prom, with possibility of continuation with the 'go'
  * command in newer proms.
  */
-extern void prom_cmdline(void);
+void prom_cmdline(void);
 
 /* Enter the prom, with no chance of continuation for the stand-alone
  * which calls this.
  */
-extern void prom_halt(void) __attribute__ ((noreturn));
+void prom_halt(void) __attribute__ ((noreturn));
 
 /* Halt and power-off the machine. */
-extern void prom_halt_power_off(void) __attribute__ ((noreturn));
+void prom_halt_power_off(void) __attribute__ ((noreturn));
 
 /* Acquire the IDPROM of the root node in the prom device tree.  This
  * gets passed a buffer where you would like it stuffed.  The return value
  * is the format type of this idprom or 0xff on error.
  */
-extern unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
+unsigned char prom_get_idprom(char *idp_buffer, int idpbuf_size);
 
 /* Write a buffer of characters to the console. */
-extern void prom_console_write_buf(const char *buf, int len);
+void prom_console_write_buf(const char *buf, int len);
 
 /* Prom's internal routines, don't use in kernel/boot code. */
-extern __printf(1, 2) void prom_printf(const char *fmt, ...);
-extern void prom_write(const char *buf, unsigned int len);
+__printf(1, 2) void prom_printf(const char *fmt, ...);
+void prom_write(const char *buf, unsigned int len);
 
 /* Multiprocessor operations... */
 #ifdef CONFIG_SMP
 /* Start the CPU with the given device tree node at the passed program
  * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
+void prom_startcpu(int cpunode, unsigned long pc, unsigned long arg);
 
 /* Start the CPU with the given cpu ID at the passed program
  * counter with the given arg passed in via register %o0.
  */
-extern void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
+void prom_startcpu_cpuid(int cpuid, unsigned long pc, unsigned long arg);
 
 /* Stop the CPU with the given cpu ID.  */
-extern void prom_stopcpu_cpuid(int cpuid);
+void prom_stopcpu_cpuid(int cpuid);
 
 /* Stop the current CPU. */
-extern void prom_stopself(void);
+void prom_stopself(void);
 
 /* Idle the current CPU. */
-extern void prom_idleself(void);
+void prom_idleself(void);
 
 /* Resume the CPU with the passed device tree node. */
-extern void prom_resumecpu(int cpunode);
+void prom_resumecpu(int cpunode);
 #endif
 
 /* Power management interfaces. */
 
 /* Put the current CPU to sleep. */
-extern void prom_sleepself(void);
+void prom_sleepself(void);
 
 /* Put the entire system to sleep. */
-extern int prom_sleepsystem(void);
+int prom_sleepsystem(void);
 
 /* Initiate a wakeup event. */
-extern int prom_wakeupsystem(void);
+int prom_wakeupsystem(void);
 
 /* MMU and memory related OBP interfaces. */
 
 /* Get unique string identifying SIMM at given physical address. */
-extern int prom_getunumber(int syndrome_code,
-                          unsigned long phys_addr,
-                          char *buf, int buflen);
+int prom_getunumber(int syndrome_code,
+                   unsigned long phys_addr,
+                   char *buf, int buflen);
 
 /* Retain physical memory to the caller across soft resets. */
-extern int prom_retain(const char *name, unsigned long size,
-                      unsigned long align, unsigned long *paddr);
+int prom_retain(const char *name, unsigned long size,
+               unsigned long align, unsigned long *paddr);
 
 /* Load explicit I/D TLB entries into the calling processor. */
-extern long prom_itlb_load(unsigned long index,
-                          unsigned long tte_data,
-                          unsigned long vaddr);
+long prom_itlb_load(unsigned long index,
+                   unsigned long tte_data,
+                   unsigned long vaddr);
 
-extern long prom_dtlb_load(unsigned long index,
-                          unsigned long tte_data,
-                          unsigned long vaddr);
+long prom_dtlb_load(unsigned long index,
+                   unsigned long tte_data,
+                   unsigned long vaddr);
 
 /* Map/Unmap client program address ranges.  First the format of
  * the mapping mode argument.
@@ -170,81 +170,81 @@ extern long prom_dtlb_load(unsigned long index,
 #define PROM_MAP_IE    0x0100 /* Invert-Endianness */
 #define PROM_MAP_DEFAULT (PROM_MAP_WRITE | PROM_MAP_READ | PROM_MAP_EXEC | PROM_MAP_CACHED)
 
-extern int prom_map(int mode, unsigned long size,
-                   unsigned long vaddr, unsigned long paddr);
-extern void prom_unmap(unsigned long size, unsigned long vaddr);
+int prom_map(int mode, unsigned long size,
+            unsigned long vaddr, unsigned long paddr);
+void prom_unmap(unsigned long size, unsigned long vaddr);
 
 
 /* PROM device tree traversal functions... */
 
 /* Get the child node of the given node, or zero if no child exists. */
-extern phandle prom_getchild(phandle parent_node);
+phandle prom_getchild(phandle parent_node);
 
 /* Get the next sibling node of the given node, or zero if no further
  * siblings exist.
  */
-extern phandle prom_getsibling(phandle node);
+phandle prom_getsibling(phandle node);
 
 /* Get the length, at the passed node, of the given property type.
  * Returns -1 on error (ie. no such property at this node).
  */
-extern int prom_getproplen(phandle thisnode, const char *property);
+int prom_getproplen(phandle thisnode, const char *property);
 
 /* Fetch the requested property using the given buffer.  Returns
  * the number of bytes the prom put into your buffer or -1 on error.
  */
-extern int prom_getproperty(phandle thisnode, const char *property,
-                           char *prop_buffer, int propbuf_size);
+int prom_getproperty(phandle thisnode, const char *property,
+                    char *prop_buffer, int propbuf_size);
 
 /* Acquire an integer property. */
-extern int prom_getint(phandle node, const char *property);
+int prom_getint(phandle node, const char *property);
 
 /* Acquire an integer property, with a default value. */
-extern int prom_getintdefault(phandle node, const char *property, int defval);
+int prom_getintdefault(phandle node, const char *property, int defval);
 
 /* Acquire a boolean property, 0=FALSE 1=TRUE. */
-extern int prom_getbool(phandle node, const char *prop);
+int prom_getbool(phandle node, const char *prop);
 
 /* Acquire a string property, null string on error. */
-extern void prom_getstring(phandle node, const char *prop, char *buf,
-                          int bufsize);
+void prom_getstring(phandle node, const char *prop, char *buf,
+                   int bufsize);
 
 /* Does the passed node have the given "name"? YES=1 NO=0 */
-extern int prom_nodematch(phandle thisnode, const char *name);
+int prom_nodematch(phandle thisnode, const char *name);
 
 /* Search all siblings starting at the passed node for "name" matching
  * the given string.  Returns the node on success, zero on failure.
  */
-extern phandle prom_searchsiblings(phandle node_start, const char *name);
+phandle prom_searchsiblings(phandle node_start, const char *name);
 
 /* Return the first property type, as a string, for the given node.
  * Returns a null string on error. Buffer should be at least 32B long.
  */
-extern char *prom_firstprop(phandle node, char *buffer);
+char *prom_firstprop(phandle node, char *buffer);
 
 /* Returns the next property after the passed property for the given
  * node.  Returns null string on failure. Buffer should be at least 32B long.
  */
-extern char *prom_nextprop(phandle node, const char *prev_property, char *buf);
+char *prom_nextprop(phandle node, const char *prev_property, char *buf);
 
 /* Returns 1 if the specified node has given property. */
-extern int prom_node_has_property(phandle node, const char *property);
+int prom_node_has_property(phandle node, const char *property);
 
 /* Returns phandle of the path specified */
-extern phandle prom_finddevice(const char *name);
+phandle prom_finddevice(const char *name);
 
 /* Set the indicated property at the given node with the passed value.
  * Returns the number of bytes of your value that the prom took.
  */
-extern int prom_setprop(phandle node, const char *prop_name, char *prop_value,
-                       int value_size);
+int prom_setprop(phandle node, const char *prop_name, char *prop_value,
+                int value_size);
 
-extern phandle prom_inst2pkg(int);
-extern void prom_sun4v_guest_soft_state(void);
+phandle prom_inst2pkg(int);
+void prom_sun4v_guest_soft_state(void);
 
-extern int prom_ihandle2path(int handle, char *buffer, int bufsize);
+int prom_ihandle2path(int handle, char *buffer, int bufsize);
 
 /* Client interface level routines. */
-extern void p1275_cmd_direct(unsigned long *);
+void p1275_cmd_direct(unsigned long *);
 
 #endif /* !(__SPARC64_OPLIB_H) */
index f21de03..1be2fde 100644 (file)
@@ -1,5 +1,8 @@
 #ifndef ___ASM_SPARC_PAGE_H
 #define ___ASM_SPARC_PAGE_H
+
+#define page_to_phys(page)     (page_to_pfn(page) << PAGE_SHIFT)
+
 #if defined(__sparc__) && defined(__arch64__)
 #include <asm/page_64.h>
 #else
index aac53fc..bf10998 100644 (file)
 
 #if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE)
 struct pt_regs;
-extern void hugetlb_setup(struct pt_regs *regs);
+void hugetlb_setup(struct pt_regs *regs);
 #endif
 
 #define WANT_PAGE_VIRTUAL
 
-extern void _clear_page(void *page);
+void _clear_page(void *page);
 #define clear_page(X)  _clear_page((void *)(X))
 struct page;
-extern void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
+void clear_user_page(void *addr, unsigned long vaddr, struct page *page);
 #define copy_page(X,Y) memcpy((void *)(X), (void *)(Y), PAGE_SIZE)
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
+void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *topage);
 
 /* Unlike sparc32, sparc64's parameter passing API is more
  * sane in that structures which as small enough are passed
index c6c7396..bd00a62 100644 (file)
@@ -52,7 +52,7 @@ static inline void pci_dma_burst_advice(struct pci_dev *pdev,
 
 /* Return the index of the PCI controller for device PDEV. */
 
-extern int pci_domain_nr(struct pci_bus *bus);
+int pci_domain_nr(struct pci_bus *bus);
 static inline int pci_proc_domain(struct pci_bus *bus)
 {
        return 1;
@@ -64,9 +64,9 @@ static inline int pci_proc_domain(struct pci_bus *bus)
 #define HAVE_ARCH_PCI_GET_UNMAPPED_AREA
 #define get_pci_unmapped_area get_fb_unmapped_area
 
-extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
-                              enum pci_mmap_state mmap_state,
-                              int write_combine);
+int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
+                       enum pci_mmap_state mmap_state,
+                       int write_combine);
 
 static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 {
@@ -74,9 +74,9 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
 }
 
 #define HAVE_ARCH_PCI_RESOURCE_TO_USER
-extern void pci_resource_to_user(const struct pci_dev *dev, int bar,
-                                const struct resource *rsrc,
-                                resource_size_t *start, resource_size_t *end);
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+                         const struct resource *rsrc,
+                         resource_size_t *start, resource_size_t *end);
 #endif /* __KERNEL__ */
 
 #endif /* __SPARC64_PCI_H */
index 6676cbc..f417067 100644 (file)
@@ -30,10 +30,10 @@ struct linux_pcic {
 };
 
 #ifdef CONFIG_PCIC_PCI
-extern int pcic_present(void);
-extern int pcic_probe(void);
-extern void pci_time_init(void);
-extern void sun4m_pci_init_IRQ(void);
+int pcic_present(void);
+int pcic_probe(void);
+void pci_time_init(void);
+void sun4m_pci_init_IRQ(void);
 #else
 static inline int pcic_present(void) { return 0; }
 static inline int pcic_probe(void) { return 0; }
index 942bb17..cdf800c 100644 (file)
@@ -12,8 +12,8 @@ struct pcr_ops {
 };
 extern const struct pcr_ops *pcr_ops;
 
-extern void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
-extern void schedule_deferred_pcr_work(void);
+void deferred_pcr_work_irq(int irq, struct pt_regs *regs);
+void schedule_deferred_pcr_work(void);
 
 #define PCR_PIC_PRIV           0x00000001 /* PIC access is privileged */
 #define PCR_STRACE             0x00000002 /* Trace supervisor events  */
@@ -45,6 +45,6 @@ extern void schedule_deferred_pcr_work(void);
 #define PCR_N4_PICNHT          0x00020000 /* PIC non-hypervisor trap  */
 #define PCR_N4_NTC             0x00040000 /* Next-To-Commit wrap      */
 
-extern int pcr_arch_init(void);
+int pcr_arch_init(void);
 
 #endif /* __PCR_H */
index 9b1c36d..a3890da 100644 (file)
@@ -14,6 +14,8 @@ struct page;
 void *srmmu_get_nocache(int size, int align);
 void srmmu_free_nocache(void *addr, int size);
 
+extern struct resource sparc_iomap;
+
 #define check_pgt_cache()      do { } while (0)
 
 pgd_t *get_pgd_fast(void);
index bcfe063..39a7ac4 100644 (file)
@@ -38,12 +38,12 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
        kmem_cache_free(pgtable_cache, pmd);
 }
 
-extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-                                  unsigned long address);
-extern pgtable_t pte_alloc_one(struct mm_struct *mm,
-                              unsigned long address);
-extern void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
-extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
+                           unsigned long address);
+pgtable_t pte_alloc_one(struct mm_struct *mm,
+                       unsigned long address);
+void pte_free_kernel(struct mm_struct *mm, pte_t *pte);
+void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
 #define pmd_populate_kernel(MM, PMD, PTE)      pmd_set(MM, PMD, PTE)
 #define pmd_populate(MM, PMD, PTE)             pmd_set(MM, PMD, PTE)
@@ -51,12 +51,12 @@ extern void pte_free(struct mm_struct *mm, pgtable_t ptepage);
 
 #define check_pgt_cache()      do { } while (0)
 
-extern void pgtable_free(void *table, bool is_page);
+void pgtable_free(void *table, bool is_page);
 
 #ifdef CONFIG_SMP
 
 struct mmu_gather;
-extern void tlb_remove_table(struct mmu_gather *, void *);
+void tlb_remove_table(struct mmu_gather *, void *);
 
 static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, bool is_page)
 {
index 502f632..b9b91ae 100644 (file)
@@ -25,8 +25,9 @@
 struct vm_area_struct;
 struct page;
 
-extern void load_mmu(void);
-extern unsigned long calc_highpages(void);
+void load_mmu(void);
+unsigned long calc_highpages(void);
+unsigned long __init bootmem_init(unsigned long *pages_avail);
 
 #define pte_ERROR(e)   __builtin_trap()
 #define pmd_ERROR(e)   __builtin_trap()
@@ -56,7 +57,7 @@ extern unsigned long calc_highpages(void);
  * srmmu.c will assign the real one (which is dynamically sized) */
 #define swapper_pg_dir NULL
 
-extern void paging_init(void);
+void paging_init(void);
 
 extern unsigned long ptr_in_current_pgd;
 
@@ -428,8 +429,8 @@ extern unsigned long *sparc_valid_addr_bitmap;
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffUL)
 
-extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
-                          unsigned long, pgprot_t);
+int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                   unsigned long, pgprot_t);
 
 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
index 1a49ffd..3770bf5 100644 (file)
@@ -210,9 +210,9 @@ static inline bool kern_addr_valid(unsigned long addr)
 
 #ifndef __ASSEMBLY__
 
-extern pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
+pte_t mk_pte_io(unsigned long, pgprot_t, int, unsigned long);
 
-extern unsigned long pte_sz_bits(unsigned long size);
+unsigned long pte_sz_bits(unsigned long size);
 
 extern pgprot_t PAGE_KERNEL;
 extern pgprot_t PAGE_KERNEL_LOCKED;
@@ -780,8 +780,8 @@ static inline int pmd_present(pmd_t pmd)
                                         !__kern_addr_valid(pud_val(pud)))
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void set_pmd_at(struct mm_struct *mm, unsigned long addr,
-                      pmd_t *pmdp, pmd_t pmd);
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+               pmd_t *pmdp, pmd_t pmd);
 #else
 static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
                              pmd_t *pmdp, pmd_t pmd)
@@ -840,8 +840,8 @@ static inline unsigned long __pmd_page(pmd_t pmd)
 #define pte_unmap(pte)                 do { } while (0)
 
 /* Actual page table PTE updates.  */
-extern void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
-                         pte_t *ptep, pte_t orig, int fullmm);
+void tlb_batch_add(struct mm_struct *mm, unsigned long vaddr,
+                  pte_t *ptep, pte_t orig, int fullmm);
 
 #define __HAVE_ARCH_PMDP_GET_AND_CLEAR
 static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
@@ -900,28 +900,28 @@ static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr,
 extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
 extern pmd_t swapper_low_pmd_dir[PTRS_PER_PMD];
 
-extern void paging_init(void);
-extern unsigned long find_ecache_flush_span(unsigned long size);
+void paging_init(void);
+unsigned long find_ecache_flush_span(unsigned long size);
 
 struct seq_file;
-extern void mmu_info(struct seq_file *);
+void mmu_info(struct seq_file *);
 
 struct vm_area_struct;
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
+void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t *);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
-extern void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
-                                pmd_t *pmd);
+void update_mmu_cache_pmd(struct vm_area_struct *vma, unsigned long addr,
+                         pmd_t *pmd);
 
 #define __HAVE_ARCH_PMDP_INVALIDATE
 extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
                            pmd_t *pmdp);
 
 #define __HAVE_ARCH_PGTABLE_DEPOSIT
-extern void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
-                                      pgtable_t pgtable);
+void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
+                               pgtable_t pgtable);
 
 #define __HAVE_ARCH_PGTABLE_WITHDRAW
-extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
+pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #endif
 
 /* Encode and de-code a swap entry */
@@ -937,12 +937,12 @@ extern pgtable_t pgtable_trans_huge_withdraw(struct mm_struct *mm, pmd_t *pmdp);
 #define __swp_entry_to_pte(x)          ((pte_t) { (x).val })
 
 /* File offset in PTE support. */
-extern unsigned long pte_file(pte_t);
+unsigned long pte_file(pte_t);
 #define pte_to_pgoff(pte)      (pte_val(pte) >> PAGE_SHIFT)
-extern pte_t pgoff_to_pte(unsigned long);
+pte_t pgoff_to_pte(unsigned long);
 #define PTE_FILE_MAX_BITS      (64UL - PAGE_SHIFT - 1UL)
 
-extern int page_in_phys_avail(unsigned long paddr);
+int page_in_phys_avail(unsigned long paddr);
 
 /*
  * For sparc32&64, the pfn in io_remap_pfn_range() carries <iospace> in
@@ -952,8 +952,8 @@ extern int page_in_phys_avail(unsigned long paddr);
 #define GET_IOSPACE(pfn)               (pfn >> (BITS_PER_LONG - 4))
 #define GET_PFN(pfn)                   (pfn & 0x0fffffffffffffffUL)
 
-extern int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
-                          unsigned long, pgprot_t);
+int remap_pfn_range(struct vm_area_struct *, unsigned long, unsigned long,
+                   unsigned long, pgprot_t);
 
 static inline int io_remap_pfn_range(struct vm_area_struct *vma,
                                     unsigned long from, unsigned long pfn,
@@ -981,20 +981,20 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
 /* We provide a special get_unmapped_area for framebuffer mmaps to try and use
  * the largest alignment possible such that larget PTEs can be used.
  */
-extern unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
-                                         unsigned long, unsigned long,
-                                         unsigned long);
+unsigned long get_fb_unmapped_area(struct file *filp, unsigned long,
+                                  unsigned long, unsigned long,
+                                  unsigned long);
 #define HAVE_ARCH_FB_UNMAPPED_AREA
 
-extern void pgtable_cache_init(void);
-extern void sun4v_register_fault_status(void);
-extern void sun4v_ktsb_register(void);
-extern void __init cheetah_ecache_flush_init(void);
-extern void sun4v_patch_tlb_handlers(void);
+void pgtable_cache_init(void);
+void sun4v_register_fault_status(void);
+void sun4v_ktsb_register(void);
+void __init cheetah_ecache_flush_init(void);
+void sun4v_patch_tlb_handlers(void);
 
 extern unsigned long cmdline_memory_size;
 
-extern asmlinkage void do_sparc64_fault(struct pt_regs *regs);
+asmlinkage void do_sparc64_fault(struct pt_regs *regs);
 
 #endif /* !(__ASSEMBLY__) */
 
index 2c7baa4..a564817 100644 (file)
@@ -74,7 +74,7 @@ struct thread_struct {
 }
 
 /* Return saved PC of a blocked thread. */
-extern unsigned long thread_saved_pc(struct task_struct *t);
+unsigned long thread_saved_pc(struct task_struct *t);
 
 /* Do necessary setup to start up a newly executed thread. */
 static inline void start_thread(struct pt_regs * regs, unsigned long pc,
@@ -107,7 +107,7 @@ static inline void start_thread(struct pt_regs * regs, unsigned long pc,
 /* Free all resources held by a thread. */
 #define release_thread(tsk)            do { } while(0)
 
-extern unsigned long get_wchan(struct task_struct *);
+unsigned long get_wchan(struct task_struct *);
 
 #define task_pt_regs(tsk) ((tsk)->thread.kregs)
 #define KSTK_EIP(tsk)  ((tsk)->thread.kregs->pc)
@@ -116,6 +116,7 @@ extern unsigned long get_wchan(struct task_struct *);
 #ifdef __KERNEL__
 
 extern struct task_struct *last_task_used_math;
+int do_mathemu(struct pt_regs *regs, struct task_struct *fpt);
 
 #define cpu_relax()    barrier()
 extern void (*sparc_idle)(void);
index 4c3f7f0..7028fe1 100644 (file)
@@ -95,7 +95,7 @@ struct thread_struct {
 
 /* Return saved PC of a blocked thread. */
 struct task_struct;
-extern unsigned long thread_saved_pc(struct task_struct *);
+unsigned long thread_saved_pc(struct task_struct *);
 
 /* On Uniprocessor, even in RMO processes see TSO semantics */
 #ifdef CONFIG_SMP
@@ -194,7 +194,7 @@ do { \
 /* Free all resources held by a thread. */
 #define release_thread(tsk)            do { } while (0)
 
-extern unsigned long get_wchan(struct task_struct *task);
+unsigned long get_wchan(struct task_struct *task);
 
 #define task_pt_regs(tsk) (task_thread_info(tsk)->kregs)
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
@@ -253,6 +253,8 @@ static inline void prefetchw(const void *x)
 
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 
+int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap);
+
 #endif /* !(__ASSEMBLY__) */
 
 #endif /* !(__ASM_SPARC64_PROCESSOR_H) */
index 11ebd65..d955c8d 100644 (file)
@@ -36,28 +36,28 @@ struct of_irq_controller {
        void            *data;
 };
 
-extern struct device_node *of_find_node_by_cpuid(int cpuid);
-extern int of_set_property(struct device_node *node, const char *name, void *val, int len);
+struct device_node *of_find_node_by_cpuid(int cpuid);
+int of_set_property(struct device_node *node, const char *name, void *val, int len);
 extern struct mutex of_set_property_mutex;
-extern int of_getintprop_default(struct device_node *np,
-                                const char *name,
+int of_getintprop_default(struct device_node *np,
+                         const char *name,
                                 int def);
-extern int of_find_in_proplist(const char *list, const char *match, int len);
+int of_find_in_proplist(const char *list, const char *match, int len);
 
-extern void prom_build_devicetree(void);
-extern void of_populate_present_mask(void);
-extern void of_fill_in_cpu_data(void);
+void prom_build_devicetree(void);
+void of_populate_present_mask(void);
+void of_fill_in_cpu_data(void);
 
 struct resource;
-extern void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
-extern void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
+void __iomem *of_ioremap(struct resource *res, unsigned long offset, unsigned long size, char *name);
+void of_iounmap(struct resource *res, void __iomem *base, unsigned long size);
 
 extern struct device_node *of_console_device;
 extern char *of_console_path;
 extern char *of_console_options;
 
-extern void irq_trans_init(struct device_node *dp);
-extern char *build_path_component(struct device_node *dp);
+void irq_trans_init(struct device_node *dp);
+char *build_path_component(struct device_node *dp);
 
 #endif /* __KERNEL__ */
 #endif /* _SPARC_PROM_H */
index bdfafd7..bac6a94 100644 (file)
@@ -73,7 +73,7 @@ static inline long regs_return_value(struct pt_regs *regs)
        return regs->u_regs[UREG_I0];
 }
 #ifdef CONFIG_SMP
-extern unsigned long profile_pc(struct pt_regs *);
+unsigned long profile_pc(struct pt_regs *);
 #else
 #define profile_pc(regs) instruction_pointer(regs)
 #endif
index 5e35e05..f5fffd8 100644 (file)
@@ -4,8 +4,9 @@
 #ifndef _SPARC_SETUP_H
 #define _SPARC_SETUP_H
 
-#include <uapi/asm/setup.h>
+#include <linux/interrupt.h>
 
+#include <uapi/asm/setup.h>
 
 extern char reboot_command[];
 
@@ -22,9 +23,43 @@ static inline int con_is_present(void)
 {
        return serial_console ? 0 : 1;
 }
+
+/* from irq_32.c */
+extern volatile unsigned char *fdc_status;
+extern char *pdma_vaddr;
+extern unsigned long pdma_size;
+extern volatile int doing_pdma;
+
+/* This is software state */
+extern char *pdma_base;
+extern unsigned long pdma_areasize;
+
+int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler);
+
+/* setup_32.c */
+extern unsigned long cmdline_memory_size;
+
+/* devices.c */
+void __init device_scan(void);
+
+/* unaligned_32.c */
+unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int);
+
+#endif
+
+#ifdef CONFIG_SPARC64
+/* unaligned_64.c */
+int handle_ldf_stq(u32 insn, struct pt_regs *regs);
+void handle_ld_nf(u32 insn, struct pt_regs *regs);
+
+/* init_64.c */
+extern atomic_t dcpage_flushes;
+extern atomic_t dcpage_flushes_xcall;
+
+extern int sysctl_tsb_ratio;
 #endif
 
-extern void sun_do_break(void);
+void sun_do_break(void);
 extern int stop_a_enabled;
 extern int scons_pwroff;
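
The inline-asm hunks that follow differ from the rest of the sweep: what gets removed there is the (USItype) casts on the asm output operands, not an extern keyword. An output operand must be an lvalue, and a cast expression is not one, so current gcc rejects the old form. A minimal, self-contained illustration:

	static unsigned int asm_lvalue_demo(unsigned int in)
	{
		unsigned int out;

		__asm__("" : "=r" (out) : "0" (in));	/* plain lvalue: OK */
		/*
		 * "=r" ((USItype)(out)) would apply a cast, and a cast
		 * expression is not an lvalue, so gcc refuses it as an
		 * asm output operand.
		 */
		return out;
	}

The input operands below keep their (USItype) casts, which is consistent: inputs only need rvalues, so a cast there is harmless.
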
 
index 01d9c3b..838c9d5 100644 (file)
@@ -79,9 +79,9 @@
   __asm__ ("addcc %r7,%8,%2\n\t"                                       \
           "addxcc %r5,%6,%1\n\t"                                       \
           "addx %r3,%4,%0\n"                                           \
-          : "=r" ((USItype)(r2)),                                      \
-            "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=r" (r2),                                                 \
+            "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x2)),                                     \
             "rI" ((USItype)(y2)),                                      \
             "%rJ" ((USItype)(x1)),                                     \
@@ -94,9 +94,9 @@
   __asm__ ("subcc %r7,%8,%2\n\t"                                       \
            "subxcc %r5,%6,%1\n\t"                                      \
            "subx %r3,%4,%0\n"                                          \
-          : "=r" ((USItype)(r2)),                                      \
-            "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=r" (r2),                                                 \
+            "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x2)),                                     \
             "rI" ((USItype)(y2)),                                      \
             "%rJ" ((USItype)(x1)),                                     \
            "addxcc %r6,%7,%0\n\t"                                      \
            "addxcc %r4,%5,%%g2\n\t"                                    \
            "addx %r2,%3,%%g1\n\t"                                      \
-          : "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x3)),                                     \
             "rI" ((USItype)(y3)),                                      \
             "%rJ" ((USItype)(x2)),                                     \
            "subxcc %r6,%7,%0\n\t"                                      \
            "subxcc %r4,%5,%%g2\n\t"                                    \
            "subx %r2,%3,%%g1\n\t"                                      \
-          : "=&r" ((USItype)(r1)),                                     \
-            "=&r" ((USItype)(r0))                                      \
+          : "=&r" (r1),                                                \
+            "=&r" (r0)                                                 \
           : "%rJ" ((USItype)(x3)),                                     \
             "rI" ((USItype)(y3)),                                      \
             "%rJ" ((USItype)(x2)),                                     \
           "addxcc %2,%%g0,%2\n\t"                                      \
           "addxcc %1,%%g0,%1\n\t"                                      \
           "addx %0,%%g0,%0\n\t"                                        \
-          : "=&r" ((USItype)(x3)),                                     \
-            "=&r" ((USItype)(x2)),                                     \
-            "=&r" ((USItype)(x1)),                                     \
-            "=&r" ((USItype)(x0))                                      \
+          : "=&r" (x3),                                                \
+            "=&r" (x2),                                                \
+            "=&r" (x1),                                                \
+            "=&r" (x0)                                                 \
           : "rI" ((USItype)(i)),                                       \
             "0" ((USItype)(x3)),                                       \
             "1" ((USItype)(x2)),                                       \
index 3c8917f..7c24e08 100644 (file)
@@ -93,15 +93,15 @@ static inline void xc4(smpfunc_t func, unsigned long arg1, unsigned long arg2,
                                    arg1, arg2, arg3, arg4);
 }
 
-extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 static inline int cpu_logical_map(int cpu)
 {
        return cpu;
 }
 
-extern int hard_smp_processor_id(void);
+int hard_smp_processor_id(void);
 
 #define raw_smp_processor_id()         (current_thread_info()->cpu)
 
index 0571039..26d9e77 100644 (file)
 DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 
-extern void arch_send_call_function_single_ipi(int cpu);
-extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
+void arch_send_call_function_single_ipi(int cpu);
+void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 /*
  *     General functions that each host system must provide.
  */
 
-extern int hard_smp_processor_id(void);
+int hard_smp_processor_id(void);
 #define raw_smp_processor_id() (current_thread_info()->cpu)
 
-extern void smp_fill_in_sib_core_maps(void);
-extern void cpu_play_dead(void);
+void smp_fill_in_sib_core_maps(void);
+void cpu_play_dead(void);
 
-extern void smp_fetch_global_regs(void);
-extern void smp_fetch_global_pmu(void);
+void smp_fetch_global_regs(void);
+void smp_fetch_global_pmu(void);
 
 struct seq_file;
 void smp_bogo(struct seq_file *);
 void smp_info(struct seq_file *);
 
+void smp_callin(void);
+void cpu_panic(void);
+void smp_synchronize_tick_client(void);
+void smp_capture(void);
+void smp_release(void);
+
 #ifdef CONFIG_HOTPLUG_CPU
-extern int __cpu_disable(void);
-extern void __cpu_die(unsigned int cpu);
+int __cpu_disable(void);
+void __cpu_die(unsigned int cpu);
 #endif
 
 #endif /* !(__ASSEMBLY__) */
index 6b67e50..3fc5869 100644 (file)
@@ -62,7 +62,7 @@ extern enum ultra_tlb_layout tlb_type;
 extern int sun4v_chip_type;
 
 extern int cheetah_pcache_forced_on;
-extern void cheetah_enable_pcache(void);
+void cheetah_enable_pcache(void);
 
 #define sparc64_highest_locked_tlbent()        \
        (tlb_type == spitfire ? \
index 6cee39a..c30d066 100644 (file)
@@ -1,6 +1,6 @@
 #ifndef _SPARC64_STACKTRACE_H
 #define _SPARC64_STACKTRACE_H
 
-extern void stack_trace_flush(void);
+void stack_trace_flush(void);
 
 #endif /* _SPARC64_STACKTRACE_H */
index d56ce60..c100dc2 100644 (file)
 
 extern int this_is_starfire;
 
-extern void check_if_starfire(void);
-extern int starfire_hard_smp_processor_id(void);
-extern void starfire_hookup(int);
-extern unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
+void check_if_starfire(void);
+int starfire_hard_smp_processor_id(void);
+void starfire_hookup(int);
+unsigned int starfire_translate(unsigned long imap, unsigned int upaid);
 
 #endif
 #endif
index 12f6785..69974e9 100644 (file)
@@ -15,7 +15,7 @@
 
 #ifdef __KERNEL__
 
-extern void __memmove(void *,const void *,__kernel_size_t);
+void __memmove(void *,const void *,__kernel_size_t);
 
 #ifndef EXPORT_SYMTAB_STROPS
 
@@ -40,8 +40,8 @@ extern void __memmove(void *,const void *,__kernel_size_t);
 #undef memscan
 #define memscan(__arg0, __char, __arg2)                                                \
 ({                                                                             \
-       extern void *__memscan_zero(void *, size_t);                            \
-       extern void *__memscan_generic(void *, int, size_t);                    \
+       void *__memscan_zero(void *, size_t);                                   \
+       void *__memscan_generic(void *, int, size_t);                           \
        void *__retval, *__addr = (__arg0);                                     \
        size_t __size = (__arg2);                                               \
                                                                                \
@@ -54,14 +54,14 @@ extern void __memmove(void *,const void *,__kernel_size_t);
 })
 
 #define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *,const void *,__kernel_size_t);
+int memcmp(const void *,const void *,__kernel_size_t);
 
 /* Now the str*() stuff... */
 #define __HAVE_ARCH_STRLEN
-extern __kernel_size_t strlen(const char *);
+__kernel_size_t strlen(const char *);
 
 #define __HAVE_ARCH_STRNCMP
-extern int strncmp(const char *, const char *, __kernel_size_t);
+int strncmp(const char *, const char *, __kernel_size_t);
 
 #endif /* !EXPORT_SYMTAB_STROPS */
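
One subtlety in the memscan() wrapper above: it is a GCC statement expression, ({ ... }), so the prototypes losing "extern" there are block-scope declarations inside a macro, and the same implicit-external-linkage rule applies to them too. A hedged sketch of the idiom itself, with an illustrative helper that is fully self-contained:

	#define first_nonzero(ptr, len)						\
	({									\
		const unsigned char *__p = (const unsigned char *)(ptr);	\
		unsigned long __i, __n = (len);					\
		for (__i = 0; __i < __n && !__p[__i]; __i++)			\
			;							\
		__i;	/* the value the ({ ... }) block yields */		\
	})
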
 
index 9623bc2..5936b8f 100644 (file)
@@ -19,7 +19,7 @@
 
 /* First the mem*() things. */
 #define __HAVE_ARCH_MEMMOVE
-extern void *memmove(void *, const void *, __kernel_size_t);
+void *memmove(void *, const void *, __kernel_size_t);
 
 #define __HAVE_ARCH_MEMCPY
 #define memcpy(t, f, n) __builtin_memcpy(t, f, n)
@@ -32,8 +32,8 @@ extern void *memmove(void *, const void *, __kernel_size_t);
 #undef memscan
 #define memscan(__arg0, __char, __arg2)                                        \
 ({                                                                     \
-       extern void *__memscan_zero(void *, size_t);                    \
-       extern void *__memscan_generic(void *, int, size_t);            \
+       void *__memscan_zero(void *, size_t);                           \
+       void *__memscan_generic(void *, int, size_t);                   \
        void *__retval, *__addr = (__arg0);                             \
        size_t __size = (__arg2);                                       \
                                                                        \
@@ -46,14 +46,14 @@ extern void *memmove(void *, const void *, __kernel_size_t);
 })
 
 #define __HAVE_ARCH_MEMCMP
-extern int memcmp(const void *,const void *,__kernel_size_t);
+int memcmp(const void *,const void *,__kernel_size_t);
 
 /* Now the str*() stuff... */
 #define __HAVE_ARCH_STRLEN
-extern __kernel_size_t strlen(const char *);
+__kernel_size_t strlen(const char *);
 
 #define __HAVE_ARCH_STRNCMP
-extern int strncmp(const char *, const char *, __kernel_size_t);
+int strncmp(const char *, const char *, __kernel_size_t);
 
 #endif /* !EXPORT_SYMTAB_STROPS */
 
index e32e82b..16f1037 100644 (file)
@@ -99,8 +99,8 @@ extern struct thread_info *current_set[NR_CPUS];
          "o0", "o1", "o2", "o3",                   "o7");      \
        } while(0)
 
-extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
-                  void *fpqueue, unsigned long *fpqdepth);
-extern void synchronize_user_stack(void);
+void fpsave(unsigned long *fpregs, unsigned long *fsr,
+           void *fpqueue, unsigned long *fpqdepth);
+void synchronize_user_stack(void);
 
 #endif /* __SPARC_SWITCH_TO_H */
index 8d28480..10e7633 100644 (file)
@@ -65,7 +65,7 @@ do {  save_and_clear_fpu();                                           \
          "o0", "o1", "o2", "o3", "o4", "o5",       "o7");              \
 } while(0)
 
-extern void synchronize_user_stack(void);
-extern void fault_in_user_windows(void);
+void synchronize_user_stack(void);
+void fault_in_user_windows(void);
 
 #endif /* __SPARC64_SWITCH_TO_64_H */
index bf8972a..b0a0db8 100644 (file)
@@ -3,9 +3,9 @@
 
 struct pt_regs;
 
-extern asmlinkage long sparc_do_fork(unsigned long clone_flags,
-                                    unsigned long stack_start,
-                                    struct pt_regs *regs,
-                                    unsigned long stack_size);
+asmlinkage long sparc_do_fork(unsigned long clone_flags,
+                             unsigned long stack_start,
+                             struct pt_regs *regs,
+                             unsigned long stack_size);
 
 #endif /* _SPARC64_SYSCALLS_H */
index 72f40a5..f8e708a 100644 (file)
@@ -32,13 +32,13 @@ static inline unsigned int timer_value(unsigned int value)
        return (value + 1) << TIMER_VALUE_SHIFT;
 }
 
-extern __volatile__ unsigned int *master_l10_counter;
+extern volatile u32 __iomem *master_l10_counter;
 
-extern irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
+irqreturn_t notrace timer_interrupt(int dummy, void *dev_id);
 
 #ifdef CONFIG_SMP
 DECLARE_PER_CPU(struct clock_event_device, sparc32_clockevent);
-extern void register_percpu_ce(int cpu);
+void register_percpu_ce(int cpu);
 #endif
 
 #endif /* !(_SPARC_TIMER_H) */
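
Note that master_l10_counter gains more than a style fix above: "__volatile__ unsigned int *" becomes "volatile u32 __iomem *", marking it for sparse as an MMIO pointer. Such pointers are meant to go through I/O accessors rather than plain dereferences; a sketch, with the accessor choice illustrative:

	extern volatile u32 __iomem *master_l10_counter;

	static inline u32 l10_counter_read(void)
	{
		return sbus_readl(master_l10_counter);
	}
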
index 01197d8..fce4150 100644 (file)
@@ -23,8 +23,8 @@ struct sparc64_tick_ops {
 
 extern struct sparc64_tick_ops *tick_ops;
 
-extern unsigned long sparc64_get_clock_tick(unsigned int cpu);
-extern void setup_sparc64_timer(void);
-extern void __init time_init(void);
+unsigned long sparc64_get_clock_tick(unsigned int cpu);
+void setup_sparc64_timer(void);
+void __init time_init(void);
 
 #endif /* _SPARC64_TIMER_H */
index 190e189..4cb392f 100644 (file)
@@ -8,19 +8,19 @@
 #include <asm/mmu_context.h>
 
 #ifdef CONFIG_SMP
-extern void smp_flush_tlb_pending(struct mm_struct *,
+void smp_flush_tlb_pending(struct mm_struct *,
                                  unsigned long, unsigned long *);
 #endif
 
 #ifdef CONFIG_SMP
-extern void smp_flush_tlb_mm(struct mm_struct *mm);
+void smp_flush_tlb_mm(struct mm_struct *mm);
 #define do_flush_tlb_mm(mm) smp_flush_tlb_mm(mm)
 #else
 #define do_flush_tlb_mm(mm) __flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT)
 #endif
 
-extern void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
-extern void flush_tlb_pending(void);
+void __flush_tlb_pending(unsigned long, unsigned long, unsigned long *);
+void flush_tlb_pending(void);
 
 #define tlb_start_vma(tlb, vma) do { } while (0)
 #define tlb_end_vma(tlb, vma)  do { } while (0)
index 3c3c89f..816d820 100644 (file)
@@ -14,9 +14,9 @@ struct tlb_batch {
        unsigned long vaddrs[TLB_BATCH_NR];
 };
 
-extern void flush_tsb_kernel_range(unsigned long start, unsigned long end);
-extern void flush_tsb_user(struct tlb_batch *tb);
-extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
+void flush_tsb_kernel_range(unsigned long start, unsigned long end);
+void flush_tsb_user(struct tlb_batch *tb);
+void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr);
 
 /* TLB flush operations. */
 
@@ -36,15 +36,15 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 
 #define __HAVE_ARCH_ENTER_LAZY_MMU_MODE
 
-extern void flush_tlb_pending(void);
-extern void arch_enter_lazy_mmu_mode(void);
-extern void arch_leave_lazy_mmu_mode(void);
+void flush_tlb_pending(void);
+void arch_enter_lazy_mmu_mode(void);
+void arch_leave_lazy_mmu_mode(void);
 #define arch_flush_lazy_mmu_mode()      do {} while (0)
 
 /* Local cpu only.  */
-extern void __flush_tlb_all(void);
-extern void __flush_tlb_page(unsigned long context, unsigned long vaddr);
-extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void __flush_tlb_all(void);
+void __flush_tlb_page(unsigned long context, unsigned long vaddr);
+void __flush_tlb_kernel_range(unsigned long start, unsigned long end);
 
 #ifndef CONFIG_SMP
 
@@ -60,8 +60,8 @@ static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vad
 
 #else /* CONFIG_SMP */
 
-extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
-extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
+void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr);
 
 #define flush_tlb_kernel_range(start, end) \
 do {   flush_tsb_kernel_range(start,end); \
index a2d10fc..ed8f071 100644 (file)
@@ -18,7 +18,7 @@ static inline int cpu_to_node(int cpu)
 
 struct pci_bus;
 #ifdef CONFIG_PCI
-extern int pcibus_to_node(struct pci_bus *pbus);
+int pcibus_to_node(struct pci_bus *pbus);
 #else
 static inline int pcibus_to_node(struct pci_bus *pbus)
 {
index 7e26b2d..6fd4436 100644 (file)
@@ -51,11 +51,11 @@ struct trap_per_cpu {
        unsigned long           __per_cpu_base;
 } __attribute__((aligned(64)));
 extern struct trap_per_cpu trap_block[NR_CPUS];
-extern void init_cur_cpu_trap(struct thread_info *);
-extern void setup_tba(void);
+void init_cur_cpu_trap(struct thread_info *);
+void setup_tba(void);
 extern int ncpus_probed;
 
-extern unsigned long real_hard_smp_processor_id(void);
+unsigned long real_hard_smp_processor_id(void);
 
 struct cpuid_patch_entry {
        unsigned int    addr;
index 0167d26..bd56c28 100644 (file)
@@ -9,6 +9,6 @@
 #define user_addr_max() \
        (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
 
-extern long strncpy_from_user(char *dest, const char __user *src, long count);
+long strncpy_from_user(char *dest, const char __user *src, long count);
 
 #endif
index 53a28dd..9634d08 100644 (file)
@@ -78,9 +78,9 @@ struct exception_table_entry
 };
 
 /* Returns 0 if exception not found and fixup otherwise.  */
-extern unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
+unsigned long search_extables_range(unsigned long addr, unsigned long *g2);
 
-extern void __ret_efault(void);
+void __ret_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
@@ -152,7 +152,7 @@ __asm__ __volatile__(                                                       \
        : "=&r" (ret) : "r" (x), "m" (*__m(addr)),                      \
         "i" (-EFAULT))
 
-extern int __put_user_bad(void);
+int __put_user_bad(void);
 
 #define __get_user_check(x,addr,size,type) ({ \
 register int __gu_ret; \
@@ -244,9 +244,9 @@ __asm__ __volatile__(                                                       \
        ".previous\n\t"                                                 \
        : "=&r" (x) : "m" (*__m(addr)), "i" (retval))
 
-extern int __get_user_bad(void);
+int __get_user_bad(void);
 
-extern unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
+unsigned long __copy_user(void __user *to, const void __user *from, unsigned long size);
 
 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
 {
@@ -306,8 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
                return n;
 }
 
-extern __must_check long strlen_user(const char __user *str);
-extern __must_check long strnlen_user(const char __user *str, long n);
+__must_check long strlen_user(const char __user *str);
+__must_check long strnlen_user(const char __user *str, long n);
 
 #endif  /* __ASSEMBLY__ */
 
index ad7e178..c990a5e 100644 (file)
@@ -76,8 +76,8 @@ struct exception_table_entry {
         unsigned int insn, fixup;
 };
 
-extern void __ret_efault(void);
-extern void __retl_efault(void);
+void __ret_efault(void);
+void __retl_efault(void);
 
 /* Uh, these should become the main single-value transfer routines..
  * They automatically use the right size if we just have the right
@@ -134,7 +134,7 @@ __asm__ __volatile__(                                                       \
        : "=r" (ret) : "r" (x), "r" (__m(addr)),                                \
         "i" (-EFAULT))
 
-extern int __put_user_bad(void);
+int __put_user_bad(void);
 
 #define __get_user_nocheck(data,addr,size,type) ({ \
 register int __gu_ret; \
@@ -204,13 +204,13 @@ __asm__ __volatile__(                                                     \
        ".previous\n\t"                                                 \
        : "=r" (x) : "r" (__m(addr)), "i" (retval))
 
-extern int __get_user_bad(void);
+int __get_user_bad(void);
 
-extern unsigned long __must_check ___copy_from_user(void *to,
-                                                   const void __user *from,
-                                                   unsigned long size);
-extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
-                                         unsigned long size);
+unsigned long __must_check ___copy_from_user(void *to,
+                                            const void __user *from,
+                                            unsigned long size);
+unsigned long copy_from_user_fixup(void *to, const void __user *from,
+                                  unsigned long size);
 static inline unsigned long __must_check
 copy_from_user(void *to, const void __user *from, unsigned long size)
 {
@@ -223,11 +223,11 @@ copy_from_user(void *to, const void __user *from, unsigned long size)
 }
 #define __copy_from_user copy_from_user
 
-extern unsigned long __must_check ___copy_to_user(void __user *to,
-                                                 const void *from,
-                                                 unsigned long size);
-extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
-                                       unsigned long size);
+unsigned long __must_check ___copy_to_user(void __user *to,
+                                          const void *from,
+                                          unsigned long size);
+unsigned long copy_to_user_fixup(void __user *to, const void *from,
+                                unsigned long size);
 static inline unsigned long __must_check
 copy_to_user(void __user *to, const void *from, unsigned long size)
 {
@@ -239,11 +239,11 @@ copy_to_user(void __user *to, const void *from, unsigned long size)
 }
 #define __copy_to_user copy_to_user
 
-extern unsigned long __must_check ___copy_in_user(void __user *to,
-                                                 const void __user *from,
-                                                 unsigned long size);
-extern unsigned long copy_in_user_fixup(void __user *to, void __user *from,
-                                       unsigned long size);
+unsigned long __must_check ___copy_in_user(void __user *to,
+                                          const void __user *from,
+                                          unsigned long size);
+unsigned long copy_in_user_fixup(void __user *to, void __user *from,
+                                unsigned long size);
 static inline unsigned long __must_check
 copy_in_user(void __user *to, void __user *from, unsigned long size)
 {
@@ -255,20 +255,20 @@ copy_in_user(void __user *to, void __user *from, unsigned long size)
 }
 #define __copy_in_user copy_in_user
 
-extern unsigned long __must_check __clear_user(void __user *, unsigned long);
+unsigned long __must_check __clear_user(void __user *, unsigned long);
 
 #define clear_user __clear_user
 
-extern __must_check long strlen_user(const char __user *str);
-extern __must_check long strnlen_user(const char __user *str, long n);
+__must_check long strlen_user(const char __user *str);
+__must_check long strnlen_user(const char __user *str, long n);
 
 #define __copy_to_user_inatomic __copy_to_user
 #define __copy_from_user_inatomic __copy_from_user
 
 struct pt_regs;
-extern unsigned long compute_effective_address(struct pt_regs *,
-                                              unsigned int insn,
-                                              unsigned int rd);
+unsigned long compute_effective_address(struct pt_regs *,
+                                       unsigned int insn,
+                                       unsigned int rd);
 
 #endif  /* __ASSEMBLY__ */
 
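A note on the pattern in the hunks above, which drop "extern" from the
function prototypes: in C, a file-scope function declaration has
external linkage with or without "extern", so the keyword is redundant
for functions. For objects it is not, which is why the patch leaves
variable declarations alone. A minimal illustration (not part of the
patch; __put_user_bad is reused from the hunk above, "counter" is a
hypothetical variable):

    extern int __put_user_bad(void);  /* declaration */
    int __put_user_bad(void);         /* identical declaration */

    extern unsigned long counter;     /* declaration only */
    unsigned long counter;            /* tentative definition! */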
index 432afa8..e0f6c39 100644
@@ -372,14 +372,14 @@ do {      if (vio->debug & VIO_DEBUG_##TYPE) \
                       vio->vdev->channel_id, ## a); \
 } while (0)
 
-extern int __vio_register_driver(struct vio_driver *drv, struct module *owner,
+int __vio_register_driver(struct vio_driver *drv, struct module *owner,
                                 const char *mod_name);
 /*
  * vio_register_driver must be a macro so that KBUILD_MODNAME can be expanded
  */
 #define vio_register_driver(driver)            \
        __vio_register_driver(driver, THIS_MODULE, KBUILD_MODNAME)
-extern void vio_unregister_driver(struct vio_driver *drv);
+void vio_unregister_driver(struct vio_driver *drv);
 
 static inline struct vio_driver *to_vio_driver(struct device_driver *drv)
 {
@@ -391,21 +391,21 @@ static inline struct vio_dev *to_vio_dev(struct device *dev)
        return container_of(dev, struct vio_dev, dev);
 }
 
-extern int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
-extern void vio_link_state_change(struct vio_driver_state *vio, int event);
-extern void vio_conn_reset(struct vio_driver_state *vio);
-extern int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
-extern int vio_validate_sid(struct vio_driver_state *vio,
-                           struct vio_msg_tag *tp);
-extern u32 vio_send_sid(struct vio_driver_state *vio);
-extern int vio_ldc_alloc(struct vio_driver_state *vio,
-                        struct ldc_channel_config *base_cfg, void *event_arg);
-extern void vio_ldc_free(struct vio_driver_state *vio);
-extern int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
-                          u8 dev_class, struct vio_version *ver_table,
-                          int ver_table_size, struct vio_driver_ops *ops,
-                          char *name);
-
-extern void vio_port_up(struct vio_driver_state *vio);
+int vio_ldc_send(struct vio_driver_state *vio, void *data, int len);
+void vio_link_state_change(struct vio_driver_state *vio, int event);
+void vio_conn_reset(struct vio_driver_state *vio);
+int vio_control_pkt_engine(struct vio_driver_state *vio, void *pkt);
+int vio_validate_sid(struct vio_driver_state *vio,
+                    struct vio_msg_tag *tp);
+u32 vio_send_sid(struct vio_driver_state *vio);
+int vio_ldc_alloc(struct vio_driver_state *vio,
+                 struct ldc_channel_config *base_cfg, void *event_arg);
+void vio_ldc_free(struct vio_driver_state *vio);
+int vio_driver_init(struct vio_driver_state *vio, struct vio_dev *vdev,
+                   u8 dev_class, struct vio_version *ver_table,
+                   int ver_table_size, struct vio_driver_ops *ops,
+                   char *name);
+
+void vio_port_up(struct vio_driver_state *vio);
 
 #endif /* _SPARC64_VIO_H */
index 39ca301..b266737 100644
@@ -57,7 +57,8 @@ static inline void save_and_clear_fpu(void) {
 "              " : : "i" (FPRS_FEF|FPRS_DU) :
                "o5", "g1", "g2", "g3", "g7", "cc");
 }
-extern int vis_emul(struct pt_regs *, unsigned int);
+
+int vis_emul(struct pt_regs *, unsigned int);
 #endif
 
 #endif /* _SPARC64_ASI_H */
index ee8edc6..50c8828 100644
 
 #include <asm/spitfire.h>
 
-extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
-                     unsigned long *);
-extern void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
-                     unsigned long *, unsigned long *);
-extern void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
-                     unsigned long *, unsigned long *, unsigned long *);
+void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
+void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
+              unsigned long *);
+void xor_vis_4(unsigned long, unsigned long *, unsigned long *,
+              unsigned long *, unsigned long *);
+void xor_vis_5(unsigned long, unsigned long *, unsigned long *,
+              unsigned long *, unsigned long *, unsigned long *);
 
 /* XXX Ugh, write cheetah versions... -DaveM */
 
@@ -38,13 +38,13 @@ static struct xor_block_template xor_block_VIS = {
         .do_5  = xor_vis_5,
 };
 
-extern void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
-extern void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
-                         unsigned long *);
-extern void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
-                         unsigned long *, unsigned long *);
-extern void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
-                         unsigned long *, unsigned long *, unsigned long *);
+void xor_niagara_2(unsigned long, unsigned long *, unsigned long *);
+void xor_niagara_3(unsigned long, unsigned long *, unsigned long *,
+                  unsigned long *);
+void xor_niagara_4(unsigned long, unsigned long *, unsigned long *,
+                  unsigned long *, unsigned long *);
+void xor_niagara_5(unsigned long, unsigned long *, unsigned long *,
+                  unsigned long *, unsigned long *, unsigned long *);
 
 static struct xor_block_template xor_block_niagara = {
         .name  = "Niagara",
index b73274f..42f2bca 100644
 #define __NR_finit_module      342
 #define __NR_sched_setattr     343
 #define __NR_sched_getattr     344
+#define __NR_renameat2         345
 
-#define NR_syscalls            345
+#define NR_syscalls            346
 
 /* Bitmask values returned from kern_features system call.  */
 #define KERN_FEATURE_MIXED_MODE_STACK  0x00000001
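The unistd.h hunk above assigns renameat2 the next free slot and bumps
NR_syscalls so it stays one past the highest assigned number; without
the bump, the entry path's bounds check would reject the new call. A
simplified user-space sketch of that check (dispatch() and its return
values are stand-ins, not the kernel's actual entry code):

    #include <errno.h>
    #include <stdio.h>

    #define __NR_renameat2 345
    #define NR_syscalls    346  /* one past the highest syscall number */

    /* Stand-in for the syscall entry bounds check. */
    static long dispatch(unsigned int nr)
    {
            if (nr >= NR_syscalls)
                    return -ENOSYS;  /* unknown syscall */
            return 0;  /* the real code would index sys_call_table[nr] */
    }

    int main(void)
    {
            printf("renameat2 %s\n",
                   dispatch(__NR_renameat2) ? "rejected" : "accepted");
            return 0;
    }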
index d15cc17..7cf9c6e 100644
@@ -42,7 +42,6 @@ obj-y                   += time_$(BITS).o
 obj-$(CONFIG_SPARC32)   += windows.o
 obj-y                   += cpu.o
 obj-$(CONFIG_SPARC32)   += devices.o
-obj-$(CONFIG_SPARC32)   += tadpole.o
 obj-y                   += ptrace_$(BITS).o
 obj-y                   += unaligned_$(BITS).o
 obj-y                   += una_asm_$(BITS).o
index 8fff0ac..24361b4 100644
@@ -3,6 +3,8 @@
 #include <linux/audit.h>
 #include <asm/unistd.h>
 
+#include "kernel.h"
+
 static unsigned dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
 ~0U
@@ -40,7 +42,6 @@ int audit_classify_arch(int arch)
 int audit_classify_syscall(int abi, unsigned syscall)
 {
 #ifdef CONFIG_COMPAT
-       extern int sparc32_classify_syscall(unsigned);
        if (abi == AUDIT_ARCH_SPARC)
                return sparc32_classify_syscall(syscall);
 #endif
@@ -61,11 +62,6 @@ int audit_classify_syscall(int abi, unsigned syscall)
 static int __init audit_classes_init(void)
 {
 #ifdef CONFIG_COMPAT
-       extern __u32 sparc32_dir_class[];
-       extern __u32 sparc32_write_class[];
-       extern __u32 sparc32_read_class[];
-       extern __u32 sparc32_chattr_class[];
-       extern __u32 sparc32_signal_class[];
        audit_register_class(AUDIT_CLASS_WRITE_32, sparc32_write_class);
        audit_register_class(AUDIT_CLASS_READ_32, sparc32_read_class);
        audit_register_class(AUDIT_CLASS_DIR_WRITE_32, sparc32_dir_class);
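This audit.c hunk swaps function-local extern declarations for the
shared prototypes now collected in kernel.h (see the kernel.h diff
further down). The value of the pattern: when the defining file and
every user include the same header, a declaration/definition mismatch
becomes a compile error instead of a silent link-time hazard. A sketch,
reusing names from the hunk:

    /* kernel.h: the single shared declaration */
    extern unsigned sparc32_dir_class[];
    int sparc32_classify_syscall(unsigned syscall);

    /* audit.c and compat_audit.c both simply do: */
    #include "kernel.h"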
index e20cc55..ae88c22 100644
@@ -9,12 +9,15 @@
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/export.h>
+
 #include <asm/oplib.h>
 #include <asm/io.h>
 #include <asm/auxio.h>
 #include <asm/string.h>                /* memset(), Linux has no bzero() */
 #include <asm/cpu_type.h>
 
+#include "kernel.h"
+
 /* Probe and map in the Auxiliary I/O register */
 
 /* auxio_register is not static because it is referenced 
@@ -103,7 +106,7 @@ EXPORT_SYMBOL(set_auxio);
 
 /* sun4m power control register (AUXIO2) */
 
-volatile unsigned char * auxio_power_register = NULL;
+volatile u8 __iomem *auxio_power_register = NULL;
 
 void __init auxio_power_probe(void)
 {
@@ -127,8 +130,8 @@ void __init auxio_power_probe(void)
        r.flags = regs.which_io & 0xF;
        r.start = regs.phys_addr;
        r.end = regs.phys_addr + regs.reg_size - 1;
-       auxio_power_register = (unsigned char *) of_ioremap(&r, 0,
-           regs.reg_size, "auxpower");
+       auxio_power_register =
+               (u8 __iomem *)of_ioremap(&r, 0, regs.reg_size, "auxpower");
 
        /* Display a quick message on the console. */
        if (auxio_power_register)
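auxio_power_register becomes a u8 __iomem pointer here. __iomem is a
sparse address-space annotation: pointers into MMIO should come from
ioremap-style calls (of_ioremap() above) and be accessed through
helpers such as readb()/writeb() rather than plain dereferences. A
minimal kernel-style sketch, assuming the usual accessors:

    #include <linux/io.h>

    /* The accessor keeps sparse quiet and gives MMIO the access
     * semantics it needs; a bare *reg dereference would draw a
     * sparse warning. */
    static u8 read_auxio_power(volatile u8 __iomem *reg)
    {
            return readb(reg);
    }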
index 57073e5..987f7ec 100644
@@ -137,7 +137,7 @@ static void scrollscreen(void)
 }
 #endif /* ndef NO_SCROLL */
 
-void btext_drawchar(char c)
+static void btext_drawchar(char c)
 {
        int cline = 0;
 #ifdef NO_SCROLL
index d865575..7062263 100644
@@ -1,5 +1,6 @@
 #define __32bit_syscall_numbers__
 #include <asm/unistd.h>
+#include "kernel.h"
 
 unsigned sparc32_dir_class[] = {
 #include <asm-generic/audit_dir_write.h>
index 5c51258..82a3a71 100644
@@ -22,6 +22,7 @@
 #include <asm/cpudata.h>
 
 #include "kernel.h"
+#include "entry.h"
 
 DEFINE_PER_CPU(cpuinfo_sparc, __cpu_data) = { 0 };
 EXPORT_PER_CPU_SYMBOL(__cpu_data);
index e639880..9dac398 100644
@@ -2,8 +2,8 @@
 #define _CPUMAP_H
 
 #ifdef CONFIG_SMP
-extern void cpu_map_rebuild(void);
-extern int  map_to_cpu(unsigned int index);
+void cpu_map_rebuild(void);
+int map_to_cpu(unsigned int index);
 #define cpu_map_init() cpu_map_rebuild()
 #else
 #define cpu_map_init() do {} while (0)
index 3d465e8..8d5d09f 100644
@@ -19,8 +19,9 @@
 #include <asm/smp.h>
 #include <asm/cpudata.h>
 #include <asm/cpu_type.h>
+#include <asm/setup.h>
 
-extern void clock_stop_probe(void); /* tadpole.c */
+#include "kernel.h"
 
 static char *cpu_mid_prop(void)
 {
@@ -131,11 +132,6 @@ void __init device_scan(void)
        }
 #endif /* !CONFIG_SMP */
 
-       {
-               extern void auxio_probe(void);
-               extern void auxio_power_probe(void);
-               auxio_probe();
-               auxio_power_probe();
-       }
-       clock_stop_probe();
+       auxio_probe();
+       auxio_power_probe();
 }
index 140966f..ebaba61 100644
@@ -6,40 +6,39 @@
 #include <linux/init.h>
 
 /* irq */
-extern void handler_irq(int irq, struct pt_regs *regs);
+void handler_irq(int irq, struct pt_regs *regs);
 
 #ifdef CONFIG_SPARC32
 /* traps */
-extern void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
-extern void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
-                                   unsigned long npc, unsigned long psr);
-
-extern void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
-                                unsigned long npc, unsigned long psr);
-extern void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
-                                   unsigned long npc,
-                                   unsigned long psr);
-extern void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+void do_hw_interrupt(struct pt_regs *regs, unsigned long type);
+void do_illegal_instruction(struct pt_regs *regs, unsigned long pc,
+                            unsigned long npc, unsigned long psr);
+
+void do_priv_instruction(struct pt_regs *regs, unsigned long pc,
+                         unsigned long npc, unsigned long psr);
+void do_memaccess_unaligned(struct pt_regs *regs, unsigned long pc,
+                            unsigned long npc, unsigned long psr);
+void do_fpd_trap(struct pt_regs *regs, unsigned long pc,
+                 unsigned long npc, unsigned long psr);
+void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
+                 unsigned long npc, unsigned long psr);
+void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
+                         unsigned long npc, unsigned long psr);
+void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
+                       unsigned long npc, unsigned long psr);
+void handle_reg_access(struct pt_regs *regs, unsigned long pc,
+                       unsigned long npc, unsigned long psr);
+void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
                         unsigned long npc, unsigned long psr);
-extern void do_fpe_trap(struct pt_regs *regs, unsigned long pc,
-                        unsigned long npc, unsigned long psr);
-extern void handle_tag_overflow(struct pt_regs *regs, unsigned long pc,
-                                unsigned long npc, unsigned long psr);
-extern void handle_watchpoint(struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
-extern void handle_reg_access(struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
-extern void handle_cp_disabled(struct pt_regs *regs, unsigned long pc,
-                               unsigned long npc, unsigned long psr);
-extern void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
-                                unsigned long npc, unsigned long psr);
+void handle_cp_exception(struct pt_regs *regs, unsigned long pc,
+                         unsigned long npc, unsigned long psr);
 
 
 
 /* entry.S */
-extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
-                   void *fpqueue, unsigned long *fpqdepth);
-extern void fpload(unsigned long *fpregs, unsigned long *fsr);
+void fpsave(unsigned long *fpregs, unsigned long *fsr,
+            void *fpqueue, unsigned long *fpqdepth);
+void fpload(unsigned long *fpregs, unsigned long *fsr);
 
 #else /* CONFIG_SPARC32 */
 
@@ -66,123 +65,123 @@ struct pause_patch_entry {
 extern struct pause_patch_entry __pause_3insn_patch,
        __pause_3insn_patch_end;
 
-extern void __init per_cpu_patch(void);
-extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
-                                   struct sun4v_1insn_patch_entry *);
-extern void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
-                                   struct sun4v_2insn_patch_entry *);
-extern void __init sun4v_patch(void);
-extern void __init boot_cpu_id_too_large(int cpu);
+void __init per_cpu_patch(void);
+void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
+                            struct sun4v_1insn_patch_entry *);
+void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                            struct sun4v_2insn_patch_entry *);
+void __init sun4v_patch(void);
+void __init boot_cpu_id_too_large(int cpu);
 extern unsigned int dcache_parity_tl1_occurred;
 extern unsigned int icache_parity_tl1_occurred;
 
-extern asmlinkage void sparc_breakpoint(struct pt_regs *regs);
-extern void timer_interrupt(int irq, struct pt_regs *regs);
-
-extern void do_notify_resume(struct pt_regs *regs,
-                            unsigned long orig_i0,
-                            unsigned long thread_info_flags);
-
-extern asmlinkage int syscall_trace_enter(struct pt_regs *regs);
-extern asmlinkage void syscall_trace_leave(struct pt_regs *regs);
-
-extern void bad_trap_tl1(struct pt_regs *regs, long lvl);
-
-extern void do_fpieee(struct pt_regs *regs);
-extern void do_fpother(struct pt_regs *regs);
-extern void do_tof(struct pt_regs *regs);
-extern void do_div0(struct pt_regs *regs);
-extern void do_illegal_instruction(struct pt_regs *regs);
-extern void mem_address_unaligned(struct pt_regs *regs,
-                                 unsigned long sfar,
-                                 unsigned long sfsr);
-extern void sun4v_do_mna(struct pt_regs *regs,
-                        unsigned long addr,
-                        unsigned long type_ctx);
-extern void do_privop(struct pt_regs *regs);
-extern void do_privact(struct pt_regs *regs);
-extern void do_cee(struct pt_regs *regs);
-extern void do_cee_tl1(struct pt_regs *regs);
-extern void do_dae_tl1(struct pt_regs *regs);
-extern void do_iae_tl1(struct pt_regs *regs);
-extern void do_div0_tl1(struct pt_regs *regs);
-extern void do_fpdis_tl1(struct pt_regs *regs);
-extern void do_fpieee_tl1(struct pt_regs *regs);
-extern void do_fpother_tl1(struct pt_regs *regs);
-extern void do_ill_tl1(struct pt_regs *regs);
-extern void do_irq_tl1(struct pt_regs *regs);
-extern void do_lddfmna_tl1(struct pt_regs *regs);
-extern void do_stdfmna_tl1(struct pt_regs *regs);
-extern void do_paw(struct pt_regs *regs);
-extern void do_paw_tl1(struct pt_regs *regs);
-extern void do_vaw(struct pt_regs *regs);
-extern void do_vaw_tl1(struct pt_regs *regs);
-extern void do_tof_tl1(struct pt_regs *regs);
-extern void do_getpsr(struct pt_regs *regs);
-
-extern void spitfire_insn_access_exception(struct pt_regs *regs,
-                                          unsigned long sfsr,
-                                          unsigned long sfar);
-extern void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
-                                              unsigned long sfsr,
-                                              unsigned long sfar);
-extern void spitfire_data_access_exception(struct pt_regs *regs,
-                                          unsigned long sfsr,
-                                          unsigned long sfar);
-extern void spitfire_data_access_exception_tl1(struct pt_regs *regs,
-                                              unsigned long sfsr,
-                                              unsigned long sfar);
-extern void spitfire_access_error(struct pt_regs *regs,
-                                 unsigned long status_encoded,
-                                 unsigned long afar);
-
-extern void cheetah_fecc_handler(struct pt_regs *regs,
-                                unsigned long afsr,
-                                unsigned long afar);
-extern void cheetah_cee_handler(struct pt_regs *regs,
-                               unsigned long afsr,
-                               unsigned long afar);
-extern void cheetah_deferred_handler(struct pt_regs *regs,
-                                    unsigned long afsr,
-                                    unsigned long afar);
-extern void cheetah_plus_parity_error(int type, struct pt_regs *regs);
-
-extern void sun4v_insn_access_exception(struct pt_regs *regs,
-                                       unsigned long addr,
-                                       unsigned long type_ctx);
-extern void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
-                                           unsigned long addr,
-                                           unsigned long type_ctx);
-extern void sun4v_data_access_exception(struct pt_regs *regs,
-                                       unsigned long addr,
-                                       unsigned long type_ctx);
-extern void sun4v_data_access_exception_tl1(struct pt_regs *regs,
-                                           unsigned long addr,
-                                           unsigned long type_ctx);
-extern void sun4v_resum_error(struct pt_regs *regs,
-                             unsigned long offset);
-extern void sun4v_resum_overflow(struct pt_regs *regs);
-extern void sun4v_nonresum_error(struct pt_regs *regs,
-                                unsigned long offset);
-extern void sun4v_nonresum_overflow(struct pt_regs *regs);
+asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+void timer_interrupt(int irq, struct pt_regs *regs);
+
+void do_notify_resume(struct pt_regs *regs,
+                     unsigned long orig_i0,
+                     unsigned long thread_info_flags);
+
+asmlinkage int syscall_trace_enter(struct pt_regs *regs);
+asmlinkage void syscall_trace_leave(struct pt_regs *regs);
+
+void bad_trap_tl1(struct pt_regs *regs, long lvl);
+
+void do_fpieee(struct pt_regs *regs);
+void do_fpother(struct pt_regs *regs);
+void do_tof(struct pt_regs *regs);
+void do_div0(struct pt_regs *regs);
+void do_illegal_instruction(struct pt_regs *regs);
+void mem_address_unaligned(struct pt_regs *regs,
+                          unsigned long sfar,
+                          unsigned long sfsr);
+void sun4v_do_mna(struct pt_regs *regs,
+                 unsigned long addr,
+                 unsigned long type_ctx);
+void do_privop(struct pt_regs *regs);
+void do_privact(struct pt_regs *regs);
+void do_cee(struct pt_regs *regs);
+void do_cee_tl1(struct pt_regs *regs);
+void do_dae_tl1(struct pt_regs *regs);
+void do_iae_tl1(struct pt_regs *regs);
+void do_div0_tl1(struct pt_regs *regs);
+void do_fpdis_tl1(struct pt_regs *regs);
+void do_fpieee_tl1(struct pt_regs *regs);
+void do_fpother_tl1(struct pt_regs *regs);
+void do_ill_tl1(struct pt_regs *regs);
+void do_irq_tl1(struct pt_regs *regs);
+void do_lddfmna_tl1(struct pt_regs *regs);
+void do_stdfmna_tl1(struct pt_regs *regs);
+void do_paw(struct pt_regs *regs);
+void do_paw_tl1(struct pt_regs *regs);
+void do_vaw(struct pt_regs *regs);
+void do_vaw_tl1(struct pt_regs *regs);
+void do_tof_tl1(struct pt_regs *regs);
+void do_getpsr(struct pt_regs *regs);
+
+void spitfire_insn_access_exception(struct pt_regs *regs,
+                                   unsigned long sfsr,
+                                   unsigned long sfar);
+void spitfire_insn_access_exception_tl1(struct pt_regs *regs,
+                                       unsigned long sfsr,
+                                       unsigned long sfar);
+void spitfire_data_access_exception(struct pt_regs *regs,
+                                   unsigned long sfsr,
+                                   unsigned long sfar);
+void spitfire_data_access_exception_tl1(struct pt_regs *regs,
+                                       unsigned long sfsr,
+                                       unsigned long sfar);
+void spitfire_access_error(struct pt_regs *regs,
+                          unsigned long status_encoded,
+                          unsigned long afar);
+
+void cheetah_fecc_handler(struct pt_regs *regs,
+                         unsigned long afsr,
+                         unsigned long afar);
+void cheetah_cee_handler(struct pt_regs *regs,
+                        unsigned long afsr,
+                        unsigned long afar);
+void cheetah_deferred_handler(struct pt_regs *regs,
+                             unsigned long afsr,
+                             unsigned long afar);
+void cheetah_plus_parity_error(int type, struct pt_regs *regs);
+
+void sun4v_insn_access_exception(struct pt_regs *regs,
+                                unsigned long addr,
+                                unsigned long type_ctx);
+void sun4v_insn_access_exception_tl1(struct pt_regs *regs,
+                                    unsigned long addr,
+                                    unsigned long type_ctx);
+void sun4v_data_access_exception(struct pt_regs *regs,
+                                unsigned long addr,
+                                unsigned long type_ctx);
+void sun4v_data_access_exception_tl1(struct pt_regs *regs,
+                                    unsigned long addr,
+                                    unsigned long type_ctx);
+void sun4v_resum_error(struct pt_regs *regs,
+                      unsigned long offset);
+void sun4v_resum_overflow(struct pt_regs *regs);
+void sun4v_nonresum_error(struct pt_regs *regs,
+                         unsigned long offset);
+void sun4v_nonresum_overflow(struct pt_regs *regs);
 
 extern unsigned long sun4v_err_itlb_vaddr;
 extern unsigned long sun4v_err_itlb_ctx;
 extern unsigned long sun4v_err_itlb_pte;
 extern unsigned long sun4v_err_itlb_error;
 
-extern void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
+void sun4v_itlb_error_report(struct pt_regs *regs, int tl);
 
 extern unsigned long sun4v_err_dtlb_vaddr;
 extern unsigned long sun4v_err_dtlb_ctx;
 extern unsigned long sun4v_err_dtlb_pte;
 extern unsigned long sun4v_err_dtlb_error;
 
-extern void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
-extern void hypervisor_tlbop_error(unsigned long err,
-                                  unsigned long op);
-extern void hypervisor_tlbop_error_xcall(unsigned long err,
-                                        unsigned long op);
+void sun4v_dtlb_error_report(struct pt_regs *regs, int tl);
+void hypervisor_tlbop_error(unsigned long err,
+                           unsigned long op);
+void hypervisor_tlbop_error_xcall(unsigned long err,
+                                 unsigned long op);
 
 /* WARNING: The error trap handlers in assembly know the precise
  *         layout of the following structure.
@@ -248,8 +247,8 @@ struct ino_bucket {
 extern struct ino_bucket *ivector_table;
 extern unsigned long ivector_table_pa;
 
-extern void init_irqwork_curcpu(void);
-extern void sun4v_register_mondo_queues(int this_cpu);
+void init_irqwork_curcpu(void);
+void sun4v_register_mondo_queues(int this_cpu);
 
 #endif /* CONFIG_SPARC32 */
 #endif /* _ENTRY_H */
index 76663b0..bfa4d0c 100644
@@ -21,6 +21,7 @@
 #include <asm/iommu.h>
 
 #include "iommu_common.h"
+#include "kernel.h"
 
 #define STC_CTXMATCH_ADDR(STC, CTX)    \
        ((STC)->strbuf_ctxmatch_base + ((CTX) << 3))
@@ -840,8 +841,6 @@ static struct dma_map_ops sun4u_dma_ops = {
 struct dma_map_ops *dma_ops = &sun4u_dma_ops;
 EXPORT_SYMBOL(dma_ops);
 
-extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
-
 int dma_supported(struct device *dev, u64 device_mask)
 {
        struct iommu *iommu = dev->archdata.iommu;
index 591f587..1ec0de4 100644
@@ -48,12 +48,12 @@ static inline int is_span_boundary(unsigned long entry,
        return iommu_is_span_boundary(entry, nr, shift, boundary_size);
 }
 
-extern unsigned long iommu_range_alloc(struct device *dev,
-                                      struct iommu *iommu,
-                                      unsigned long npages,
-                                      unsigned long *handle);
-extern void iommu_range_free(struct iommu *iommu,
-                            dma_addr_t dma_addr,
-                            unsigned long npages);
+unsigned long iommu_range_alloc(struct device *dev,
+                               struct iommu *iommu,
+                               unsigned long npages,
+                               unsigned long *handle);
+void iommu_range_free(struct iommu *iommu,
+                     dma_addr_t dma_addr,
+                     unsigned long npages);
 
 #endif /* _IOMMU_COMMON_H */
index e7e215d..7f08ec8 100644
@@ -186,7 +186,7 @@ static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
 
        if (name == NULL) name = "???";
 
-       if ((xres = xres_alloc()) != 0) {
+       if ((xres = xres_alloc()) != NULL) {
                tack = xres->xname;
                res = &xres->xres;
        } else {
@@ -400,7 +400,7 @@ static void sbus_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
        BUG();
 }
 
-struct dma_map_ops sbus_dma_ops = {
+static struct dma_map_ops sbus_dma_ops = {
        .alloc                  = sbus_alloc_coherent,
        .free                   = sbus_free_coherent,
        .map_page               = sbus_map_page,
@@ -681,7 +681,7 @@ static int sparc_io_proc_show(struct seq_file *m, void *v)
        const char *nm;
 
        for (r = root->child; r != NULL; r = r->sibling) {
-               if ((nm = r->name) == 0) nm = "???";
+               if ((nm = r->name) == NULL) nm = "???";
                seq_printf(m, "%016llx-%016llx: %s\n",
                                (unsigned long long)r->start,
                                (unsigned long long)r->end, nm);
index b66b6aa..70a0b8d 100644
@@ -82,11 +82,20 @@ void handler_irq(unsigned int pil, struct pt_regs *regs);
 
 unsigned long leon_get_irqmask(unsigned int irq);
 
+/* irq_32.c */
+void sparc_floppy_irq(int irq, void *dev_id, struct pt_regs *regs);
+
+/* sun4m_irq.c */
+void sun4m_nmi(struct pt_regs *regs);
+
+/* sun4d_irq.c */
+void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs);
+
 #ifdef CONFIG_SMP
 
 /* All SUN4D IPIs are sent on this IRQ, may be shared with hard IRQs */
 #define SUN4D_IPI_IRQ 13
 
-extern void sun4d_ipi_interrupt(void);
+void sun4d_ipi_interrupt(void);
 
 #endif
index c145f6f..a979e99 100644
@@ -17,6 +17,7 @@
 
 #include <asm/cacheflush.h>
 #include <asm/cpudata.h>
+#include <asm/setup.h>
 #include <asm/pcic.h>
 #include <asm/leon.h>
 
index a702d9a..e7f652b 100644
@@ -2,6 +2,7 @@
 #define __SPARC_KERNEL_H
 
 #include <linux/interrupt.h>
+#include <linux/ftrace.h>
 
 #include <asm/traps.h>
 #include <asm/head.h>
@@ -15,62 +16,111 @@ extern int ncpus_probed;
 #ifdef CONFIG_SPARC64
 /* setup_64.c */
 struct seq_file;
-extern void cpucap_info(struct seq_file *);
+void cpucap_info(struct seq_file *);
 
-static inline unsigned long kimage_addr_to_ra(const char *p)
+static inline unsigned long kimage_addr_to_ra(const void *p)
 {
        unsigned long val = (unsigned long) p;
 
        return kern_base + (val - KERNBASE);
 }
+
+/* sys_sparc_64.c */
+asmlinkage long sys_kern_features(void);
+
+/* unaligned_64.c */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+int handle_popc(u32 insn, struct pt_regs *regs);
+void handle_lddfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
+void handle_stdfmna(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr);
+
+/* smp_64.c */
+void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_new_mmu_context_version_client(int irq, struct pt_regs *regs);
+void __irq_entry smp_penguin_jailcell(int irq, struct pt_regs *regs);
+void __irq_entry smp_receive_signal_client(int irq, struct pt_regs *regs);
+
+/* kgdb_64.c */
+void __irq_entry smp_kgdb_capture_client(int irq, struct pt_regs *regs);
+
+/* pci.c */
+int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
+
+/* signal32.c */
+void do_sigreturn32(struct pt_regs *regs);
+asmlinkage void do_rt_sigreturn32(struct pt_regs *regs);
+void do_signal32(struct pt_regs * regs);
+asmlinkage int do_sys32_sigstack(u32 u_ssptr, u32 u_ossptr, unsigned long sp);
+
+/* compat_audit.c */
+extern unsigned sparc32_dir_class[];
+extern unsigned sparc32_chattr_class[];
+extern unsigned sparc32_write_class[];
+extern unsigned sparc32_read_class[];
+extern unsigned sparc32_signal_class[];
+int sparc32_classify_syscall(unsigned syscall);
 #endif
 
 #ifdef CONFIG_SPARC32
 /* setup_32.c */
+struct linux_romvec;
 void sparc32_start_kernel(struct linux_romvec *rp);
 
 /* cpu.c */
-extern void cpu_probe(void);
+void cpu_probe(void);
 
 /* traps_32.c */
-extern void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
-                              unsigned long npc, unsigned long psr);
+void handle_hw_divzero(struct pt_regs *regs, unsigned long pc,
+                       unsigned long npc, unsigned long psr);
 /* irq_32.c */
 extern struct irqaction static_irqaction[];
 extern int static_irq_count;
 extern spinlock_t irq_action_lock;
 
-extern void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
-extern void init_IRQ(void);
+void unexpected_irq(int irq, void *dev_id, struct pt_regs * regs);
+void init_IRQ(void);
 
 /* sun4m_irq.c */
-extern void sun4m_init_IRQ(void);
-extern void sun4m_unmask_profile_irq(void);
-extern void sun4m_clear_profile_irq(int cpu);
+void sun4m_init_IRQ(void);
+void sun4m_unmask_profile_irq(void);
+void sun4m_clear_profile_irq(int cpu);
 
 /* sun4m_smp.c */
 void sun4m_cpu_pre_starting(void *arg);
 void sun4m_cpu_pre_online(void *arg);
+void __init smp4m_boot_cpus(void);
+int smp4m_boot_one_cpu(int i, struct task_struct *idle);
+void __init smp4m_smp_done(void);
+void smp4m_cross_call_irq(void);
+void smp4m_percpu_timer_interrupt(struct pt_regs *regs);
 
 /* sun4d_irq.c */
 extern spinlock_t sun4d_imsk_lock;
 
-extern void sun4d_init_IRQ(void);
-extern int sun4d_request_irq(unsigned int irq,
-                             irq_handler_t handler,
-                             unsigned long irqflags,
-                             const char *devname, void *dev_id);
-extern int show_sun4d_interrupts(struct seq_file *, void *);
-extern void sun4d_distribute_irqs(void);
-extern void sun4d_free_irq(unsigned int irq, void *dev_id);
+void sun4d_init_IRQ(void);
+int sun4d_request_irq(unsigned int irq,
+                      irq_handler_t handler,
+                      unsigned long irqflags,
+                      const char *devname, void *dev_id);
+int show_sun4d_interrupts(struct seq_file *, void *);
+void sun4d_distribute_irqs(void);
+void sun4d_free_irq(unsigned int irq, void *dev_id);
 
 /* sun4d_smp.c */
 void sun4d_cpu_pre_starting(void *arg);
 void sun4d_cpu_pre_online(void *arg);
+void __init smp4d_boot_cpus(void);
+int smp4d_boot_one_cpu(int i, struct task_struct *idle);
+void __init smp4d_smp_done(void);
+void smp4d_cross_call_irq(void);
+void smp4d_percpu_timer_interrupt(struct pt_regs *regs);
 
 /* leon_smp.c */
 void leon_cpu_pre_starting(void *arg);
 void leon_cpu_pre_online(void *arg);
+void leonsmp_ipi_interrupt(void);
+void leon_cross_call_irq(void);
 
 /* head_32.S */
 extern unsigned int t_nmi[];
@@ -89,12 +139,48 @@ extern unsigned int real_irq_entry[];
 extern unsigned int smp4d_ticker[];
 extern unsigned int patchme_maybe_smp_msg[];
 
-extern void floppy_hardint(void);
+void floppy_hardint(void);
 
 /* trampoline_32.S */
 extern unsigned long sun4m_cpu_startup;
 extern unsigned long sun4d_cpu_startup;
 
+/* process_32.c */
+asmlinkage int sparc_do_fork(unsigned long clone_flags,
+                             unsigned long stack_start,
+                             struct pt_regs *regs,
+                             unsigned long stack_size);
+
+/* signal_32.c */
+asmlinkage void do_sigreturn(struct pt_regs *regs);
+asmlinkage void do_rt_sigreturn(struct pt_regs *regs);
+void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
+                      unsigned long thread_info_flags);
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+                               struct sigstack __user *ossptr,
+                               unsigned long sp);
+
+/* ptrace_32.c */
+asmlinkage int syscall_trace(struct pt_regs *regs, int syscall_exit_p);
+
+/* unaligned_32.c */
+asmlinkage void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+asmlinkage void user_unaligned_trap(struct pt_regs *regs, unsigned int insn);
+
+/* windows.c */
+void try_to_clear_window_buffer(struct pt_regs *regs, int who);
+
+/* auxio_32.c */
+void __init auxio_probe(void);
+void __init auxio_power_probe(void);
+
+/* pcic.c */
+extern void __iomem *pcic_regs;
+void pcic_nmi(unsigned int pend, struct pt_regs *regs);
+
+/* time_32.c */
+void __init time_init(void);
+
 #else /* CONFIG_SPARC32 */
 #endif /* CONFIG_SPARC32 */
 #endif /* !(__SPARC_KERNEL_H) */
index b45fe3f..cbf21d0 100644
@@ -13,6 +13,8 @@
 #include <asm/ptrace.h>
 #include <asm/irq.h>
 
+#include "kernel.h"
+
 void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
 {
        struct reg_window *win;
index 1b09735..98d7128 100644
@@ -512,7 +512,8 @@ void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
 /*
  * Called when the probe at kretprobe trampoline is hit
  */
-int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
+static int __kprobes trampoline_probe_handler(struct kprobe *p,
+                                             struct pt_regs *regs)
 {
        struct kretprobe_instance *ri = NULL;
        struct hlist_head *head, empty_rp;
@@ -576,7 +577,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
        return 1;
 }
 
-void kretprobe_trampoline_holder(void)
+static void __used kretprobe_trampoline_holder(void)
 {
        asm volatile(".global kretprobe_trampoline\n"
                     "kretprobe_trampoline:\n"
index b7c6897..683c4af 100644
@@ -32,12 +32,12 @@ struct leon3_gptimer_regs_map *leon3_gptimer_regs; /* timer controller base addr
 
 int leondebug_irq_disable;
 int leon_debug_irqout;
-static int dummy_master_l10_counter;
+static volatile u32 dummy_master_l10_counter;
 unsigned long amba_system_id;
 static DEFINE_SPINLOCK(leon_irq_lock);
 
+static unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
 unsigned long leon3_gptimer_irq; /* interrupt controller irq number */
-unsigned long leon3_gptimer_idx; /* Timer Index (0..6) within Timer Core */
 unsigned int sparc_leon_eirq;
 #define LEON_IMASK(cpu) (&leon3_irqctrl_regs->mask[cpu])
 #define LEON_IACK (&leon3_irqctrl_regs->iclear)
@@ -65,7 +65,7 @@ static void leon_handle_ext_irq(unsigned int irq, struct irq_desc *desc)
 }
 
 /* The extended IRQ controller has been found, this function registers it */
-void leon_eirq_setup(unsigned int eirq)
+static void leon_eirq_setup(unsigned int eirq)
 {
        unsigned long mask, oldmask;
        unsigned int veirq;
@@ -270,7 +270,7 @@ static u32 leon_cycles_offset(void)
 #ifdef CONFIG_SMP
 
 /* smp clockevent irq */
-irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
+static irqreturn_t leon_percpu_timer_ce_interrupt(int irq, void *unused)
 {
        struct clock_event_device *ce;
        int cpu = smp_processor_id();
@@ -313,7 +313,7 @@ void __init leon_init_timers(void)
 
        leondebug_irq_disable = 0;
        leon_debug_irqout = 0;
-       master_l10_counter = (unsigned int *)&dummy_master_l10_counter;
+       master_l10_counter = (u32 __iomem *)&dummy_master_l10_counter;
        dummy_master_l10_counter = 0;
 
        rootnp = of_find_node_by_path("/ambapp0");
index e16c415..899b720 100644
@@ -98,82 +98,3 @@ resource_size_t pcibios_align_resource(void *data, const struct resource *res,
 {
        return res->start;
 }
-
-/* in/out routines taken from pcic.c
- *
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               outb(*(const char *)src, addr);
-               src += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(outsb);
-
-void outsw(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               outw(*(const short *)src, addr);
-               src += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(outsw);
-
-void outsl(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               outl(*(const long *)src, addr);
-               src += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(outsl);
-
-void insb(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               *(unsigned char *)dst = inb(addr);
-               dst += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(insb);
-
-void insw(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               *(unsigned short *)dst = inw(addr);
-               dst += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(insw);
-
-void insl(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               /*
-                * XXX I am sure we are in for an unaligned trap here.
-                */
-               *(unsigned long *)dst = inl(addr);
-               dst += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(insl);
index 6df26e3..c8bf26e 100644
@@ -80,7 +80,7 @@ struct grpci1_regs {
 
 struct grpci1_priv {
        struct leon_pci_info    info; /* must be on top of this structure */
-       struct grpci1_regs      *regs;          /* GRPCI register map */
+       struct grpci1_regs __iomem *regs;               /* GRPCI register map */
        struct device           *dev;
        int                     pci_err_mask;   /* STATUS register error mask */
        int                     irq;            /* LEON irqctrl GRPCI IRQ */
@@ -101,7 +101,7 @@ static struct grpci1_priv *grpci1priv;
 static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
                                unsigned int devfn, int where, u32 val);
 
-int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int grpci1_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct grpci1_priv *priv = dev->bus->sysdata;
        int irq_group;
@@ -144,7 +144,7 @@ static int grpci1_cfg_r32(struct grpci1_priv *priv, unsigned int bus,
                grpci1_cfg_w32(priv, TGT, 0, PCI_COMMAND, tmp);
        } else {
                /* Bus always little endian (unaffected by byte-swapping) */
-               *val = flip_dword(tmp);
+               *val = swab32(tmp);
        }
 
        return 0;
@@ -197,7 +197,7 @@ static int grpci1_cfg_w32(struct grpci1_priv *priv, unsigned int bus,
 
        pci_conf = (unsigned int *) (priv->pci_conf |
                                                (devfn << 8) | (where & 0xfc));
-       LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+       LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
 
        return 0;
 }
@@ -417,10 +417,10 @@ out:
  *  BAR1: peripheral DMA to host's memory (size at least 256MByte)
  *  BAR2..BAR5: not implemented in hardware
  */
-void grpci1_hw_init(struct grpci1_priv *priv)
+static void grpci1_hw_init(struct grpci1_priv *priv)
 {
        u32 ahbadr, bar_sz, data, pciadr;
-       struct grpci1_regs *regs = priv->regs;
+       struct grpci1_regs __iomem *regs = priv->regs;
 
        /* set 1:1 mapping between AHB -> PCI memory space */
        REGSTORE(regs->cfg_stat, priv->pci_area & 0xf0000000);
@@ -509,7 +509,7 @@ static irqreturn_t grpci1_err_interrupt(int irq, void *arg)
 
 static int grpci1_of_probe(struct platform_device *ofdev)
 {
-       struct grpci1_regs *regs;
+       struct grpci1_regs __iomem *regs;
        struct grpci1_priv *priv;
        int err, len;
        const int *tmp;
@@ -690,7 +690,7 @@ err3:
 err2:
        release_resource(&priv->info.mem_space);
 err1:
-       iounmap((void *)priv->pci_io_va);
+       iounmap((void __iomem *)priv->pci_io_va);
        grpci1priv = NULL;
        return err;
 }
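The grpci1 hunks above (and the grpci2 hunks below) replace the
driver-local flip_dword() with the generic swab32() helper: the PCI
bus is little-endian while the LEON CPU is big-endian SPARC, so
config-space words need their byte order reversed. A user-space
equivalent showing exactly what swab32() computes:

    #include <stdint.h>
    #include <stdio.h>

    /* Reverse the byte order of a 32-bit word, as swab32() does. */
    static uint32_t swab32_demo(uint32_t x)
    {
            return ((x & 0x000000ffu) << 24) |
                   ((x & 0x0000ff00u) <<  8) |
                   ((x & 0x00ff0000u) >>  8) |
                   ((x & 0xff000000u) >> 24);
    }

    int main(void)
    {
            printf("0x%08x\n", swab32_demo(0x12345678)); /* 0x78563412 */
            return 0;
    }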
index 24d6a44..e433a4d 100644
@@ -191,7 +191,7 @@ struct grpci2_cap_first {
 
 struct grpci2_priv {
        struct leon_pci_info    info; /* must be on top of this structure */
-       struct grpci2_regs      *regs;
+       struct grpci2_regs __iomem *regs;
        char                    irq;
        char                    irq_mode; /* IRQ Mode from CAPSTS REG */
        char                    bt_enabled;
@@ -215,10 +215,10 @@ struct grpci2_priv {
        struct grpci2_barcfg    tgtbars[6];
 };
 
-DEFINE_SPINLOCK(grpci2_dev_lock);
-struct grpci2_priv *grpci2priv;
+static DEFINE_SPINLOCK(grpci2_dev_lock);
+static struct grpci2_priv *grpci2priv;
 
-int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
+static int grpci2_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 {
        struct grpci2_priv *priv = dev->bus->sysdata;
        int irq_group;
@@ -270,7 +270,7 @@ static int grpci2_cfg_r32(struct grpci2_priv *priv, unsigned int bus,
                *val = 0xffffffff;
        } else {
                /* Bus always little endian (unaffected by byte-swapping) */
-               *val = flip_dword(tmp);
+               *val = swab32(tmp);
        }
 
        return 0;
@@ -328,7 +328,7 @@ static int grpci2_cfg_w32(struct grpci2_priv *priv, unsigned int bus,
 
        pci_conf = (unsigned int *) (priv->pci_conf |
                                                (devfn << 8) | (where & 0xfc));
-       LEON3_BYPASS_STORE_PA(pci_conf, flip_dword(val));
+       LEON3_BYPASS_STORE_PA(pci_conf, swab32(val));
 
        /* Wait until GRPCI2 signals that CFG access is done, it should be
         * done instantaneously unless a DMA operation is ongoing...
@@ -561,10 +561,10 @@ out:
        return virq;
 }
 
-void grpci2_hw_init(struct grpci2_priv *priv)
+static void grpci2_hw_init(struct grpci2_priv *priv)
 {
        u32 ahbadr, pciadr, bar_sz, capptr, io_map, data;
-       struct grpci2_regs *regs = priv->regs;
+       struct grpci2_regs __iomem *regs = priv->regs;
        int i;
        struct grpci2_barcfg *barcfg = priv->tgtbars;
 
@@ -655,7 +655,7 @@ static irqreturn_t grpci2_jump_interrupt(int irq, void *arg)
 static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
 {
        struct grpci2_priv *priv = arg;
-       struct grpci2_regs *regs = priv->regs;
+       struct grpci2_regs __iomem *regs = priv->regs;
        unsigned int status;
 
        status = REGLOAD(regs->sts_cap);
@@ -682,7 +682,7 @@ static irqreturn_t grpci2_err_interrupt(int irq, void *arg)
 
 static int grpci2_of_probe(struct platform_device *ofdev)
 {
-       struct grpci2_regs *regs;
+       struct grpci2_regs __iomem *regs;
        struct grpci2_priv *priv;
        int err, i, len;
        const int *tmp;
@@ -878,7 +878,7 @@ err4:
        release_resource(&priv->info.mem_space);
 err3:
        err = -ENOMEM;
-       iounmap((void *)priv->pci_io_va);
+       iounmap((void __iomem *)priv->pci_io_va);
 err2:
        kfree(priv);
 err1:
index b0b3967..ddcf950 100644
 #include <asm/processor.h>
 
 /* List of Systems that need fixup instructions around power-down instruction */
-unsigned int pmc_leon_fixup_ids[] = {
+static unsigned int pmc_leon_fixup_ids[] = {
        AEROFLEX_UT699,
        GAISLER_GR712RC,
        LEON4_NEXTREME1,
        0
 };
 
-int pmc_leon_need_fixup(void)
+static int pmc_leon_need_fixup(void)
 {
        unsigned int systemid = amba_system_id >> 16;
        unsigned int *id;
@@ -38,7 +38,7 @@ int pmc_leon_need_fixup(void)
  * CPU idle callback function for systems that need some extra handling
  * See .../arch/sparc/kernel/process.c
  */
-void pmc_leon_idle_fixup(void)
+static void pmc_leon_idle_fixup(void)
 {
        /* Prepare an address to a non-cachable region. APB is always
         * none-cachable. One instruction is executed after the Sleep
@@ -62,7 +62,7 @@ void pmc_leon_idle_fixup(void)
  * CPU idle callback function
  * See .../arch/sparc/kernel/process.c
  */
-void pmc_leon_idle(void)
+static void pmc_leon_idle(void)
 {
        /* Interrupts need to be enabled to not hang the CPU */
        local_irq_enable();
index 6edf955..018ef11 100644
@@ -130,7 +130,7 @@ void leon_configure_cache_smp(void)
        local_ops->tlb_all();
 }
 
-void leon_smp_setbroadcast(unsigned int mask)
+static void leon_smp_setbroadcast(unsigned int mask)
 {
        int broadcast =
            ((LEON3_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpstatus)) >>
@@ -148,13 +148,6 @@ void leon_smp_setbroadcast(unsigned int mask)
        LEON_BYPASS_STORE_PA(&(leon3_irqctrl_regs->mpbroadcast), mask);
 }
 
-unsigned int leon_smp_getbroadcast(void)
-{
-       unsigned int mask;
-       mask = LEON_BYPASS_LOAD_PA(&(leon3_irqctrl_regs->mpbroadcast));
-       return mask;
-}
-
 int leon_smp_nrcpus(void)
 {
        int nrcpu =
@@ -266,10 +259,6 @@ void __init leon_smp_done(void)
 
 }
 
-void leon_irq_rotate(int cpu)
-{
-}
-
 struct leon_ipi_work {
        int single;
        int msk;
index 3241f56..de0ee39 100644
@@ -5,8 +5,10 @@
 #include <linux/mod_devicetable.h>
 #include <linux/errno.h>
 #include <linux/irq.h>
-#include <linux/of_device.h>
 #include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
 
 #include "of_device_common.h"
 
index 857ad77..539babf 100644
@@ -28,6 +28,7 @@
 #include <asm/apb.h>
 
 #include "pci_impl.h"
+#include "kernel.h"
 
 /* List of all PCI controllers found in the system. */
 struct pci_pbm_info *pci_pbm_root = NULL;
index 5f68853..75803c7 100644
@@ -48,8 +48,8 @@ struct sparc64_msiq_ops {
                              unsigned long devino);
 };
 
-extern void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
-                                const struct sparc64_msiq_ops *ops);
+void sparc64_pbm_msi_init(struct pci_pbm_info *pbm,
+                         const struct sparc64_msiq_ops *ops);
 
 struct sparc64_msiq_cookie {
        struct pci_pbm_info *pbm;
@@ -158,23 +158,23 @@ extern struct pci_pbm_info *pci_pbm_root;
 extern int pci_num_pbms;
 
 /* PCI bus scanning and fixup support. */
-extern void pci_get_pbm_props(struct pci_pbm_info *pbm);
-extern struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
-                                       struct device *parent);
-extern void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
+void pci_get_pbm_props(struct pci_pbm_info *pbm);
+struct pci_bus *pci_scan_one_pbm(struct pci_pbm_info *pbm,
+                                struct device *parent);
+void pci_determine_mem_io_space(struct pci_pbm_info *pbm);
 
 /* Error reporting support. */
-extern void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
-extern void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
-extern void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_target_abort(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_master_abort(struct pci_pbm_info *, struct pci_bus *);
+void pci_scan_for_parity_error(struct pci_pbm_info *, struct pci_bus *);
 
 /* Configuration space access. */
-extern void pci_config_read8(u8 *addr, u8 *ret);
-extern void pci_config_read16(u16 *addr, u16 *ret);
-extern void pci_config_read32(u32 *addr, u32 *ret);
-extern void pci_config_write8(u8 *addr, u8 val);
-extern void pci_config_write16(u16 *addr, u16 val);
-extern void pci_config_write32(u32 *addr, u32 val);
+void pci_config_read8(u8 *addr, u8 *ret);
+void pci_config_read16(u16 *addr, u16 *ret);
+void pci_config_read32(u32 *addr, u32 *ret);
+void pci_config_write8(u8 *addr, u8 val);
+void pci_config_write16(u16 *addr, u16 val);
+void pci_config_write32(u32 *addr, u32 val);
 
 extern struct pci_ops sun4u_pci_ops;
 extern struct pci_ops sun4v_pci_ops;
index 8e9fc3a..5642212 100644
@@ -6,87 +6,87 @@
 #ifndef _PCI_SUN4V_H
 #define _PCI_SUN4V_H
 
-extern long pci_sun4v_iommu_map(unsigned long devhandle,
-                               unsigned long tsbid,
-                               unsigned long num_ttes,
-                               unsigned long io_attributes,
-                               unsigned long io_page_list_pa);
-extern unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
-                                          unsigned long tsbid,
-                                          unsigned long num_ttes);
-extern unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
-                                           unsigned long tsbid,
-                                           unsigned long *io_attributes,
-                                           unsigned long *real_address);
-extern unsigned long pci_sun4v_config_get(unsigned long devhandle,
-                                         unsigned long pci_device,
-                                         unsigned long config_offset,
-                                         unsigned long size);
-extern int pci_sun4v_config_put(unsigned long devhandle,
-                               unsigned long pci_device,
-                               unsigned long config_offset,
-                               unsigned long size,
-                               unsigned long data);
+long pci_sun4v_iommu_map(unsigned long devhandle,
+                        unsigned long tsbid,
+                        unsigned long num_ttes,
+                        unsigned long io_attributes,
+                        unsigned long io_page_list_pa);
+unsigned long pci_sun4v_iommu_demap(unsigned long devhandle,
+                                   unsigned long tsbid,
+                                   unsigned long num_ttes);
+unsigned long pci_sun4v_iommu_getmap(unsigned long devhandle,
+                                    unsigned long tsbid,
+                                    unsigned long *io_attributes,
+                                    unsigned long *real_address);
+unsigned long pci_sun4v_config_get(unsigned long devhandle,
+                                  unsigned long pci_device,
+                                  unsigned long config_offset,
+                                  unsigned long size);
+int pci_sun4v_config_put(unsigned long devhandle,
+                        unsigned long pci_device,
+                        unsigned long config_offset,
+                        unsigned long size,
+                        unsigned long data);
 
-extern unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
+unsigned long pci_sun4v_msiq_conf(unsigned long devhandle,
                                         unsigned long msiqid,
                                         unsigned long msiq_paddr,
                                         unsigned long num_entries);
-extern unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
-                                        unsigned long msiqid,
-                                        unsigned long *msiq_paddr,
-                                        unsigned long *num_entries);
-extern unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *valid);
-extern unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long valid);
-extern unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *state);
-extern unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long state);
-extern unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *head);
-extern unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long head);
-extern unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
-                                            unsigned long msiqid,
-                                            unsigned long *head);
-extern unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long *valid);
-extern unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long valid);
-extern unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long *msiq);
-extern unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long msiq,
-                                          unsigned long msitype);
-extern unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long *state);
-extern unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long state);
-extern unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long *msiq);
-extern unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
-                                          unsigned long msinum,
-                                          unsigned long msiq);
-extern unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long *valid);
-extern unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
-                                           unsigned long msinum,
-                                           unsigned long valid);
+unsigned long pci_sun4v_msiq_info(unsigned long devhandle,
+                                 unsigned long msiqid,
+                                 unsigned long *msiq_paddr,
+                                 unsigned long *num_entries);
+unsigned long pci_sun4v_msiq_getvalid(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long *valid);
+unsigned long pci_sun4v_msiq_setvalid(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long valid);
+unsigned long pci_sun4v_msiq_getstate(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long *state);
+unsigned long pci_sun4v_msiq_setstate(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long state);
+unsigned long pci_sun4v_msiq_gethead(unsigned long devhandle,
+                                    unsigned long msiqid,
+                                    unsigned long *head);
+unsigned long pci_sun4v_msiq_sethead(unsigned long devhandle,
+                                    unsigned long msiqid,
+                                    unsigned long head);
+unsigned long pci_sun4v_msiq_gettail(unsigned long devhandle,
+                                     unsigned long msiqid,
+                                     unsigned long *head);
+unsigned long pci_sun4v_msi_getvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long *valid);
+unsigned long pci_sun4v_msi_setvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long valid);
+unsigned long pci_sun4v_msi_getmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long *msiq);
+unsigned long pci_sun4v_msi_setmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long msiq,
+                                   unsigned long msitype);
+unsigned long pci_sun4v_msi_getstate(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long *state);
+unsigned long pci_sun4v_msi_setstate(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long state);
+unsigned long pci_sun4v_msg_getmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long *msiq);
+unsigned long pci_sun4v_msg_setmsiq(unsigned long devhandle,
+                                   unsigned long msinum,
+                                   unsigned long msiq);
+unsigned long pci_sun4v_msg_getvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long *valid);
+unsigned long pci_sun4v_msg_setvalid(unsigned long devhandle,
+                                    unsigned long msinum,
+                                    unsigned long valid);
 
 #endif /* !(_PCI_SUN4V_H) */
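The header hunks above only drop the redundant "extern" keyword from function prototypes: functions have external linkage by default, so the two forms are equivalent and the shorter one is now the preferred kernel style. A minimal sketch, with a hypothetical function name:

    /* Hypothetical prototype, not from the patch: both lines declare
     * exactly the same function, since "extern" is implicit on
     * function declarations.
     */
    extern unsigned long hv_example_call(unsigned long devhandle); /* old style */
    unsigned long hv_example_call(unsigned long devhandle);        /* new style */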
index 09f4fdd..6cc78c2 100644
@@ -36,6 +36,7 @@
 #include <asm/uaccess.h>
 #include <asm/irq_regs.h>
 
+#include "kernel.h"
 #include "irq.h"
 
 /*
@@ -162,8 +163,8 @@ static int pcic0_up;
 static struct linux_pcic pcic0;
 
 void __iomem *pcic_regs;
-volatile int pcic_speculative;
-volatile int pcic_trapped;
+static volatile int pcic_speculative;
+static volatile int pcic_trapped;
 
 /* forward */
 unsigned int pcic_build_device_irq(struct platform_device *op,
@@ -329,7 +330,7 @@ int __init pcic_probe(void)
 
        pcic->pcic_res_cfg_addr.name = "pcic_cfg_addr";
        if ((pcic->pcic_config_space_addr =
-           ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == 0) {
+           ioremap(regs[2].phys_addr, regs[2].reg_size * 2)) == NULL) {
                prom_printf("PCIC: Error, cannot map "
                            "PCI Configuration Space Address.\n");
                prom_halt();
@@ -341,7 +342,7 @@ int __init pcic_probe(void)
         */
        pcic->pcic_res_cfg_data.name = "pcic_cfg_data";
        if ((pcic->pcic_config_space_data =
-           ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == 0) {
+           ioremap(regs[3].phys_addr, regs[3].reg_size * 2)) == NULL) {
                prom_printf("PCIC: Error, cannot map "
                            "PCI Configuration Space Data.\n");
                prom_halt();
@@ -353,7 +354,6 @@ int __init pcic_probe(void)
        strcpy(pbm->prom_name, namebuf);
 
        {
-               extern volatile int t_nmi[4];
                extern int pcic_nmi_trap_patch[4];
 
                t_nmi[0] = pcic_nmi_trap_patch[0];
@@ -536,7 +536,7 @@ pcic_fill_irq(struct linux_pcic *pcic, struct pci_dev *dev, int node)
                prom_getstring(node, "name", namebuf, sizeof(namebuf));
        }
 
-       if ((p = pcic->pcic_imap) == 0) {
+       if ((p = pcic->pcic_imap) == NULL) {
                dev->irq = 0;
                return;
        }
@@ -670,30 +670,6 @@ void pcibios_fixup_bus(struct pci_bus *bus)
        }
 }
 
-/*
- * pcic_pin_to_irq() is exported to bus probing code
- */
-unsigned int
-pcic_pin_to_irq(unsigned int pin, const char *name)
-{
-       struct linux_pcic *pcic = &pcic0;
-       unsigned int irq;
-       unsigned int ivec;
-
-       if (pin < 4) {
-               ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_LO);
-               irq = ivec >> (pin << 2) & 0xF;
-       } else if (pin < 8) {
-               ivec = readw(pcic->pcic_regs+PCI_INT_SELECT_HI);
-               irq = ivec >> ((pin-4) << 2) & 0xF;
-       } else {                                        /* Corrupted map */
-               printk("PCIC: BAD PIN %d FOR %s\n", pin, name);
-               for (;;) {}     /* XXX Cannot panic properly in case of PROLL */
-       }
-/* P3 */ /* printk("PCIC: dev %s pin %d ivec 0x%x irq %x\n", name, pin, ivec, irq); */
-       return irq;
-}
-
 /* Makes compiler happy */
 static volatile int pcic_timer_dummy;
 
@@ -783,7 +759,7 @@ int pcibios_enable_device(struct pci_dev *pdev, int mask)
 void pcic_nmi(unsigned int pend, struct pt_regs *regs)
 {
 
-       pend = flip_dword(pend);
+       pend = swab32(pend);
 
        if (!pcic_speculative || (pend & PCI_SYS_INT_PENDING_PIO) == 0) {
                /*
@@ -875,82 +851,4 @@ void __init sun4m_pci_init_IRQ(void)
        sparc_config.load_profile_irq = pcic_load_profile_irq;
 }
 
-/*
- * This probably belongs here rather than ioport.c because
- * we do not want this crud linked into SBus kernels.
- * Also, think for a moment about likes of floppy.c that
- * include architecture specific parts. They may want to redefine ins/outs.
- *
- * We do not use horrible macros here because we want to
- * advance pointer by sizeof(size).
- */
-void outsb(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               outb(*(const char *)src, addr);
-               src += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(outsb);
-
-void outsw(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               outw(*(const short *)src, addr);
-               src += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(outsw);
-
-void outsl(unsigned long addr, const void *src, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               outl(*(const long *)src, addr);
-               src += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(outsl);
-
-void insb(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 1;
-               *(unsigned char *)dst = inb(addr);
-               dst += 1;
-               /* addr += 1; */
-       }
-}
-EXPORT_SYMBOL(insb);
-
-void insw(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 2;
-               *(unsigned short *)dst = inw(addr);
-               dst += 2;
-               /* addr += 2; */
-       }
-}
-EXPORT_SYMBOL(insw);
-
-void insl(unsigned long addr, void *dst, unsigned long count)
-{
-       while (count) {
-               count -= 4;
-               /*
-                * XXX I am sure we are in for an unaligned trap here.
-                */
-               *(unsigned long *)dst = inl(addr);
-               dst += 4;
-               /* addr += 4; */
-       }
-}
-EXPORT_SYMBOL(insl);
-
 subsys_initcall(pcic_init);
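Besides the static-ification, the NULL comparisons, and the removal of the pcic_pin_to_irq() and string-I/O helpers, the pcic.c hunks replace the driver-private flip_dword() with the generic swab32(), presumably because the PCIC's pending-interrupt word is little-endian on a big-endian CPU. A minimal sketch of the call (wrapper name invented):

    #include <linux/types.h>
    #include <linux/swab.h>

    /* swab32() reverses the byte order of a 32-bit value:
     * 0x12345678 becomes 0x78563412, the same transform the
     * pcic_nmi() hunk now performs.
     */
    static u32 example_swap(u32 raw)
    {
            return swab32(raw);
    }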
index b5c38fa..8efd337 100644
@@ -110,7 +110,7 @@ struct cpu_hw_events {
 
        unsigned int            group_flag;
 };
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 
 /* An event map describes the characteristics of a performance
  * counter event.  In particular it gives the encoding as well as
@@ -1153,7 +1153,7 @@ static void perf_stop_nmi_watchdog(void *unused)
                cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }
 
-void perf_event_grab_pmc(void)
+static void perf_event_grab_pmc(void)
 {
        if (atomic_inc_not_zero(&active_events))
                return;
@@ -1169,7 +1169,7 @@ void perf_event_grab_pmc(void)
        mutex_unlock(&pmc_grab_mutex);
 }
 
-void perf_event_release_pmc(void)
+static void perf_event_release_pmc(void)
 {
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
                if (atomic_read(&nmi_active) == 0)
@@ -1669,7 +1669,7 @@ static bool __init supported_pmu(void)
        return false;
 }
 
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
        pr_info("Performance events: ");
 
@@ -1742,10 +1742,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 
        ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
        do {
-               struct sparc_stackf *usf, sf;
+               struct sparc_stackf __user *usf;
+               struct sparc_stackf sf;
                unsigned long pc;
 
-               usf = (struct sparc_stackf *) ufp;
+               usf = (struct sparc_stackf __user *)ufp;
                if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                        break;
 
@@ -1765,17 +1766,19 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
                unsigned long pc;
 
                if (thread32_stack_is_64bit(ufp)) {
-                       struct sparc_stackf *usf, sf;
+                       struct sparc_stackf __user *usf;
+                       struct sparc_stackf sf;
 
                        ufp += STACK_BIAS;
-                       usf = (struct sparc_stackf *) ufp;
+                       usf = (struct sparc_stackf __user *)ufp;
                        if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                                break;
                        pc = sf.callers_pc & 0xffffffff;
                        ufp = ((unsigned long) sf.fp) & 0xffffffff;
                } else {
-                       struct sparc_stackf32 *usf, sf;
-                       usf = (struct sparc_stackf32 *) ufp;
+                       struct sparc_stackf32 __user *usf;
+                       struct sparc_stackf32 sf;
+                       usf = (struct sparc_stackf32 __user *)ufp;
                        if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
                                break;
                        pc = sf.callers_pc;
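The callchain hunks split each "struct sparc_stackf *usf, sf;" pair into an explicitly __user-tagged pointer plus a kernel-side copy, so sparse can verify that user memory is only reached through the checked copy helpers. A simplified sketch of the pattern, with a made-up frame layout:

    #include <linux/uaccess.h>
    #include <linux/errno.h>

    /* Hypothetical frame struct; the point is the pattern: never
     * dereference the __user pointer directly, pull the bytes into
     * a kernel copy first.
     */
    struct frame { unsigned long fp, pc; };

    static int read_user_frame(unsigned long ufp, struct frame *out)
    {
            struct frame __user *usf = (struct frame __user *)ufp;

            if (__copy_from_user_inatomic(out, usf, sizeof(*out)))
                    return -EFAULT;
            return 0;
    }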
index 510baec..50e7b62 100644
@@ -10,6 +10,7 @@
 
 #include <stdarg.h>
 
+#include <linux/elfcore.h>
 #include <linux/errno.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -23,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/pm.h>
 #include <linux/slab.h>
+#include <linux/cpu.h>
 
 #include <asm/auxio.h>
 #include <asm/oplib.h>
@@ -38,6 +40,8 @@
 #include <asm/unistd.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
+
 /* 
  * Power management idle function 
  * Set in pm platform drivers (apc.c and pmc.c)
@@ -102,8 +106,12 @@ void machine_restart(char * cmd)
 void machine_power_off(void)
 {
        if (auxio_power_register &&
-           (strcmp(of_console_device->type, "serial") || scons_pwroff))
-               *auxio_power_register |= AUXIO_POWER_OFF;
+           (strcmp(of_console_device->type, "serial") || scons_pwroff)) {
+               u8 power_register = sbus_readb(auxio_power_register);
+               power_register |= AUXIO_POWER_OFF;
+               sbus_writeb(power_register, auxio_power_register);
+       }
+
        machine_halt();
 }
 
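machine_power_off() previously or-ed AUXIO_POWER_OFF into the register through a plain volatile dereference; the new code routes the read-modify-write through the SBus accessors. The same pattern reduced to a sketch (the register pointer here is hypothetical):

    /* __iomem memory should only be touched via accessors such as
     * sbus_readb()/sbus_writeb(), never by direct dereference;
     * AUXIO_POWER_OFF is the bit the hunk above sets.
     */
    static void set_power_off(u8 __iomem *reg)
    {
            u8 val = sbus_readb(reg);

            val |= AUXIO_POWER_OFF;
            sbus_writeb(val, reg);
    }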
index d7b4967..027e099 100644
@@ -88,7 +88,7 @@ void arch_cpu_idle(void)
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
-void arch_cpu_idle_dead()
+void arch_cpu_idle_dead(void)
 {
        sched_preempt_enable_no_resched();
        cpu_play_dead();
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
        }
 }
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        struct thread_info *tp = current_thread_info();
        struct pt_regs *regs = get_irq_regs();
@@ -251,16 +251,22 @@ void arch_trigger_all_cpu_backtrace(void)
 
        spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
 
-       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
-
        this_cpu = raw_smp_processor_id();
 
-       __global_reg_self(tp, regs, this_cpu);
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+
+       if (include_self)
+               __global_reg_self(tp, regs, this_cpu);
 
        smp_fetch_global_regs();
 
        for_each_online_cpu(cpu) {
-               struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg;
+               struct global_reg_snapshot *gp;
+
+               if (!include_self && cpu == this_cpu)
+                       continue;
+
+               gp = &global_cpu_snapshot[cpu].reg;
 
                __global_reg_poll(gp);
 
@@ -292,7 +298,7 @@ void arch_trigger_all_cpu_backtrace(void)
 
 static void sysrq_handle_globreg(int key)
 {
-       arch_trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace(true);
 }
 
 static struct sysrq_key_op sparc_globalreg_op = {
index cf5fe1c..890281b 100644
@@ -4,7 +4,7 @@
 #include <linux/spinlock.h>
 #include <asm/prom.h>
 
-extern void of_console_init(void);
+void of_console_init(void);
 
 extern unsigned int prom_early_allocated;
 
index 9a690d3..20cc5d8 100644
  *      2 of the License, or (at your option) any later version.
  */
 
+#include <linux/memblock.h>
 #include <linux/kernel.h>
-#include <linux/types.h>
 #include <linux/string.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
 #include <linux/mm.h>
-#include <linux/memblock.h>
 #include <linux/of.h>
 
 #include <asm/prom.h>
index 590b4ed..05a6e30 100644
@@ -30,19 +30,19 @@ enum psycho_error_type {
        UE_ERR, CE_ERR, PCI_ERR
 };
 
-extern void psycho_check_iommu_error(struct pci_pbm_info *pbm,
-                                    unsigned long afsr,
-                                    unsigned long afar,
-                                    enum psycho_error_type type);
+void psycho_check_iommu_error(struct pci_pbm_info *pbm,
+                             unsigned long afsr,
+                             unsigned long afar,
+                             enum psycho_error_type type);
 
-extern irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
+irqreturn_t psycho_pcierr_intr(int irq, void *dev_id);
 
-extern int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
-                            u32 dvma_offset, u32 dma_mask,
-                            unsigned long write_complete_offset);
+int psycho_iommu_init(struct pci_pbm_info *pbm, int tsbsize,
+                     u32 dvma_offset, u32 dma_mask,
+                     unsigned long write_complete_offset);
 
-extern void psycho_pbm_init_common(struct pci_pbm_info *pbm,
-                                  struct platform_device *op,
-                                  const char *chip_name, int chip_type);
+void psycho_pbm_init_common(struct pci_pbm_info *pbm,
+                           struct platform_device *op,
+                           const char *chip_name, int chip_type);
 
 #endif /* _PSYCHO_COMMON_H */
index 896ba7c..a331fdc 100644
@@ -26,6 +26,8 @@
 #include <asm/uaccess.h>
 #include <asm/cacheflush.h>
 
+#include "kernel.h"
+
 /* #define ALLOW_INIT_TRACING */
 
 /*
index 1434526..baef495 100644
@@ -267,7 +267,7 @@ static __init void leon_patch(void)
 }
 
 struct tt_entry *sparc_ttable;
-struct pt_regs fake_swapper_regs;
+static struct pt_regs fake_swapper_regs;
 
 /* Called from head_32.S - before we have setup anything
  * in the kernel. Be very careful with what you do here.
@@ -365,7 +365,7 @@ void __init setup_arch(char **cmdline_p)
 
        prom_setsync(prom_sync_me);
 
-       if((boot_flags&BOOTME_DEBUG) && (linux_dbvec!=0) && 
+       if((boot_flags & BOOTME_DEBUG) && (linux_dbvec != NULL) &&
           ((*(short *)linux_dbvec) != -1)) {
                printk("Booted under KADB. Syncing trap table.\n");
                (*(linux_dbvec->teach_debugger))();
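setup_arch() now spells the debug-vector test as "linux_dbvec != NULL" with conventional spacing; the series applies the same rule throughout, e.g. to the ioremap() checks in pcic.c: pointers compare against NULL, not 0. A small made-up example of the preferred form:

    #include <linux/io.h>
    #include <linux/errno.h>

    static int map_example_regs(unsigned long phys, unsigned long size,
                                void __iomem **out)
    {
            void __iomem *base = ioremap(phys, size);

            if (base == NULL)       /* preferred over "== 0" for pointers */
                    return -ENOMEM;
            *out = base;
            return 0;
    }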
index ee789d2..62deba7 100644
@@ -31,6 +31,7 @@
 #include <asm/switch_to.h>
 
 #include "sigutil.h"
+#include "kernel.h"
 
 /* This magic should be in g_upper[0] for all upper parts
  * to be valid.
@@ -145,7 +146,7 @@ void do_sigreturn32(struct pt_regs *regs)
        unsigned int psr;
        unsigned pc, npc;
        sigset_t set;
-       unsigned seta[_COMPAT_NSIG_WORDS];
+       compat_sigset_t seta;
        int err, i;
        
        /* Always make any pending restarted system calls return -EINTR */
@@ -209,17 +210,13 @@ void do_sigreturn32(struct pt_regs *regs)
                if (restore_rwin_state(compat_ptr(rwin_save)))
                        goto segv;
        }
-       err |= __get_user(seta[0], &sf->info.si_mask);
-       err |= copy_from_user(seta+1, &sf->extramask,
+       err |= __get_user(seta.sig[0], &sf->info.si_mask);
+       err |= copy_from_user(&seta.sig[1], &sf->extramask,
                              (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
        if (err)
                goto segv;
-       switch (_NSIG_WORDS) {
-               case 4: set.sig[3] = seta[6] + (((long)seta[7]) << 32);
-               case 3: set.sig[2] = seta[4] + (((long)seta[5]) << 32);
-               case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
-               case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
-       }
+
+       set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
        set_current_blocked(&set);
        return;
 
@@ -303,12 +300,7 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
                        goto segv;
        }
 
-       switch (_NSIG_WORDS) {
-               case 4: set.sig[3] = seta.sig[6] + (((long)seta.sig[7]) << 32);
-               case 3: set.sig[2] = seta.sig[4] + (((long)seta.sig[5]) << 32);
-               case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
-               case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
-       }
+       set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
        set_current_blocked(&set);
        return;
 segv:
@@ -417,7 +409,7 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
        void __user *tail;
        int sigframe_size;
        u32 psr;
-       unsigned int seta[_COMPAT_NSIG_WORDS];
+       compat_sigset_t seta;
 
        /* 1. Make sure everything is clean */
        synchronize_user_stack();
@@ -481,18 +473,14 @@ static int setup_frame32(struct ksignal *ksig, struct pt_regs *regs,
                err |= __put_user(0, &sf->rwin_save);
        }
 
-       switch (_NSIG_WORDS) {
-       case 4: seta[7] = (oldset->sig[3] >> 32);
-               seta[6] = oldset->sig[3];
-       case 3: seta[5] = (oldset->sig[2] >> 32);
-               seta[4] = oldset->sig[2];
-       case 2: seta[3] = (oldset->sig[1] >> 32);
-               seta[2] = oldset->sig[1];
-       case 1: seta[1] = (oldset->sig[0] >> 32);
-               seta[0] = oldset->sig[0];
-       }
-       err |= __put_user(seta[0], &sf->info.si_mask);
-       err |= __copy_to_user(sf->extramask, seta + 1,
+       /* If these change we need to know - assignments to seta rely on these sizes */
+       BUILD_BUG_ON(_NSIG_WORDS != 1);
+       BUILD_BUG_ON(_COMPAT_NSIG_WORDS != 2);
+       seta.sig[1] = (oldset->sig[0] >> 32);
+       seta.sig[0] = oldset->sig[0];
+
+       err |= __put_user(seta.sig[0], &sf->info.si_mask);
+       err |= __copy_to_user(sf->extramask, &seta.sig[1],
                              (_COMPAT_NSIG_WORDS - 1) * sizeof(unsigned int));
 
        if (!wsaved) {
@@ -622,16 +610,8 @@ static int setup_rt_frame32(struct ksignal *ksig, struct pt_regs *regs,
        /* Setup sigaltstack */
        err |= __compat_save_altstack(&sf->stack, regs->u_regs[UREG_FP]);
 
-       switch (_NSIG_WORDS) {
-       case 4: seta.sig[7] = (oldset->sig[3] >> 32);
-               seta.sig[6] = oldset->sig[3];
-       case 3: seta.sig[5] = (oldset->sig[2] >> 32);
-               seta.sig[4] = oldset->sig[2];
-       case 2: seta.sig[3] = (oldset->sig[1] >> 32);
-               seta.sig[2] = oldset->sig[1];
-       case 1: seta.sig[1] = (oldset->sig[0] >> 32);
-               seta.sig[0] = oldset->sig[0];
-       }
+       seta.sig[1] = (oldset->sig[0] >> 32);
+       seta.sig[0] = oldset->sig[0];
        err |= __copy_to_user(&sf->mask, &seta, sizeof(compat_sigset_t));
 
        if (!wsaved) {
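With _NSIG_WORDS fixed at 1 and _COMPAT_NSIG_WORDS at 2 (now enforced by the BUILD_BUG_ONs above), the old four-case switches collapse to a single split/join between one 64-bit sigset word and two 32-bit compat words. The conversion as a standalone sketch (helper names invented):

    /* Rebuild a 64-bit sigset word from its two 32-bit compat halves;
     * the inverse split is the same shifts run the other way.
     */
    static unsigned long join_sigword(u32 lo, u32 hi)
    {
            return (unsigned long)lo | ((unsigned long)hi << 32);
    }

    static void split_sigword(unsigned long w, u32 *lo, u32 *hi)
    {
            *lo = (u32)w;
            *hi = (u32)(w >> 32);
    }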
index 7d5d8e1..9ee72fc 100644
@@ -28,6 +28,7 @@
 #include <asm/switch_to.h>
 
 #include "sigutil.h"
+#include "kernel.h"
 
 extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
                   void *fpqueue, unsigned long *fpqdepth);
@@ -341,7 +342,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
        err |= __put_user(0, &sf->extra_size);
 
        if (psr & PSR_EF) {
-               __siginfo_fpu_t *fp = tail;
+               __siginfo_fpu_t __user *fp = tail;
                tail += sizeof(*fp);
                err |= save_fpu_state(regs, fp);
                err |= __put_user(fp, &sf->fpu_save);
@@ -349,7 +350,7 @@ static int setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs,
                err |= __put_user(0, &sf->fpu_save);
        }
        if (wsaved) {
-               __siginfo_rwin_t *rwp = tail;
+               __siginfo_rwin_t __user *rwp = tail;
                tail += sizeof(*rwp);
                err |= save_rwin_state(wsaved, rwp);
                err |= __put_user(rwp, &sf->rwin_save);
@@ -517,9 +518,9 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
        }
 }
 
-asmlinkage int
-do_sys_sigstack(struct sigstack __user *ssptr, struct sigstack __user *ossptr,
-               unsigned long sp)
+asmlinkage int do_sys_sigstack(struct sigstack __user *ssptr,
+                               struct sigstack __user *ossptr,
+                               unsigned long sp)
 {
        int ret = -EFAULT;
 
index cd91d01..1a69998 100644
 #include <asm/switch_to.h>
 #include <asm/cacheflush.h>
 
-#include "entry.h"
-#include "systbls.h"
 #include "sigutil.h"
+#include "systbls.h"
+#include "kernel.h"
+#include "entry.h"
 
 /* {set, get}context() needed for 64-bit SparcLinux userland. */
 asmlinkage void sparc64_set_context(struct pt_regs *regs)
@@ -492,7 +493,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
 
 #ifdef CONFIG_COMPAT
        if (test_thread_flag(TIF_32BIT)) {
-               extern void do_signal32(struct pt_regs *);
                do_signal32(regs);
                return;
        }
index a102bfb..7958242 100644
@@ -20,6 +20,7 @@
 #include <linux/seq_file.h>
 #include <linux/cache.h>
 #include <linux/delay.h>
+#include <linux/profile.h>
 #include <linux/cpu.h>
 
 #include <asm/ptrace.h>
@@ -75,8 +76,6 @@ void smp_store_cpu_info(int id)
 
 void __init smp_cpus_done(unsigned int max_cpus)
 {
-       extern void smp4m_smp_done(void);
-       extern void smp4d_smp_done(void);
        unsigned long bogosum = 0;
        int cpu, num = 0;
 
@@ -183,8 +182,6 @@ int setup_profiling_timer(unsigned int multiplier)
 
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
-       extern void __init smp4m_boot_cpus(void);
-       extern void __init smp4d_boot_cpus(void);
        int i, cpuid, extra;
 
        printk("Entering SMP Mode...\n");
@@ -261,8 +258,6 @@ void __init smp_prepare_boot_cpu(void)
 
 int __cpu_up(unsigned int cpu, struct task_struct *tidle)
 {
-       extern int smp4m_boot_one_cpu(int, struct task_struct *);
-       extern int smp4d_boot_one_cpu(int, struct task_struct *);
        int ret=0;
 
        switch(sparc_cpu_model) {
@@ -297,7 +292,7 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        return ret;
 }
 
-void arch_cpu_pre_starting(void *arg)
+static void arch_cpu_pre_starting(void *arg)
 {
        local_ops->cache_all();
        local_ops->tlb_all();
@@ -317,7 +312,7 @@ void arch_cpu_pre_starting(void *arg)
        }
 }
 
-void arch_cpu_pre_online(void *arg)
+static void arch_cpu_pre_online(void *arg)
 {
        unsigned int cpuid = hard_smp_processor_id();
 
@@ -344,7 +339,7 @@ void arch_cpu_pre_online(void *arg)
        }
 }
 
-void sparc_start_secondary(void *arg)
+static void sparc_start_secondary(void *arg)
 {
        unsigned int cpu;
 
index 745a363..41aa247 100644
@@ -25,6 +25,7 @@
 #include <linux/ftrace.h>
 #include <linux/cpu.h>
 #include <linux/slab.h>
+#include <linux/kgdb.h>
 
 #include <asm/head.h>
 #include <asm/ptrace.h>
@@ -35,6 +36,7 @@
 #include <asm/hvtramp.h>
 #include <asm/io.h>
 #include <asm/timer.h>
+#include <asm/setup.h>
 
 #include <asm/irq.h>
 #include <asm/irq_regs.h>
@@ -52,6 +54,7 @@
 #include <asm/pcr.h>
 
 #include "cpumap.h"
+#include "kernel.h"
 
 DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
@@ -272,14 +275,6 @@ static void smp_synchronize_one_tick(int cpu)
 }
 
 #if defined(CONFIG_SUN_LDOMS) && defined(CONFIG_HOTPLUG_CPU)
-/* XXX Put this in some common place. XXX */
-static unsigned long kimage_addr_to_ra(void *p)
-{
-       unsigned long val = (unsigned long) p;
-
-       return kern_base + (val - KERNBASE);
-}
-
 static void ldom_startcpu_cpuid(unsigned int cpu, unsigned long thread_reg,
                                void **descrp)
 {
@@ -867,11 +862,6 @@ extern unsigned long xcall_flush_dcache_page_cheetah;
 #endif
 extern unsigned long xcall_flush_dcache_page_spitfire;
 
-#ifdef CONFIG_DEBUG_DCFLUSH
-extern atomic_t dcpage_flushes;
-extern atomic_t dcpage_flushes_xcall;
-#endif
-
 static inline void __local_flush_dcache_page(struct page *page)
 {
 #ifdef DCACHE_ALIASING_POSSIBLE
index f8933be..a1bb267 100644
@@ -143,7 +143,7 @@ static void sun4d_sbus_handler_irq(int sbusl)
        }
 }
 
-void sun4d_handler_irq(int pil, struct pt_regs *regs)
+void sun4d_handler_irq(unsigned int pil, struct pt_regs *regs)
 {
        struct pt_regs *old_regs;
        /* SBUS IRQ level (1 - 7) */
@@ -236,7 +236,7 @@ static void sun4d_shutdown_irq(struct irq_data *data)
        irq_unlink(data->irq);
 }
 
-struct irq_chip sun4d_irq = {
+static struct irq_chip sun4d_irq = {
        .name           = "sun4d",
        .irq_startup    = sun4d_startup_irq,
        .irq_shutdown   = sun4d_shutdown_irq,
@@ -285,9 +285,9 @@ static void __init sun4d_load_profile_irqs(void)
        }
 }
 
-unsigned int _sun4d_build_device_irq(unsigned int real_irq,
-                                     unsigned int pil,
-                                     unsigned int board)
+static unsigned int _sun4d_build_device_irq(unsigned int real_irq,
+                                            unsigned int pil,
+                                            unsigned int board)
 {
        struct sun4d_handler_data *handler_data;
        unsigned int irq;
@@ -320,8 +320,8 @@ err_out:
 
 
 
-unsigned int sun4d_build_device_irq(struct platform_device *op,
-                                    unsigned int real_irq)
+static unsigned int sun4d_build_device_irq(struct platform_device *op,
+                                           unsigned int real_irq)
 {
        struct device_node *dp = op->dev.of_node;
        struct device_node *board_parent, *bus = dp->parent;
@@ -383,7 +383,8 @@ err_out:
        return irq;
 }
 
-unsigned int sun4d_build_timer_irq(unsigned int board, unsigned int real_irq)
+static unsigned int sun4d_build_timer_irq(unsigned int board,
+                                          unsigned int real_irq)
 {
        return _sun4d_build_device_irq(real_irq, real_irq, board);
 }
index d066eb1..f834224 100644
@@ -48,6 +48,7 @@ SIGN1(sys32_futex, compat_sys_futex, %o1)
 SIGN1(sys32_recvfrom, compat_sys_recvfrom, %o0)
 SIGN1(sys32_recvmsg, compat_sys_recvmsg, %o0)
 SIGN1(sys32_sendmsg, compat_sys_sendmsg, %o0)
+SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2)
 
        .globl          sys32_mmap2
 sys32_mmap2:
index 7136885..022c30c 100644
@@ -49,6 +49,8 @@
 #include <asm/mmu_context.h>
 #include <asm/compat_signal.h>
 
+#include "systbls.h"
+
 asmlinkage long sys32_truncate64(const char __user * path, unsigned long high, unsigned long low)
 {
        if ((int)high < 0)
index 3a8d184..646988d 100644
@@ -24,6 +24,8 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
+#include "systbls.h"
+
 /* #define DEBUG_UNIMP_SYSCALL */
 
 /* XXX Make this per-binary type, this way we can detect the type of
@@ -68,7 +70,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
  * sys_pipe() is the normal C calling standard for creating
  * a pipe. It's not the way unix traditionally does this, though.
  */
-asmlinkage int sparc_pipe(struct pt_regs *regs)
+asmlinkage long sparc_pipe(struct pt_regs *regs)
 {
        int fd[2];
        int error;
@@ -93,7 +95,7 @@ int sparc_mmap_check(unsigned long addr, unsigned long len)
 
 /* Linux version of mmap */
 
-asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long pgoff)
 {
@@ -103,7 +105,7 @@ asmlinkage unsigned long sys_mmap2(unsigned long addr, unsigned long len,
                              pgoff >> (PAGE_SHIFT - 12));
 }
 
-asmlinkage unsigned long sys_mmap(unsigned long addr, unsigned long len,
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
        unsigned long prot, unsigned long flags, unsigned long fd,
        unsigned long off)
 {
@@ -197,7 +199,7 @@ SYSCALL_DEFINE5(rt_sigaction, int, sig,
        return ret;
 }
 
-asmlinkage int sys_getdomainname(char __user *name, int len)
+asmlinkage long sys_getdomainname(char __user *name, int len)
 {
        int nlen, err;
        
index beb0b5a..c85403d 100644
@@ -31,6 +31,7 @@
 #include <asm/unistd.h>
 
 #include "entry.h"
+#include "kernel.h"
 #include "systbls.h"
 
 /* #define DEBUG_UNIMP_SYSCALL */
index 26e6dd7..2dab823 100644
 #ifndef _SYSTBLS_H
 #define _SYSTBLS_H
 
+#include <linux/signal.h>
 #include <linux/kernel.h>
+#include <linux/compat.h>
 #include <linux/types.h>
-#include <linux/signal.h>
+
 #include <asm/utrap.h>
 
-extern asmlinkage unsigned long sys_getpagesize(void);
-extern asmlinkage long sparc_pipe(struct pt_regs *regs);
-extern asmlinkage long sys_sparc_ipc(unsigned int call, int first,
-                              unsigned long second,
-                              unsigned long third,
-                              void __user *ptr, long fifth);
-extern asmlinkage long sparc64_personality(unsigned long personality);
-extern asmlinkage long sys64_munmap(unsigned long addr, size_t len);
-extern asmlinkage unsigned long sys64_mremap(unsigned long addr,
-                                            unsigned long old_len,
-                                            unsigned long new_len,
-                                            unsigned long flags,
-                                            unsigned long new_addr);
-extern asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
-extern asmlinkage long sys_getdomainname(char __user *name, int len);
-extern asmlinkage long sys_utrap_install(utrap_entry_t type,
-                                        utrap_handler_t new_p,
-                                        utrap_handler_t new_d,
-                                        utrap_handler_t __user *old_p,
-                                        utrap_handler_t __user *old_d);
-extern asmlinkage long sparc_memory_ordering(unsigned long model,
-                                            struct pt_regs *regs);
-extern asmlinkage long sys_rt_sigaction(int sig,
-                                       const struct sigaction __user *act,
-                                       struct sigaction __user *oact,
-                                       void __user *restorer,
-                                       size_t sigsetsize);
+asmlinkage unsigned long sys_getpagesize(void);
+asmlinkage long sparc_pipe(struct pt_regs *regs);
+asmlinkage unsigned long c_sys_nis_syscall(struct pt_regs *regs);
+asmlinkage long sys_getdomainname(char __user *name, int len);
+void do_rt_sigreturn(struct pt_regs *regs);
+asmlinkage long sys_mmap(unsigned long addr, unsigned long len,
+                        unsigned long prot, unsigned long flags,
+                        unsigned long fd, unsigned long off);
+asmlinkage void sparc_breakpoint(struct pt_regs *regs);
+
+#ifdef CONFIG_SPARC32
+asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
+                         unsigned long prot, unsigned long flags,
+                         unsigned long fd, unsigned long pgoff);
+long sparc_remap_file_pages(unsigned long start, unsigned long size,
+                           unsigned long prot, unsigned long pgoff,
+                           unsigned long flags);
 
-extern asmlinkage void sparc64_set_context(struct pt_regs *regs);
-extern asmlinkage void sparc64_get_context(struct pt_regs *regs);
-extern void do_rt_sigreturn(struct pt_regs *regs);
+#endif /* CONFIG_SPARC32 */
 
+#ifdef CONFIG_SPARC64
+asmlinkage long sys_sparc_ipc(unsigned int call, int first,
+                             unsigned long second,
+                             unsigned long third,
+                             void __user *ptr, long fifth);
+asmlinkage long sparc64_personality(unsigned long personality);
+asmlinkage long sys64_munmap(unsigned long addr, size_t len);
+asmlinkage unsigned long sys64_mremap(unsigned long addr,
+                                     unsigned long old_len,
+                                     unsigned long new_len,
+                                     unsigned long flags,
+                                     unsigned long new_addr);
+asmlinkage long sys_utrap_install(utrap_entry_t type,
+                                 utrap_handler_t new_p,
+                                 utrap_handler_t new_d,
+                                 utrap_handler_t __user *old_p,
+                                 utrap_handler_t __user *old_d);
+asmlinkage long sparc_memory_ordering(unsigned long model,
+                                     struct pt_regs *regs);
+asmlinkage void sparc64_set_context(struct pt_regs *regs);
+asmlinkage void sparc64_get_context(struct pt_regs *regs);
+asmlinkage long sys32_truncate64(const char __user * path,
+                                unsigned long high,
+                                unsigned long low);
+asmlinkage long sys32_ftruncate64(unsigned int fd,
+                                 unsigned long high,
+                                 unsigned long low);
+struct compat_stat64;
+asmlinkage long compat_sys_stat64(const char __user * filename,
+                                 struct compat_stat64 __user *statbuf);
+asmlinkage long compat_sys_lstat64(const char __user * filename,
+                                  struct compat_stat64 __user *statbuf);
+asmlinkage long compat_sys_fstat64(unsigned int fd,
+                                  struct compat_stat64 __user * statbuf);
+asmlinkage long compat_sys_fstatat64(unsigned int dfd,
+                                    const char __user *filename,
+                                    struct compat_stat64 __user * statbuf, int flag);
+asmlinkage compat_ssize_t sys32_pread64(unsigned int fd,
+                                       char __user *ubuf,
+                                       compat_size_t count,
+                                       unsigned long poshi,
+                                       unsigned long poslo);
+asmlinkage compat_ssize_t sys32_pwrite64(unsigned int fd,
+                                        char __user *ubuf,
+                                        compat_size_t count,
+                                        unsigned long poshi,
+                                        unsigned long poslo);
+asmlinkage long compat_sys_readahead(int fd,
+                                    unsigned long offhi,
+                                    unsigned long offlo,
+                                    compat_size_t count);
+long compat_sys_fadvise64(int fd,
+                         unsigned long offhi,
+                         unsigned long offlo,
+                         compat_size_t len, int advice);
+long compat_sys_fadvise64_64(int fd,
+                            unsigned long offhi, unsigned long offlo,
+                            unsigned long lenhi, unsigned long lenlo,
+                            int advice);
+long sys32_sync_file_range(unsigned int fd,
+                          unsigned long off_high, unsigned long off_low,
+                          unsigned long nb_high, unsigned long nb_low,
+                          unsigned int flags);
+asmlinkage long compat_sys_fallocate(int fd, int mode, u32 offhi, u32 offlo,
+                                    u32 lenhi, u32 lenlo);
+asmlinkage long compat_sys_fstat64(unsigned int fd,
+                                  struct compat_stat64 __user * statbuf);
+asmlinkage long compat_sys_fstatat64(unsigned int dfd,
+                                    const char __user *filename,
+                                    struct compat_stat64 __user * statbuf,
+                                    int flag);
+#endif /* CONFIG_SPARC64 */
 #endif /* _SYSTBLS_H */
index 151ace8..85fe9b1 100644
@@ -86,3 +86,4 @@ sys_call_table:
 /*330*/        .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
 /*335*/        .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+/*345*/        .long sys_renameat2
index 4bd4e2b..33ecba2 100644
@@ -87,6 +87,7 @@ sys_call_table32:
 /*330*/        .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
        .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys32_renameat2
 
 #endif /* CONFIG_COMPAT */
 
@@ -165,3 +166,4 @@ sys_call_table:
 /*330*/        .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
        .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
 /*340*/        .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
+       .word sys_renameat2
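The 32-bit entry goes through SIGN2(sys32_renameat2, sys_renameat2, %o0, %o2) in sys32.S because the olddfd and newdfd arguments are ints: a compat task leaves them zero-extended in the 64-bit argument registers, so values like AT_FDCWD must be sign-extended before the native handler compares them as long. The fixup in C terms (illustrative only, the real work is done in assembly):

    /* 0x00000000ffffff9cUL, as left by a 32-bit caller, becomes
     * -100, i.e. AT_FDCWD, after sign extension through (int).
     */
    static long fixup_fd_arg(unsigned long o0)
    {
            return (long)(int)o0;
    }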
diff --git a/arch/sparc/kernel/tadpole.c b/arch/sparc/kernel/tadpole.c
deleted file mode 100644
index 9aba8bd..0000000
+++ /dev/null
@@ -1,126 +0,0 @@
-/* tadpole.c: Probing for the tadpole clock stopping h/w at boot time.
- *
- * Copyright (C) 1996 David Redman (djhr@tadpole.co.uk)
- */
-
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/sched.h>
-#include <linux/init.h>
-
-#include <asm/asi.h>
-#include <asm/oplib.h>
-#include <asm/io.h>
-
-#define MACIO_SCSI_CSR_ADDR    0x78400000
-#define MACIO_EN_DMA           0x00000200
-#define CLOCK_INIT_DONE                1
-
-static int clk_state;
-static volatile unsigned char *clk_ctrl;
-void (*cpu_pwr_save)(void);
-
-static inline unsigned int ldphys(unsigned int addr)
-{
-       unsigned long data;
-    
-       __asm__ __volatile__("\n\tlda [%1] %2, %0\n\t" : 
-                            "=r" (data) :
-                            "r" (addr), "i" (ASI_M_BYPASS));
-       return data;
-}
-
-static void clk_init(void)
-{
-       __asm__ __volatile__("mov 0x6c, %%g1\n\t"
-                            "mov 0x4c, %%g2\n\t"
-                            "mov 0xdf, %%g3\n\t"
-                            "stb %%g1, [%0+3]\n\t"
-                            "stb %%g2, [%0+3]\n\t"
-                            "stb %%g3, [%0+3]\n\t" : :
-                            "r" (clk_ctrl) :
-                            "g1", "g2", "g3");
-}
-
-static void clk_slow(void)
-{
-       __asm__ __volatile__("mov 0xcc, %%g2\n\t"
-                            "mov 0x4c, %%g3\n\t"
-                            "mov 0xcf, %%g4\n\t"
-                            "mov 0xdf, %%g5\n\t"
-                            "stb %%g2, [%0+3]\n\t"
-                            "stb %%g3, [%0+3]\n\t"
-                            "stb %%g4, [%0+3]\n\t"
-                            "stb %%g5, [%0+3]\n\t" : :
-                            "r" (clk_ctrl) :
-                            "g2", "g3", "g4", "g5");
-}
-
-/*
- * Tadpole is guaranteed to be UP, using local_irq_save.
- */
-static void tsu_clockstop(void)
-{
-       unsigned int mcsr;
-       unsigned long flags;
-
-       if (!clk_ctrl)
-               return;
-       if (!(clk_state & CLOCK_INIT_DONE)) {
-               local_irq_save(flags);
-               clk_init();
-               clk_state |= CLOCK_INIT_DONE;       /* all done */
-               local_irq_restore(flags);
-               return;
-       }
-       if (!(clk_ctrl[2] & 1))
-               return;               /* no speed up yet */
-
-       local_irq_save(flags);
-
-       /* if SCSI DMA in progress, don't slow clock */
-       mcsr = ldphys(MACIO_SCSI_CSR_ADDR);
-       if ((mcsr&MACIO_EN_DMA) != 0) {
-               local_irq_restore(flags);
-               return;
-       }
-       /* TODO... the minimum clock setting ought to increase the
-        * memory refresh interval..
-        */
-       clk_slow();
-       local_irq_restore(flags);
-}
-
-static void swift_clockstop(void)
-{
-       if (!clk_ctrl)
-               return;
-       clk_ctrl[0] = 0;
-}
-
-void __init clock_stop_probe(void)
-{
-       phandle node, clk_nd;
-       char name[20];
-    
-       prom_getstring(prom_root_node, "name", name, sizeof(name));
-       if (strncmp(name, "Tadpole", 7))
-               return;
-       node = prom_getchild(prom_root_node);
-       node = prom_searchsiblings(node, "obio");
-       node = prom_getchild(node);
-       clk_nd = prom_searchsiblings(node, "clk-ctrl");
-       if (!clk_nd)
-               return;
-       printk("Clock Stopping h/w detected... ");
-       clk_ctrl = (char *) prom_getint(clk_nd, "address");
-       clk_state = 0;
-       if (name[10] == '\0') {
-               cpu_pwr_save = tsu_clockstop;
-               printk("enabled (S3)\n");
-       } else if ((name[10] == 'X') || (name[10] == 'G')) {
-               cpu_pwr_save = swift_clockstop;
-               printk("enabled (%s)\n",name+7);
-       } else
-               printk("disabled %s\n",name+7);
-}
index c4c27b0..5923d1e 100644
@@ -36,6 +36,7 @@
 #include <linux/of_device.h>
 #include <linux/platform_device.h>
 
+#include <asm/mc146818rtc.h>
 #include <asm/oplib.h>
 #include <asm/timex.h>
 #include <asm/timer.h>
@@ -47,6 +48,7 @@
 #include <asm/irq_regs.h>
 #include <asm/setup.h>
 
+#include "kernel.h"
 #include "irq.h"
 
 static __cacheline_aligned_in_smp DEFINE_SEQLOCK(timer_cs_lock);
@@ -83,7 +85,7 @@ unsigned long profile_pc(struct pt_regs *regs)
 
 EXPORT_SYMBOL(profile_pc);
 
-__volatile__ unsigned int *master_l10_counter;
+volatile u32 __iomem *master_l10_counter;
 
 int update_persistent_clock(struct timespec now)
 {
@@ -143,9 +145,9 @@ static __init void setup_timer_ce(void)
 
 static unsigned int sbus_cycles_offset(void)
 {
-       unsigned int val, offset;
+       u32 val, offset;
 
-       val = *master_l10_counter;
+       val = sbus_readl(master_l10_counter);
        offset = (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
 
        /* Limit hit? */
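master_l10_counter is now typed as a __iomem pointer and read with sbus_readl() rather than dereferenced, after which the timer value is a plain shift and mask. A sketch of the resulting accessor pattern (wrapper name invented, macros as in the hunk above):

    static u32 read_timer_value(volatile u32 __iomem *counter)
    {
            u32 val = sbus_readl(counter);  /* MMIO read via accessor */

            return (val >> TIMER_VALUE_SHIFT) & TIMER_VALUE_MASK;
    }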
index 6629829..6fd386c 100644
@@ -44,7 +44,7 @@ static void instruction_dump(unsigned long *pc)
 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
 
-void die_if_kernel(char *str, struct pt_regs *regs)
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
        int count = 0;
@@ -219,8 +219,6 @@ static unsigned long fake_fsr;
 static unsigned long fake_queue[32] __attribute__ ((aligned (8)));
 static unsigned long fake_depth;
 
-extern int do_mathemu(struct pt_regs *, struct task_struct *);
-
 void do_fpe_trap(struct pt_regs *regs, unsigned long pc, unsigned long npc,
                 unsigned long psr)
 {
index 4ced92f..fb6640e 100644
 #include <asm/prom.h>
 #include <asm/memctrl.h>
 #include <asm/cacheflush.h>
+#include <asm/setup.h>
 
 #include "entry.h"
+#include "kernel.h"
 #include "kstack.h"
 
 /* When an irrecoverable trap occurs at tl > 0, the trap entry
@@ -2209,8 +2211,6 @@ out:
        exception_exit(prev_state);
 }
 
-extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
-
 void do_fpother(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -2383,7 +2383,7 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
        return (struct reg_window *) (fp + STACK_BIAS);
 }
 
-void die_if_kernel(char *str, struct pt_regs *regs)
+void __noreturn die_if_kernel(char *str, struct pt_regs *regs)
 {
        static int die_counter;
        int count = 0;
@@ -2433,9 +2433,6 @@ EXPORT_SYMBOL(die_if_kernel);
 #define VIS_OPCODE_MASK        ((0x3 << 30) | (0x3f << 19))
 #define VIS_OPCODE_VAL ((0x2 << 30) | (0x36 << 19))
 
-extern int handle_popc(u32 insn, struct pt_regs *regs);
-extern int handle_ldf_stq(u32 insn, struct pt_regs *regs);
-
 void do_illegal_instruction(struct pt_regs *regs)
 {
        enum ctx_state prev_state = exception_enter();
@@ -2486,8 +2483,6 @@ out:
        exception_exit(prev_state);
 }
 
-extern void kernel_unaligned_trap(struct pt_regs *regs, unsigned int insn);
-
 void mem_address_unaligned(struct pt_regs *regs, unsigned long sfar, unsigned long sfsr)
 {
        enum ctx_state prev_state = exception_enter();
index c0ec897..c5c61b3 100644
 #include <linux/smp.h>
 #include <linux/perf_event.h>
 
+#include <asm/setup.h>
+
+#include "kernel.h"
+
 enum direction {
        load,    /* ld, ldd, ldh, ldsh */
        store,   /* st, std, sth, stsh */
index 35ab8b6..62098a8 100644
 #include <linux/context_tracking.h>
 #include <asm/fpumacro.h>
 #include <asm/cacheflush.h>
+#include <asm/setup.h>
 
 #include "entry.h"
+#include "kernel.h"
 
 enum direction {
        load,    /* ld, ldd, ldh, ldsh */
index 3107381..87bab0a 100644
 #include <linux/mm.h>
 #include <linux/smp.h>
 
+#include <asm/cacheflush.h>
 #include <asm/uaccess.h>
 
+#include "kernel.h"
+
 /* Do save's until all user register windows are out of the cpu. */
 void flush_user_windows(void)
 {
index dbe119b..3269b02 100644
@@ -41,7 +41,7 @@ lib-$(CONFIG_SPARC64) += GENpatch.o GENpage.o GENbzero.o
 lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o
 lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o
 
-obj-y                 += iomap.o
+obj-$(CONFIG_SPARC64) += iomap.o
 obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o
 obj-y                 += ksyms.o
 obj-$(CONFIG_SPARC64) += PeeCeeI.o
index d1b2aff..bb587d5 100644
@@ -4,20 +4,20 @@
 #include <asm/byteorder.h>
 
 #define add_ssaaaa(sh, sl, ah, al, bh, bl)                             \
-  __asm__ ("addcc %r4,%5,%1\n\t"                                               \
+  __asm__ ("addcc %r4,%5,%1\n\t"                                       \
           "addx %r2,%3,%0\n"                                           \
-          : "=r" ((USItype)(sh)),                                      \
-            "=&r" ((USItype)(sl))                                      \
+          : "=r" (sh),                                                 \
+            "=&r" (sl)                                                 \
           : "%rJ" ((USItype)(ah)),                                     \
             "rI" ((USItype)(bh)),                                      \
             "%rJ" ((USItype)(al)),                                     \
             "rI" ((USItype)(bl))                                       \
           : "cc")
 #define sub_ddmmss(sh, sl, ah, al, bh, bl)                             \
-  __asm__ ("subcc %r4,%5,%1\n\t"                                               \
+  __asm__ ("subcc %r4,%5,%1\n\t"                                       \
           "subx %r2,%3,%0\n"                                           \
-          : "=r" ((USItype)(sh)),                                      \
-            "=&r" ((USItype)(sl))                                      \
+          : "=r" (sh),                                                 \
+            "=&r" (sl)                                                 \
           : "rJ" ((USItype)(ah)),                                      \
             "rI" ((USItype)(bh)),                                      \
             "rJ" ((USItype)(al)),                                      \
@@ -65,8 +65,8 @@
        "mulscc %%g1,0,%%g1\n\t"                                        \
        "add    %%g1,%%g2,%0\n\t"                                       \
        "rd     %%y,%1\n"                                               \
-          : "=r" ((USItype)(w1)),                                      \
-            "=r" ((USItype)(w0))                                       \
+          : "=r" (w1),                                                 \
+            "=r" (w0)                                                  \
           : "%rI" ((USItype)(u)),                                      \
             "r" ((USItype)(v))                                         \
           : "%g1", "%g2", "cc")
@@ -98,8 +98,8 @@
           "sub %1,%2,%1\n\t"                                           \
           "3:  xnor    %0,0,%0\n\t"                                    \
           "! End of inline udiv_qrnnd\n"                               \
-          : "=&r" ((USItype)(q)),                                      \
-            "=&r" ((USItype)(r))                                       \
+          : "=&r" (q),                                                 \
+            "=&r" (r)                                                  \
           : "r" ((USItype)(d)),                                        \
             "1" ((USItype)(n1)),                                       \
             "0" ((USItype)(n0)) : "%g1", "cc")
index 425d3cf..51320a8 100644
@@ -17,8 +17,8 @@
           "bcs,a,pn %%xcc, 1f\n\t"             \
           "add %0, 1, %0\n"                    \
           "1:"                                 \
-          : "=r" ((UDItype)(sh)),              \
-            "=&r" ((UDItype)(sl))              \
+          : "=r" (sh),                         \
+            "=&r" (sl)                         \
           : "r" ((UDItype)(ah)),               \
             "r" ((UDItype)(bh)),               \
             "r" ((UDItype)(al)),               \
@@ -31,8 +31,8 @@
           "bcs,a,pn %%xcc, 1f\n\t"             \
           "sub %0, 1, %0\n"                    \
           "1:"                                 \
-          : "=r" ((UDItype)(sh)),              \
-            "=&r" ((UDItype)(sl))              \
+          : "=r" (sh),                         \
+            "=&r" (sl)                         \
           : "r" ((UDItype)(ah)),               \
             "r" ((UDItype)(bh)),               \
             "r" ((UDItype)(al)),               \
@@ -64,8 +64,8 @@
                   "sllx %3,32,%3\n\t"                  \
                   "add %1,%3,%1\n\t"                   \
                   "add %5,%2,%0"                       \
-          : "=r" ((UDItype)(wh)),                      \
-            "=&r" ((UDItype)(wl)),                     \
+          : "=r" (wh),                                 \
+            "=&r" (wl),                                \
             "=&r" (tmp1), "=&r" (tmp2), "=&r" (tmp3), "=&r" (tmp4) \
           : "r" ((UDItype)(u)),                        \
             "r" ((UDItype)(v))                         \
index 59dbd46..908e8c1 100644
 #include <asm/pgtable.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
+#include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/traps.h>
 #include <asm/uaccess.h>
 
-int show_unhandled_signals = 1;
+#include "mm_32.h"
 
-static void unhandled_fault(unsigned long, struct task_struct *,
-               struct pt_regs *) __attribute__ ((noreturn));
+int show_unhandled_signals = 1;
 
 static void __noreturn unhandled_fault(unsigned long address,
                                       struct task_struct *tsk,
@@ -141,9 +141,6 @@ static void __do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        force_sig_info (sig, &info, current);
 }
 
-extern unsigned long safe_compute_effective_address(struct pt_regs *,
-                                                   unsigned int);
-
 static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
 {
        unsigned int insn;
index 4ced3fc..587cd05 100644
@@ -32,6 +32,7 @@
 #include <asm/lsu.h>
 #include <asm/sections.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 
 int show_unhandled_signals = 1;
 
@@ -196,9 +197,6 @@ static void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
        force_sig_info(sig, &info, current);
 }
 
-extern int handle_ldf_stq(u32, struct pt_regs *);
-extern int handle_ld_nf(u32, struct pt_regs *);
-
 static unsigned int get_fault_insn(struct pt_regs *regs, unsigned int insn)
 {
        if (!insn) {
index db69870..eb82871 100644
 #include <asm/pgtable.h>
 #include <asm/vaddrs.h>
 #include <asm/pgalloc.h>       /* bug in asm-generic/tlb.h: check_pgt_cache */
+#include <asm/setup.h>
 #include <asm/tlb.h>
 #include <asm/prom.h>
 #include <asm/leon.h>
 
+#include "mm_32.h"
+
 unsigned long *sparc_valid_addr_bitmap;
 EXPORT_SYMBOL(sparc_valid_addr_bitmap);
 
@@ -63,7 +66,6 @@ void show_mem(unsigned int filter)
 }
 
 
-extern unsigned long cmdline_memory_size;
 unsigned long last_valid_pfn;
 
 unsigned long calc_highpages(void)
@@ -246,9 +248,6 @@ unsigned long __init bootmem_init(unsigned long *pages_avail)
  * init routine based upon the Sun model type on the Sparc.
  *
  */
-extern void srmmu_paging_init(void);
-extern void device_scan(void);
-
 void __init paging_init(void)
 {
        srmmu_paging_init();
index ed3c969..16b58ff 100644 (file)
@@ -47,6 +47,7 @@
 #include <asm/prom.h>
 #include <asm/mdesc.h>
 #include <asm/cpudata.h>
+#include <asm/setup.h>
 #include <asm/irq.h>
 
 #include "init_64.h"
@@ -794,11 +795,11 @@ struct node_mem_mask {
 static struct node_mem_mask node_masks[MAX_NUMNODES];
 static int num_node_masks;
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
+
 int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
 
-#ifdef CONFIG_NEED_MULTIPLE_NODES
-
 struct mdesc_mblock {
        u64     base;
        u64     size;
@@ -887,17 +888,21 @@ static void __init allocate_node_data(int nid)
 
 static void init_node_masks_nonnuma(void)
 {
+#ifdef CONFIG_NEED_MULTIPLE_NODES
        int i;
+#endif
 
        numadbg("Initializing tables for non-numa.\n");
 
        node_masks[0].mask = node_masks[0].val = 0;
        num_node_masks = 1;
 
+#ifdef CONFIG_NEED_MULTIPLE_NODES
        for (i = 0; i < NR_CPUS; i++)
                numa_cpu_lookup_table[i] = 0;
 
        cpumask_setall(&numa_cpumask_lookup_table[0]);
+#endif
 }
 
 #ifdef CONFIG_NEED_MULTIPLE_NODES
index 5d3782d..0668b36 100644 (file)
@@ -21,7 +21,7 @@ extern unsigned int sparc64_highest_unlocked_tlb_ent;
 extern unsigned long sparc64_kern_pri_context;
 extern unsigned long sparc64_kern_pri_nuc_bits;
 extern unsigned long sparc64_kern_sec_context;
-extern void mmu_info(struct seq_file *m);
+void mmu_info(struct seq_file *m);
 
 struct linux_prom_translation {
        unsigned long virt;
@@ -36,7 +36,7 @@ extern unsigned int prom_trans_ents;
 /* Exported for SMP bootup purposes. */
 extern unsigned long kern_locked_tte_data;
 
-extern void prom_world(int enter);
+void prom_world(int enter);
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
 #define VMEMMAP_CHUNK_SHIFT    22
index eb99862..f311bf2 100644 (file)
@@ -25,6 +25,8 @@
 #include <asm/dma.h>
 #include <asm/oplib.h>
 
+#include "mm_32.h"
+
 /* #define IOUNIT_DEBUG */
 #ifdef IOUNIT_DEBUG
 #define IOD(x) printk(x)
@@ -38,7 +40,8 @@
 static void __init iounit_iommu_init(struct platform_device *op)
 {
        struct iounit_struct *iounit;
-       iopte_t *xpt, *xptend;
+       iopte_t __iomem *xpt;
+       iopte_t __iomem *xptend;
 
        iounit = kzalloc(sizeof(struct iounit_struct), GFP_ATOMIC);
        if (!iounit) {
@@ -62,10 +65,10 @@ static void __init iounit_iommu_init(struct platform_device *op)
        op->dev.archdata.iommu = iounit;
        iounit->page_table = xpt;
        spin_lock_init(&iounit->lock);
-       
-       for (xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
-            xpt < xptend;)
-               iopte_val(*xpt++) = 0;
+
+       xptend = iounit->page_table + (16 * PAGE_SIZE) / sizeof(iopte_t);
+       for (; xpt < xptend; xpt++)
+               sbus_writel(0, xpt);
 }
 
 static int __init iounit_init(void)
@@ -130,7 +133,7 @@ nexti:      scan = find_next_zero_bit(iounit->bmap, limit, scan);
        vaddr = IOUNIT_DMA_BASE + (scan << PAGE_SHIFT) + (vaddr & ~PAGE_MASK);
        for (k = 0; k < npages; k++, iopte = __iopte(iopte_val(iopte) + 0x100), scan++) {
                set_bit(scan, iounit->bmap);
-               iounit->page_table[scan] = iopte;
+               sbus_writel(iopte, &iounit->page_table[scan]);
        }
        IOD(("%08lx\n", vaddr));
        return vaddr;
@@ -202,7 +205,7 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
        struct iounit_struct *iounit = dev->archdata.iommu;
        unsigned long page, end;
        pgprot_t dvma_prot;
-       iopte_t *iopte;
+       iopte_t __iomem *iopte;
 
        *pba = addr;
 
@@ -224,8 +227,8 @@ static int iounit_map_dma_area(struct device *dev, dma_addr_t *pba, unsigned lon
                        
                        i = ((addr - IOUNIT_DMA_BASE) >> PAGE_SHIFT);
 
-                       iopte = (iopte_t *)(iounit->page_table + i);
-                       *iopte = MKIOPTE(__pa(page));
+                       iopte = iounit->page_table + i;
+                       sbus_writel(MKIOPTE(__pa(page)), iopte);
                }
                addr += PAGE_SIZE;
                va += PAGE_SIZE;
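The io-unit page table lives in SBus device space, so the conversion above
types the pointers __iomem and replaces plain stores with sbus_writel(). That
makes every MMIO access explicit and lets sparse flag a direct dereference.
The idiom, as a sketch with a hypothetical table size 'nptes':

	iopte_t __iomem *p = iounit->page_table;   /* MMIO-resident table */
	iopte_t __iomem *end = p + nptes;          /* 'nptes' is assumed  */

	for (; p < end; p++)
		sbus_writel(0, p);                 /* not: iopte_val(*p) = 0 */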
index 28f96f2..491511d 100644 (file)
@@ -27,6 +27,8 @@
 #include <asm/iommu.h>
 #include <asm/dma.h>
 
+#include "mm_32.h"
+
 /*
  * This can be sized dynamically, but we will do this
  * only when we have a guidance about actual I/O pressures.
@@ -37,9 +39,6 @@
 #define IOMMU_NPTES    (IOMMU_WINSIZE/PAGE_SIZE)       /* 64K PTEs, 256KB */
 #define IOMMU_ORDER    6                               /* 4096 * (1<<6) */
 
-/* srmmu.c */
-extern int viking_mxcc_present;
-extern int flush_page_for_dma_global;
 static int viking_flush;
 /* viking.S */
 extern void viking_flush_page(unsigned long page);
@@ -59,6 +58,8 @@ static void __init sbus_iommu_init(struct platform_device *op)
        struct iommu_struct *iommu;
        unsigned int impl, vers;
        unsigned long *bitmap;
+       unsigned long control;
+       unsigned long base;
        unsigned long tmp;
 
        iommu = kmalloc(sizeof(struct iommu_struct), GFP_KERNEL);
@@ -73,12 +74,14 @@ static void __init sbus_iommu_init(struct platform_device *op)
                prom_printf("Cannot map IOMMU registers\n");
                prom_halt();
        }
-       impl = (iommu->regs->control & IOMMU_CTRL_IMPL) >> 28;
-       vers = (iommu->regs->control & IOMMU_CTRL_VERS) >> 24;
-       tmp = iommu->regs->control;
-       tmp &= ~(IOMMU_CTRL_RNGE);
-       tmp |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
-       iommu->regs->control = tmp;
+
+       control = sbus_readl(&iommu->regs->control);
+       impl = (control & IOMMU_CTRL_IMPL) >> 28;
+       vers = (control & IOMMU_CTRL_VERS) >> 24;
+       control &= ~(IOMMU_CTRL_RNGE);
+       control |= (IOMMU_RNGE_256MB | IOMMU_CTRL_ENAB);
+       sbus_writel(control, &iommu->regs->control);
+
        iommu_invalidate(iommu->regs);
        iommu->start = IOMMU_START;
        iommu->end = 0xffffffff;
@@ -100,7 +103,9 @@ static void __init sbus_iommu_init(struct platform_device *op)
        memset(iommu->page_table, 0, IOMMU_NPTES*sizeof(iopte_t));
        flush_cache_all();
        flush_tlb_all();
-       iommu->regs->base = __pa((unsigned long) iommu->page_table) >> 4;
+
+       base = __pa((unsigned long)iommu->page_table) >> 4;
+       sbus_writel(base, &iommu->regs->base);
        iommu_invalidate(iommu->regs);
 
        bitmap = kmalloc(IOMMU_NPTES>>3, GFP_KERNEL);
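The same treatment for the IOMMU control register: the old code dereferenced
iommu->regs->control several times, each an implicit MMIO access. The new
form does one sbus_readl(), edits a local copy, and writes back once, so the
register traffic is exactly one read and one write. A generic sketch of the
read-modify-write idiom (FIELD_MASK, FIELD_VALUE and ENABLE_BIT stand in for
IOMMU_CTRL_RNGE, IOMMU_RNGE_256MB and IOMMU_CTRL_ENAB):

	unsigned long v = sbus_readl(&regs->control); /* single MMIO read  */

	v &= ~FIELD_MASK;                             /* edit the local    */
	v |= FIELD_VALUE | ENABLE_BIT;                /* copy, not the reg */
	sbus_writel(v, &regs->control);               /* single MMIO write */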
index 5bed085..3b17b6f 100644 (file)
 #include <asm/leon.h>
 #include <asm/tlbflush.h>
 
-#include "srmmu.h"
+#include "mm_32.h"
 
 int leon_flush_during_switch = 1;
-int srmmu_swprobe_trace;
+static int srmmu_swprobe_trace;
 
 static inline unsigned long leon_get_ctable_ptr(void)
 {
diff --git a/arch/sparc/mm/mm_32.h b/arch/sparc/mm/mm_32.h
new file mode 100644 (file)
index 0000000..a6c27ca
--- /dev/null
@@ -0,0 +1,24 @@
+/* fault_32.c - visible as they are called from assembler */
+asmlinkage int lookup_fault(unsigned long pc, unsigned long ret_pc,
+                            unsigned long address);
+asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+                               unsigned long address);
+
+void window_overflow_fault(void);
+void window_underflow_fault(unsigned long sp);
+void window_ret_fault(struct pt_regs *regs);
+
+/* srmmu.c */
+extern char *srmmu_name;
+extern int viking_mxcc_present;
+extern int flush_page_for_dma_global;
+
+extern void (*poke_srmmu)(void);
+
+void __init srmmu_paging_init(void);
+
+/* iommu.c */
+void ld_mmu_iommu(void);
+
+/* io-unit.c */
+void ld_mmu_iounit(void);
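The new mm_32.h collects the extern declarations that were previously
repeated inline in fault_32.c, init_32.c, io-unit.c, iommu.c and srmmu.c
(the deletions in the surrounding hunks). With one shared header, the
defining file and every caller see the same prototype, so the compiler can
catch mismatches. The pattern, trimmed to a sketch:

	/* mm_32.h: the single shared prototype */
	void ld_mmu_iommu(void);

	/* iommu.c: definition, checked against the header */
	#include "mm_32.h"
	void ld_mmu_iommu(void) { /* ... */ }

	/* srmmu.c: caller, no local extern needed anymore */
	#include "mm_32.h"
	void __init load_mmu(void) { ld_mmu_iommu(); }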
index cfbe53c..be65f03 100644 (file)
@@ -49,7 +49,7 @@
 #include <asm/mxcc.h>
 #include <asm/ross.h>
 
-#include "srmmu.h"
+#include "mm_32.h"
 
 enum mbus_module srmmu_modtype;
 static unsigned int hwbug_bitmask;
@@ -100,7 +100,6 @@ static unsigned long srmmu_nocache_end;
 #define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
 
 void *srmmu_nocache_pool;
-void *srmmu_nocache_bitmap;
 static struct bit_map srmmu_nocache_map;
 
 static inline int srmmu_pmd_none(pmd_t pmd)
@@ -173,7 +172,7 @@ static void *__srmmu_get_nocache(int size, int align)
                printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
                       size, (int) srmmu_nocache_size,
                       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
-               return 0;
+               return NULL;
        }
 
        addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
@@ -269,6 +268,7 @@ static void __init srmmu_nocache_calcsize(void)
 
 static void __init srmmu_nocache_init(void)
 {
+       void *srmmu_nocache_bitmap;
        unsigned int bitmap_bits;
        pgd_t *pgd;
        pmd_t *pmd;
@@ -728,7 +728,7 @@ static inline unsigned long srmmu_probe(unsigned long vaddr)
                                     "=r" (retval) :
                                     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
        } else {
-               retval = leon_swprobe(vaddr, 0);
+               retval = leon_swprobe(vaddr, NULL);
        }
        return retval;
 }
@@ -865,8 +865,6 @@ static void __init map_kernel(void)
 
 void (*poke_srmmu)(void) = NULL;
 
-extern unsigned long bootmem_init(unsigned long *pages_avail);
-
 void __init srmmu_paging_init(void)
 {
        int i;
@@ -1771,9 +1769,6 @@ static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
 /* Load up routines and constants for sun4m and sun4d mmu */
 void __init load_mmu(void)
 {
-       extern void ld_mmu_iommu(void);
-       extern void ld_mmu_iounit(void);
-
        /* Functions */
        get_srmmu_type();
 
diff --git a/arch/sparc/mm/srmmu.h b/arch/sparc/mm/srmmu.h
deleted file mode 100644 (file)
index 5703274..0000000
+++ /dev/null
@@ -1,4 +0,0 @@
-/* srmmu.c */
-extern char *srmmu_name;
-
-extern void (*poke_srmmu)(void);
index fe19b81..a065766 100644 (file)
@@ -9,6 +9,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/setup.h>
 #include <asm/tsb.h>
 #include <asm/tlb.h>
 #include <asm/oplib.h>
index 892a102..1f76c22 100644 (file)
@@ -354,7 +354,7 @@ do {        *prog++ = BR_OPC | WDISP22(OFF);                \
  * emit_jump() calls with adjusted offsets.
  */
 
-void bpf_jit_compile(struct sk_filter *fp)
+void bpf_jit_compile(struct bpf_prog *fp)
 {
        unsigned int cleanup_addr, proglen, oldproglen = 0;
        u32 temp[8], *prog, *func, seen = 0, pass;
@@ -808,7 +808,7 @@ out:
        return;
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited)
                module_free(NULL, fp->bpf_func);
index f178b9d..53a696d 100644 (file)
@@ -81,11 +81,6 @@ void prom_feval(const char *fstring)
 }
 EXPORT_SYMBOL(prom_feval);
 
-#ifdef CONFIG_SMP
-extern void smp_capture(void);
-extern void smp_release(void);
-#endif
-
 /* Drop into the prom, with the chance to continue with the 'go'
  * prom command.
  */
index 9472079..f1b3eb1 100644 (file)
@@ -12,6 +12,7 @@
 #include <mem_user.h>
 #include <os.h>
 #include <skas.h>
+#include <kern_util.h>
 
 struct host_vm_change {
        struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
        struct host_vm_op *last;
        int ret = 0;
 
+       if ((addr >= STUB_START) && (addr < STUB_END))
+               return -EINVAL;
+
        if (hvc->index != 0) {
                last = &hvc->ops[hvc->index - 1];
                if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
        /* This is not an else because ret is modified above */
        if (ret) {
                printk(KERN_ERR "fix_range_common: failed, killing current "
-                      "process\n");
+                      "process: %d\n", task_tgid_vnr(current));
+               /* We are under mmap_sem, release it so that current can terminate */
+               up_write(&current->mm->mmap_sem);
                force_sig(SIGKILL, current);
+               do_signal();
        }
 }
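The force_sig(SIGKILL) above is raised against current while
fix_range_common() still holds mmap_sem for writing, and task teardown needs
the same lock, hence the explicit up_write() first and the do_signal() call
so the kill is acted on immediately. The hazard in outline:

	down_write(&current->mm->mmap_sem);
	/* ... mapping work fails ... */

	/* wrong: exiting needs mmap_sem again -> self-deadlock */
	/* force_sig(SIGKILL, current); */

	up_write(&current->mm->mmap_sem);  /* drop the lock first */
	force_sig(SIGKILL, current);
	do_signal();                       /* handle the kill now */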
 
index 974b874..5678c35 100644 (file)
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
        int is_write = FAULT_WRITE(fi);
        unsigned long address = FAULT_ADDRESS(fi);
 
-       if (regs)
+       if (!is_user && regs)
                current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
 
        if (!is_user && (address >= start_vm) && (address < end_vm)) {
index d531879..908579f 100644 (file)
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
 
 void wait_stub_done(int pid)
 {
-       int n, status, err, bad_stop = 0;
+       int n, status, err;
 
        while (1) {
                CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
 
        if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
                return;
-       else
-               bad_stop = 1;
 
 bad_wait:
        err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
        printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
               "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
               status);
-       if (bad_stop)
-               kill(pid, SIGKILL);
-       else
-               fatal_sigsegv();
+       fatal_sigsegv();
 }
 
 extern unsigned long current_stub_stack(void);
index aafad6f..928237a 100644 (file)
@@ -51,9 +51,6 @@ config ARCH_HAS_ILOG2_U32
 config ARCH_HAS_ILOG2_U64
        bool
 
-config ARCH_HAS_CPUFREQ
-       bool
-
 config GENERIC_HWEIGHT
        def_bool y
 
@@ -87,7 +84,6 @@ config ARCH_PUV3
        select GENERIC_CLOCKEVENTS
        select HAVE_CLK
        select ARCH_REQUIRE_GPIOLIB
-       select ARCH_HAS_CPUFREQ
 
 # CONFIGs for ARCH_PUV3
 
@@ -198,9 +194,7 @@ menu "Power management options"
 
 source "kernel/power/Kconfig"
 
-if ARCH_HAS_CPUFREQ
 source "drivers/cpufreq/Kconfig"
-endif
 
 config ARCH_SUSPEND_POSSIBLE
        def_bool y if !ARCH_FPGA
index 39decb6..cb1d8fd 100644 (file)
@@ -39,10 +39,37 @@ extern void __uc32_iounmap(volatile void __iomem *addr);
 #define ioremap_nocache(cookie, size)  __uc32_ioremap(cookie, size)
 #define iounmap(cookie)                        __uc32_iounmap(cookie)
 
+#define readb_relaxed readb
+#define readw_relaxed readw
+#define readl_relaxed readl
+
 #define HAVE_ARCH_PIO_SIZE
 #define PIO_OFFSET             (unsigned int)(PCI_IOBASE)
 #define PIO_MASK               (unsigned int)(IO_SPACE_LIMIT)
 #define PIO_RESERVED           (PIO_OFFSET + PIO_MASK + 1)
 
+#ifdef CONFIG_STRICT_DEVMEM
+
+#include <linux/ioport.h>
+#include <linux/mm.h>
+
+/*
+ * devmem_is_allowed() checks to see if /dev/mem access to a certain
+ * address is valid. The argument is a physical page number.
+ * We mimic x86 here by disallowing access to system RAM as well as
+ * device-exclusive MMIO regions. This effectively disables read()/write()
+ * on /dev/mem.
+ */
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+       if (iomem_is_exclusive(pfn << PAGE_SHIFT))
+               return 0;
+       if (!page_is_ram(pfn))
+               return 1;
+       return 0;
+}
+
+#endif /* CONFIG_STRICT_DEVMEM */
+
 #endif /* __KERNEL__ */
 #endif /* __UNICORE_IO_H__ */
index 233c258..ed6f7d0 100644 (file)
@@ -87,16 +87,16 @@ extern pgprot_t pgprot_kernel;
 
 #define PAGE_NONE              pgprot_user
 #define PAGE_SHARED            __pgprot(pgprot_val(pgprot_user | PTE_READ \
-                                                               | PTE_WRITE)
+                                                               | PTE_WRITE))
 #define PAGE_SHARED_EXEC       __pgprot(pgprot_val(pgprot_user | PTE_READ \
                                                                | PTE_WRITE \
-                                                               | PTE_EXEC)
+                                                               | PTE_EXEC))
 #define PAGE_COPY              __pgprot(pgprot_val(pgprot_user | PTE_READ)
 #define PAGE_COPY_EXEC         __pgprot(pgprot_val(pgprot_user | PTE_READ \
-                                                               | PTE_EXEC)
-#define PAGE_READONLY          __pgprot(pgprot_val(pgprot_user | PTE_READ)
+                                                               | PTE_EXEC))
+#define PAGE_READONLY          __pgprot(pgprot_val(pgprot_user | PTE_READ))
 #define PAGE_READONLY_EXEC     __pgprot(pgprot_val(pgprot_user | PTE_READ \
-                                                               | PTE_EXEC)
+                                                               | PTE_EXEC))
 #define PAGE_KERNEL            pgprot_kernel
 #define PAGE_KERNEL_EXEC       __pgprot(pgprot_val(pgprot_kernel | PTE_EXEC))
 
index 9df53d9..02bf5a4 100644 (file)
@@ -55,6 +55,7 @@ static inline int valid_user_regs(struct pt_regs *regs)
 
 #define instruction_pointer(regs)      ((regs)->UCreg_pc)
 #define user_stack_pointer(regs)       ((regs)->UCreg_sp)
+#define profile_pc(regs)               instruction_pointer(regs)
 
 #endif /* __ASSEMBLY__ */
 #endif
index 18d4563..b1ca775 100644 (file)
@@ -179,7 +179,7 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
        }
 #ifdef CONFIG_CPU_FREQ
        if (clk == &clk_mclk_clk) {
-               u32 pll_rate, divstatus = PM_DIVSTATUS;
+               u32 pll_rate, divstatus = readl(PM_DIVSTATUS);
                int ret, i;
 
                /* lookup mclk_clk_table */
@@ -201,10 +201,10 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
                                / (((divstatus & 0x0000f000) >> 12) + 1);
 
                /* set pll sys cfg reg. */
-               PM_PLLSYSCFG = pll_rate;
+               writel(pll_rate, PM_PLLSYSCFG);
 
-               PM_PMCR = PM_PMCR_CFBSYS;
-               while ((PM_PLLDFCDONE & PM_PLLDFCDONE_SYSDFC)
+               writel(PM_PMCR_CFBSYS, PM_PMCR);
+               while ((readl(PM_PLLDFCDONE) & PM_PLLDFCDONE_SYSDFC)
                                != PM_PLLDFCDONE_SYSDFC)
                        udelay(100);
                        /* about 1ms */
index d285d71..0323528 100644 (file)
 
 #include "ksyms.h"
 
+EXPORT_SYMBOL(find_first_bit);
+EXPORT_SYMBOL(find_first_zero_bit);
 EXPORT_SYMBOL(find_next_zero_bit);
 EXPORT_SYMBOL(find_next_bit);
 
-EXPORT_SYMBOL(__backtrace);
-
        /* platform dependent support */
 EXPORT_SYMBOL(__udelay);
 EXPORT_SYMBOL(__const_udelay);
 
-       /* networking */
-EXPORT_SYMBOL(csum_partial);
-EXPORT_SYMBOL(csum_partial_copy_from_user);
-EXPORT_SYMBOL(csum_partial_copy_nocheck);
-EXPORT_SYMBOL(__csum_ipv6_magic);
-
-       /* io */
-#ifndef __raw_readsb
-EXPORT_SYMBOL(__raw_readsb);
-#endif
-#ifndef __raw_readsw
-EXPORT_SYMBOL(__raw_readsw);
-#endif
-#ifndef __raw_readsl
-EXPORT_SYMBOL(__raw_readsl);
-#endif
-#ifndef __raw_writesb
-EXPORT_SYMBOL(__raw_writesb);
-#endif
-#ifndef __raw_writesw
-EXPORT_SYMBOL(__raw_writesw);
-#endif
-#ifndef __raw_writesl
-EXPORT_SYMBOL(__raw_writesl);
-#endif
-
        /* string / mem functions */
 EXPORT_SYMBOL(strchr);
 EXPORT_SYMBOL(strrchr);
@@ -76,23 +50,12 @@ EXPORT_SYMBOL(__copy_from_user);
 EXPORT_SYMBOL(__copy_to_user);
 EXPORT_SYMBOL(__clear_user);
 
-EXPORT_SYMBOL(__get_user_1);
-EXPORT_SYMBOL(__get_user_2);
-EXPORT_SYMBOL(__get_user_4);
-
-EXPORT_SYMBOL(__put_user_1);
-EXPORT_SYMBOL(__put_user_2);
-EXPORT_SYMBOL(__put_user_4);
-EXPORT_SYMBOL(__put_user_8);
-
 EXPORT_SYMBOL(__ashldi3);
 EXPORT_SYMBOL(__ashrdi3);
 EXPORT_SYMBOL(__divsi3);
 EXPORT_SYMBOL(__lshrdi3);
 EXPORT_SYMBOL(__modsi3);
-EXPORT_SYMBOL(__muldi3);
 EXPORT_SYMBOL(__ucmpdi2);
 EXPORT_SYMBOL(__udivsi3);
 EXPORT_SYMBOL(__umodsi3);
-EXPORT_SYMBOL(__bswapsi2);
 
index 185cdc7..31472ad 100644 (file)
@@ -8,8 +8,6 @@ extern void __ashrdi3(void);
 extern void __divsi3(void);
 extern void __lshrdi3(void);
 extern void __modsi3(void);
-extern void __muldi3(void);
 extern void __ucmpdi2(void);
 extern void __udivsi3(void);
 extern void __umodsi3(void);
-extern void __bswapsi2(void);
index 16bd149..dc41f6d 100644 (file)
 
 void *module_alloc(unsigned long size)
 {
-       struct vm_struct *area;
-
-       size = PAGE_ALIGN(size);
-       area = __get_vm_area(size, VM_ALLOC, MODULES_VADDR, MODULES_END);
-       if (!area)
-               return NULL;
-
-       return __vmalloc_area(area, GFP_KERNEL, PAGE_KERNEL_EXEC);
+       return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+                               GFP_KERNEL, PAGE_KERNEL_EXEC, NUMA_NO_NODE,
+                               __builtin_return_address(0));
 }
 
 int
index 778ebba..b008e99 100644 (file)
@@ -60,6 +60,7 @@ void machine_halt(void)
  * Function pointers to optional machine specific functions
  */
 void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
 
 void machine_power_off(void)
 {
index 87adbf5..3fa317f 100644 (file)
@@ -53,6 +53,10 @@ struct stack {
 
 static struct stack stacks[NR_CPUS];
 
+#ifdef CONFIG_VGA_CONSOLE
+struct screen_info screen_info;
+#endif
+
 char elf_platform[ELF_PLATFORM_SIZE];
 EXPORT_SYMBOL(elf_platform);
 
index de7dc5f..24e8360 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/sched.h>
 #include <linux/uaccess.h>
 
+#include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/unaligned.h>
 
index f30071e..21c00fc 100644 (file)
@@ -19,5 +19,7 @@
 EXPORT_SYMBOL(cpu_dcache_clean_area);
 EXPORT_SYMBOL(cpu_set_pte);
 
+EXPORT_SYMBOL(__cpuc_coherent_kern_range);
+
 EXPORT_SYMBOL(__cpuc_dma_flush_range);
 EXPORT_SYMBOL(__cpuc_dma_clean_range);
index fcefdda..d24887b 100644 (file)
@@ -131,6 +131,7 @@ config X86
        select HAVE_CC_STACKPROTECTOR
        select GENERIC_CPU_AUTOPROBE
        select HAVE_ARCH_AUDITSYSCALL
+       select ARCH_SUPPORTS_ATOMIC_RMW
 
 config INSTRUCTION_DECODER
        def_bool y
@@ -1672,7 +1673,6 @@ config RELOCATABLE
 config RANDOMIZE_BASE
        bool "Randomize the address of the kernel image"
        depends on RELOCATABLE
-       depends on !HIBERNATION
        default n
        ---help---
           Randomizes the physical and virtual address at which the
index 4dbf967..fc6091a 100644 (file)
@@ -289,10 +289,17 @@ unsigned char *choose_kernel_location(unsigned char *input,
        unsigned long choice = (unsigned long)output;
        unsigned long random;
 
+#ifdef CONFIG_HIBERNATION
+       if (!cmdline_find_option_bool("kaslr")) {
+               debug_putstr("KASLR disabled by default...\n");
+               goto out;
+       }
+#else
        if (cmdline_find_option_bool("nokaslr")) {
-               debug_putstr("KASLR disabled...\n");
+               debug_putstr("KASLR disabled by cmdline...\n");
                goto out;
        }
+#endif
 
        /* Record the various known unsafe memory ranges. */
        mem_avoid_init((unsigned long)input, input_size,
index 84c2234..7a6d43a 100644 (file)
@@ -91,10 +91,9 @@ bs_die:
 
        .section ".bsdata", "a"
 bugger_off_msg:
-       .ascii  "Direct floppy boot is not supported. "
-       .ascii  "Use a boot loader program instead.\r\n"
+       .ascii  "Use a boot loader.\r\n"
        .ascii  "\n"
-       .ascii  "Remove disk and press any key to reboot ...\r\n"
+       .ascii  "Remove disk and press any key to reboot...\r\n"
        .byte   0
 
 #ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
 #else
        .word   0x8664                          # x86-64
 #endif
-       .word   3                               # nr_sections
+       .word   4                               # nr_sections
        .long   0                               # TimeDateStamp
        .long   0                               # PointerToSymbolTable
        .long   1                               # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
        .word   0                               # NumberOfLineNumbers
        .long   0x60500020                      # Characteristics (section flags)
 
+       #
+       # The offset & size fields are filled in by build.c.
+       #
+       .ascii  ".bss"
+       .byte   0
+       .byte   0
+       .byte   0
+       .byte   0
+       .long   0
+       .long   0x0
+       .long   0                               # Size of initialized data
+                                               # on disk
+       .long   0x0
+       .long   0                               # PointerToRelocations
+       .long   0                               # PointerToLineNumbers
+       .word   0                               # NumberOfRelocations
+       .word   0                               # NumberOfLineNumbers
+       .long   0xc8000080                      # Characteristics (section flags)
+
 #endif /* CONFIG_EFI_STUB */
 
        # Kernel attributes; used by setup.  This is part 1 of the
index 1a2f212..a7661c4 100644 (file)
@@ -143,7 +143,7 @@ static void usage(void)
 
 #ifdef CONFIG_EFI_STUB
 
-static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
 {
        unsigned int pe_header;
        unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
                        put_unaligned_le32(size, section + 0x8);
 
                        /* section header vma field */
-                       put_unaligned_le32(offset, section + 0xc);
+                       put_unaligned_le32(vma, section + 0xc);
 
                        /* section header 'size of initialised data' field */
-                       put_unaligned_le32(size, section + 0x10);
+                       put_unaligned_le32(datasz, section + 0x10);
 
                        /* section header 'file offset' field */
                        put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
        }
 }
 
+static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
+{
+       update_pecoff_section_header_fields(section_name, offset, size, size, offset);
+}
+
 static void update_pecoff_setup_and_reloc(unsigned int size)
 {
        u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
 
        pe_header = get_unaligned_le32(&buf[0x3c]);
 
-       /* Size of image */
-       put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
-
        /*
         * Size of code: Subtract the size of the first sector (512 bytes)
         * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
        update_pecoff_section_header(".text", text_start, text_sz);
 }
 
+static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
+{
+       unsigned int pe_header;
+       unsigned int bss_sz = init_sz - file_sz;
+
+       pe_header = get_unaligned_le32(&buf[0x3c]);
+
+       /* Size of uninitialized data */
+       put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
+
+       /* Size of image */
+       put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
+
+       update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
+}
+
 static int reserve_pecoff_reloc_section(int c)
 {
        /* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
 static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
 static inline void update_pecoff_text(unsigned int text_start,
                                      unsigned int file_sz) {}
+static inline void update_pecoff_bss(unsigned int file_sz,
+                                    unsigned int init_sz) {}
 static inline void efi_stub_defaults(void) {}
 static inline void efi_stub_entry_update(void) {}
 
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
 
 int main(int argc, char ** argv)
 {
-       unsigned int i, sz, setup_sectors;
+       unsigned int i, sz, setup_sectors, init_sz;
        int c;
        u32 sys_size;
        struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
        buf[0x1f1] = setup_sectors-1;
        put_unaligned_le32(sys_size, &buf[0x1f4]);
 
-       update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
+       update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
+       init_sz = get_unaligned_le32(&buf[0x260]);
+       update_pecoff_bss(i + (sys_size * 16), init_sz);
 
        efi_stub_entry_update();
 
index f30cd10..8626b03 100644 (file)
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
 
        /* save number of bits */
        bits[1] = cpu_to_be64(sctx->count[0] << 3);
-       bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61;
+       bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
 
        /* Pad out to 112 mod 128 and append length */
        index = sctx->count[0] & 0x7f;
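The moved parenthesis is the whole fix: the OR has to happen before the
endianness conversion. The old line byte-swapped only count[1] << 3 and then
OR'd the native-endian carry bits from count[0] >> 61 into the
already-swapped value, corrupting the length field on little-endian
machines. In isolation, with count0/count1 as stand-in u64 counters:

	__be64 bits[2];

	bits[1] = cpu_to_be64(count0 << 3);
	/* right: assemble the full 64-bit word, then convert once */
	bits[0] = cpu_to_be64(count1 << 3 | count0 >> 61);
	/* wrong: cpu_to_be64(count1 << 3) | count0 >> 61 */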
index cb6cfcd..a80cbb8 100644 (file)
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
 extern void init_ISA_irqs(void);
 
 #ifdef CONFIG_X86_LOCAL_APIC
-void arch_trigger_all_cpu_backtrace(void);
+void arch_trigger_all_cpu_backtrace(bool);
 #define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
 #endif
 
index 4931415..49205d0 100644 (file)
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
 #define KVM_REFILL_PAGES 25
 #define KVM_MAX_CPUID_ENTRIES 80
 #define KVM_NR_FIXED_MTRR_REGION 88
-#define KVM_NR_VAR_MTRR 8
+#define KVM_NR_VAR_MTRR 10
 
 #define ASYNC_PF_PER_VCPU 64
 
@@ -461,7 +461,7 @@ struct kvm_vcpu_arch {
        bool nmi_injected;    /* Trying to inject an NMI this entry */
 
        struct mtrr_state_type mtrr_state;
-       u32 pat;
+       u64 pat;
 
        unsigned switch_db_regs;
        unsigned long db[KVM_NR_DB_REGS];
index 14fd6fd..6205f0c 100644 (file)
@@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
 
 #define ARCH_HAS_USER_SINGLE_STEP_INFO
 
+/*
+ * When hitting ptrace_stop(), we cannot return using SYSRET because
+ * that does not restore the full CPU state, only a minimal set.  The
+ * ptracer can change arbitrary register values, which is usually okay
+ * because the usual ptrace stops run off the signal delivery path which
+ * forces IRET; however, ptrace_event() stops happen in arbitrary places
+ * in the kernel and don't force IRET path.
+ *
+ * So force IRET path after a ptrace stop.
+ */
+#define arch_ptrace_stop_needed(code, info)                            \
+({                                                                     \
+       set_thread_flag(TIF_NOTIFY_RESUME);                             \
+       false;                                                          \
+})
+
 struct user_desc;
 extern int do_get_thread_area(struct task_struct *p, int idx,
                              struct user_desc __user *info);
index c3fcb5d..6a1e71b 100644 (file)
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
 /* "in progress" flag of arch_trigger_all_cpu_backtrace */
 static unsigned long backtrace_flag;
 
-void arch_trigger_all_cpu_backtrace(void)
+void arch_trigger_all_cpu_backtrace(bool include_self)
 {
        int i;
+       int cpu = get_cpu();
 
-       if (test_and_set_bit(0, &backtrace_flag))
+       if (test_and_set_bit(0, &backtrace_flag)) {
                /*
                 * If there is already a trigger_all_cpu_backtrace() in progress
                 * (backtrace_flag == 1), don't output double cpu dump infos.
                 */
+               put_cpu();
                return;
+       }
 
        cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
+       if (!include_self)
+               cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
 
-       printk(KERN_INFO "sending NMI to all CPUs:\n");
-       apic->send_IPI_all(NMI_VECTOR);
+       if (!cpumask_empty(to_cpumask(backtrace_mask))) {
+               pr_info("sending NMI to %s CPUs:\n",
+                       (include_self ? "all" : "other"));
+               apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
+       }
 
        /* Wait for up to 10 seconds for all CPUs to do the backtrace */
        for (i = 0; i < 10 * 1000; i++) {
                if (cpumask_empty(to_cpumask(backtrace_mask)))
                        break;
                mdelay(1);
+               touch_softlockup_watchdog();
        }
 
        clear_bit(0, &backtrace_flag);
        smp_mb__after_atomic();
+       put_cpu();
 }
 
 static int
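arch_trigger_all_cpu_backtrace() gains an include_self flag so a caller that
is already printing its own stack can NMI just the other CPUs, and the IPI
now goes to the computed mask rather than a broadcast; get_cpu()/put_cpu()
pin the caller so the mask stays valid. Hypothetical call sites:

	arch_trigger_all_cpu_backtrace(true);  /* NMI every CPU, self included */
	arch_trigger_all_cpu_backtrace(false); /* caller dumps itself; NMI only
	                                          the remote CPUs */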
index f3a1f04..5848744 100644 (file)
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
        u32 eax;
        u8 ret = 0;
        int idled = 0;
-       int polling;
        int err = 0;
 
        if (!need_resched()) {
index a800290..f9e4fdd 100644 (file)
@@ -370,6 +370,17 @@ static void init_intel(struct cpuinfo_x86 *c)
         */
        detect_extended_topology(c);
 
+       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
+               /*
+                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
+                * detection.
+                */
+               c->x86_max_cores = intel_num_cpu_cores(c);
+#ifdef CONFIG_X86_32
+               detect_ht(c);
+#endif
+       }
+
        l2 = init_intel_cacheinfo(c);
        if (c->cpuid_level > 9) {
                unsigned eax = cpuid_eax(10);
@@ -438,17 +449,6 @@ static void init_intel(struct cpuinfo_x86 *c)
                set_cpu_cap(c, X86_FEATURE_P3);
 #endif
 
-       if (!cpu_has(c, X86_FEATURE_XTOPOLOGY)) {
-               /*
-                * let's use the legacy cpuid vector 0x1 and 0x4 for topology
-                * detection.
-                */
-               c->x86_max_cores = intel_num_cpu_cores(c);
-#ifdef CONFIG_X86_32
-               detect_ht(c);
-#endif
-       }
-
        /* Work around errata */
        srat_detect_node(c);
 
index a952e9c..9c8f739 100644 (file)
@@ -730,6 +730,18 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 #endif
        }
 
+#ifdef CONFIG_X86_HT
+       /*
+        * If cpu_llc_id is not yet set, this means cpuid_level < 4, which
+        * in turn means that the only possibility is SMT (as indicated in
+        * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
+        * that SMT shares all caches, we can unconditionally set cpu_llc_id to
+        * c->phys_proc_id.
+        */
+       if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
+               per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
+#endif
+
        c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
 
        return l2;
index bb92f38..9a79c8d 100644 (file)
@@ -2451,6 +2451,12 @@ static __init int mcheck_init_device(void)
        for_each_online_cpu(i) {
                err = mce_device_create(i);
                if (err) {
+                       /*
+                        * Register notifier anyway (and do not unreg it) so
+                        * that we don't leave undeleted timers, see notifier
+                        * callback above.
+                        */
+                       __register_hotcpu_notifier(&mce_cpu_notifier);
                        cpu_notifier_register_done();
                        goto err_device_create;
                }
@@ -2471,10 +2477,6 @@ static __init int mcheck_init_device(void)
 err_register:
        unregister_syscore_ops(&mce_syscore_ops);
 
-       cpu_notifier_register_begin();
-       __unregister_hotcpu_notifier(&mce_cpu_notifier);
-       cpu_notifier_register_done();
-
 err_device_create:
        /*
         * We didn't keep track of which devices were created above, but
index 2bdfbff..2879ecd 100644 (file)
@@ -118,6 +118,9 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
                        continue;
                if (event->attr.config1 & ~er->valid_mask)
                        return -EINVAL;
+               /* Check if the extra MSRs can be safely accessed */
+               if (!er->extra_msr_access)
+                       return -ENXIO;
 
                reg->idx = er->idx;
                reg->config = event->attr.config1;
index 3b2f9bd..8ade931 100644 (file)
@@ -295,14 +295,16 @@ struct extra_reg {
        u64                     config_mask;
        u64                     valid_mask;
        int                     idx;  /* per_xxx->regs[] reg index */
+       bool                    extra_msr_access;
 };
 
 #define EVENT_EXTRA_REG(e, ms, m, vm, i) {     \
-       .event = (e),           \
-       .msr = (ms),            \
-       .config_mask = (m),     \
-       .valid_mask = (vm),     \
-       .idx = EXTRA_REG_##i,   \
+       .event = (e),                   \
+       .msr = (ms),                    \
+       .config_mask = (m),             \
+       .valid_mask = (vm),             \
+       .idx = EXTRA_REG_##i,           \
+       .extra_msr_access = true,       \
        }
 
 #define INTEL_EVENT_EXTRA_REG(event, msr, vm, idx)     \
index adb02aa..2502d0d 100644 (file)
@@ -1381,6 +1381,15 @@ again:
 
        intel_pmu_lbr_read();
 
+       /*
+        * The CondChgd bit (63) doesn't indicate an overflow condition.
+        * Ignore it and just clear the bit.
+        */
+       if (__test_and_clear_bit(63, (unsigned long *)&status)) {
+               if (!status)
+                       goto done;
+       }
+
        /*
         * PEBS overflow sets bit 62 in the global status register
         */
@@ -2173,6 +2182,41 @@ static void intel_snb_check_microcode(void)
        }
 }
 
+/*
+ * Under certain circumstances, accessing certain MSRs may cause a #GP.
+ * This function tests whether the input MSR can be safely accessed.
+ */
+static bool check_msr(unsigned long msr, u64 mask)
+{
+       u64 val_old, val_new, val_tmp;
+
+       /*
+        * Read the current value, change it and read it back to see if it
+        * matches, this is needed to detect certain hardware emulators
+        * (qemu/kvm) that don't trap on the MSR access and always return 0s.
+        */
+       if (rdmsrl_safe(msr, &val_old))
+               return false;
+
+       /*
+        * Only change the bits which can be updated by wrmsrl.
+        */
+       val_tmp = val_old ^ mask;
+       if (wrmsrl_safe(msr, val_tmp) ||
+           rdmsrl_safe(msr, &val_new))
+               return false;
+
+       if (val_new != val_tmp)
+               return false;
+
+       /*
+        * At this point the MSR is known to be safely accessible.
+        * Restore the old value and return.
+        */
+       wrmsrl(msr, val_old);
+
+       return true;
+}
+
 static __init void intel_sandybridge_quirk(void)
 {
        x86_pmu.check_microcode = intel_snb_check_microcode;
@@ -2262,7 +2306,8 @@ __init int intel_pmu_init(void)
        union cpuid10_ebx ebx;
        struct event_constraint *c;
        unsigned int unused;
-       int version;
+       struct extra_reg *er;
+       int version, i;
 
        if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
                switch (boot_cpu_data.x86) {
@@ -2465,6 +2510,9 @@ __init int intel_pmu_init(void)
        case 62: /* IvyBridge EP */
                memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
                       sizeof(hw_cache_event_ids));
+               /* dTLB-load-misses on IVB is different than SNB */
+               hw_cache_event_ids[C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = 0x8108; /* DTLB_LOAD_MISSES.DEMAND_LD_MISS_CAUSES_A_WALK */
+
                memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs,
                       sizeof(hw_cache_extra_regs));
 
@@ -2565,6 +2613,34 @@ __init int intel_pmu_init(void)
                }
        }
 
+       /*
+        * Accessing the LBR MSRs may cause a #GP under certain
+        * circumstances, e.g. KVM doesn't support the LBR MSRs.
+        * Check all LBR MSRs here and disable LBR access if any of
+        * them cannot be accessed.
+        */
+       if (x86_pmu.lbr_nr && !check_msr(x86_pmu.lbr_tos, 0x3UL))
+               x86_pmu.lbr_nr = 0;
+       for (i = 0; i < x86_pmu.lbr_nr; i++) {
+               if (!(check_msr(x86_pmu.lbr_from + i, 0xffffUL) &&
+                     check_msr(x86_pmu.lbr_to + i, 0xffffUL)))
+                       x86_pmu.lbr_nr = 0;
+       }
+
+       /*
+        * Accessing an extra-reg MSR may cause a #GP under certain
+        * circumstances, e.g. KVM doesn't support the offcore events.
+        * Check all extra_regs here.
+        */
+       if (x86_pmu.extra_regs) {
+               for (er = x86_pmu.extra_regs; er->msr; er++) {
+                       er->extra_msr_access = check_msr(er->msr, 0x1ffUL);
+                       /* Disable LBR select mapping */
+                       if ((er->idx == EXTRA_REG_LBR) && !er->extra_msr_access)
+                               x86_pmu.lbr_sel_map = NULL;
+               }
+       }
+
        /* Support full width counters using alternative MSR range */
        if (x86_pmu.intel_cap.full_width_write) {
                x86_pmu.max_period = x86_pmu.cntval_mask;
index 980970c..696ade3 100644 (file)
@@ -311,9 +311,11 @@ static int alloc_bts_buffer(int cpu)
        if (!x86_pmu.bts)
                return 0;
 
-       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL, node);
-       if (unlikely(!buffer))
+       buffer = kzalloc_node(BTS_BUFFER_SIZE, GFP_KERNEL | __GFP_NOWARN, node);
+       if (unlikely(!buffer)) {
+               WARN_ONCE(1, "%s: BTS buffer allocation failure\n", __func__);
                return -ENOMEM;
+       }
 
        max = BTS_BUFFER_SIZE / BTS_RECORD_SIZE;
        thresh = max / 16;
index 65bbbea..ae6552a 100644 (file)
@@ -550,16 +550,16 @@ static struct extra_reg snbep_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0x6),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0135, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0335, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4135, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4335, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4435, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4835, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a35, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5035, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x8),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x8),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xc),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xc),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0xa),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0xa),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4436, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4836, 0xffff, 0x2),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4a36, 0xffff, 0x2),
@@ -1222,6 +1222,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(SNBEP_CBO_PMON_CTL_TID_EN,
                                  SNBEP_CBO_PMON_CTL_TID_EN, 0x1),
        SNBEP_CBO_EVENT_EXTRA_REG(0x1031, 0x10ff, 0x2),
+
        SNBEP_CBO_EVENT_EXTRA_REG(0x1134, 0xffff, 0x4),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4134, 0xffff, 0xc),
        SNBEP_CBO_EVENT_EXTRA_REG(0x5134, 0xffff, 0xc),
@@ -1245,7 +1246,7 @@ static struct extra_reg ivt_uncore_cbox_extra_regs[] = {
        SNBEP_CBO_EVENT_EXTRA_REG(0x8335, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x0336, 0xffff, 0x10),
-       SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
+       SNBEP_CBO_EVENT_EXTRA_REG(0x2136, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x2336, 0xffff, 0x10),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4136, 0xffff, 0x18),
        SNBEP_CBO_EVENT_EXTRA_REG(0x4336, 0xffff, 0x18),
index f0da82b..0d0c9d4 100644 (file)
@@ -423,8 +423,9 @@ sysenter_past_esp:
        jnz sysenter_audit
 sysenter_do_call:
        cmpl $(NR_syscalls), %eax
-       jae syscall_badsys
+       jae sysenter_badsys
        call *sys_call_table(,%eax,4)
+sysenter_after_call:
        movl %eax,PT_EAX(%esp)
        LOCKDEP_SYS_EXIT
        DISABLE_INTERRUPTS(CLBR_ANY)
@@ -501,6 +502,7 @@ ENTRY(system_call)
        jae syscall_badsys
 syscall_call:
        call *sys_call_table(,%eax,4)
+syscall_after_call:
        movl %eax,PT_EAX(%esp)          # store the return value
 syscall_exit:
        LOCKDEP_SYS_EXIT
@@ -674,8 +676,13 @@ syscall_fault:
 END(syscall_fault)
 
 syscall_badsys:
-       movl $-ENOSYS,PT_EAX(%esp)
-       jmp resume_userspace
+       movl $-ENOSYS,%eax
+       jmp syscall_after_call
+END(syscall_badsys)
+
+sysenter_badsys:
+       movl $-ENOSYS,%eax
+       jmp sysenter_after_call
 END(syscall_badsys)
        CFI_ENDPROC
 
index 6afbb16..94d857f 100644 (file)
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
        if (!pud_present(pud)) {
                pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
                pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
-               paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
+               paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PUD_CLONES; n++)
                        set_pud(&pud_p[n], pud);
        }
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
        if (!pmd_present(pmd)) {
                pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
                pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
-               paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
+               paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
                for (n = 0; n < ESPFIX_PMD_CLONES; n++)
                        set_pmd(&pmd_p[n], pmd);
        }
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
        pte_p = pte_offset_kernel(&pmd, addr);
        stack_page = (void *)__get_free_page(GFP_KERNEL);
        pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
-       paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
        for (n = 0; n < ESPFIX_PTE_CLONES; n++)
                set_pte(&pte_p[n*PTE_STRIDE], pte);
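The rule the espfix fix restores: the paravirt_alloc_*() hook names what the
newly allocated page is, not the level it hangs off. A page that will hold
PMD entries is announced with paravirt_alloc_pmd() even though it is
installed into a PUD slot, and the stack page loses its hook entirely
because it is plain mapped data, not a page table. The pairing, sketched:

	pmd_t *pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);

	paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); /* a PMD page */
	set_pud(&pud_p[n], __pud(__pa(pmd_p) | prot));           /* off a PUD  */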
 
index 7596df6..67e6d19 100644 (file)
@@ -574,6 +574,9 @@ int kprobe_int3_handler(struct pt_regs *regs)
        struct kprobe *p;
        struct kprobe_ctlblk *kcb;
 
+       if (user_mode_vm(regs))
+               return 0;
+
        addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
        /*
         * We don't want to be preempted for the entire
index a0da58d..2851d63 100644 (file)
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
 
                /* Set up to return from userspace.  */
                restorer = current->mm->context.vdso +
-                       selected_vdso32->sym___kernel_sigreturn;
+                       selected_vdso32->sym___kernel_rt_sigreturn;
                if (ksig->ka.sa.sa_flags & SA_RESTORER)
                        restorer = ksig->ka.sa.sa_restorer;
                put_user_ex(restorer, &frame->pretcode);
index c6eb418..0d0e922 100644 (file)
@@ -343,6 +343,7 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
        if (poke_int3_handler(regs))
                return;
 
+       prev_state = exception_enter();
 #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
        if (kgdb_ll_trap(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                                SIGTRAP) == NOTIFY_STOP)
@@ -351,9 +352,8 @@ dotraplinkage void notrace do_int3(struct pt_regs *regs, long error_code)
 
 #ifdef CONFIG_KPROBES
        if (kprobe_int3_handler(regs))
-               return;
+               goto exit;
 #endif
-       prev_state = exception_enter();
 
        if (notify_die(DIE_INT3, "int3", regs, error_code, X86_TRAP_BP,
                        SIGTRAP) == NOTIFY_STOP)
@@ -433,6 +433,8 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        unsigned long dr6;
        int si_code;
 
+       prev_state = exception_enter();
+
        get_debugreg(dr6, 6);
 
        /* Filter out all the reserved bits which are preset to 1 */
@@ -465,7 +467,6 @@ dotraplinkage void do_debug(struct pt_regs *regs, long error_code)
        if (kprobe_debug_handler(regs))
                goto exit;
 #endif
-       prev_state = exception_enter();
 
        if (notify_die(DIE_DEBUG, "debug", regs, (long)&dr6, error_code,
                                                        SIGTRAP) == NOTIFY_STOP)
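Both traps get the same treatment: exception_enter() now runs before the
kprobe hook, and the kprobe early-out becomes "goto exit" instead of a bare
return, so every path that entered also calls exception_exit() and the
context-tracking state stays balanced. The shape of the pairing:

	prev_state = exception_enter();      /* before any early return   */
	if (kprobe_int3_handler(regs))
		goto exit;                   /* not 'return': keep paired */
	/* ... notifiers, do_trap() ... */
exit:
	exception_exit(prev_state);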
index 57e5ce1..ea03031 100644 (file)
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
                tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
                if (!(freq->flags & CPUFREQ_CONST_LOOPS))
                        mark_tsc_unstable("cpufreq changes");
-       }
 
-       set_cyc2ns_scale(tsc_khz, freq->cpu);
+               set_cyc2ns_scale(tsc_khz, freq->cpu);
+       }
 
        return 0;
 }
index ec8366c..b5e994a 100644 (file)
@@ -1462,6 +1462,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
                 */
                if (var->unusable)
                        var->db = 0;
+               var->dpl = to_svm(vcpu)->vmcb->save.cpl;
                break;
        }
 }
index f32a025..ef432f8 100644 (file)
@@ -1898,7 +1898,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
                if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
                        break;
                gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
-               if (kvm_write_guest(kvm, data,
+               if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
                        &tsc_ref, sizeof(tsc_ref)))
                        return 1;
                mark_page_dirty(kvm, gfn);
@@ -5887,6 +5887,18 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                        kvm_x86_ops->set_nmi(vcpu);
                }
        } else if (kvm_cpu_has_injectable_intr(vcpu)) {
+               /*
+                * Because interrupts can be injected asynchronously, we are
+                * calling check_nested_events again here to avoid a race condition.
+                * See https://lkml.org/lkml/2014/7/2/60 for discussion about this
+                * proposal and current concerns.  Perhaps we should be setting
+                * KVM_REQ_EVENT only on certain events and not unconditionally?
+                */
+               if (is_guest_mode(vcpu) && kvm_x86_ops->check_nested_events) {
+                       r = kvm_x86_ops->check_nested_events(vcpu, req_int_win);
+                       if (r != 0)
+                               return r;
+               }
                if (kvm_x86_ops->interrupt_allowed(vcpu)) {
                        kvm_queue_interrupt(vcpu, kvm_cpu_get_interrupt(vcpu),
                                            false);
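The one-liner in set_msr_hyperv_pw() fixes a guest-memory corrupter: the
Hyper-V reference-TSC MSR value packs an enable flag in its low bits and a
guest frame number above HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, so the raw
'data' is not a writable address. The write target has to be rebuilt from
the extracted gfn:

	gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;  /* page number */
	kvm_write_guest(kvm,
			gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT, /* gpa */
			&tsc_ref, sizeof(tsc_ref));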
index 99bef86..5c8cb80 100644 (file)
@@ -211,10 +211,10 @@ struct jit_context {
        bool seen_ld_abs;
 };
 
-static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
+static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                  int oldproglen, struct jit_context *ctx)
 {
-       struct sock_filter_int *insn = bpf_prog->insnsi;
+       struct bpf_insn *insn = bpf_prog->insnsi;
        int insn_cnt = bpf_prog->len;
        u8 temp[64];
        int i;
@@ -235,7 +235,7 @@ static int do_jit(struct sk_filter *bpf_prog, int *addrs, u8 *image,
        /* mov qword ptr [rbp-X],rbx */
        EMIT3_off32(0x48, 0x89, 0x9D, -stacksize);
 
-       /* sk_convert_filter() maps classic BPF register X to R7 and uses R8
+       /* bpf_convert_filter() maps classic BPF register X to R7 and uses R8
         * as temporary, so all tcpdump filters need to spill/fill R7(r13) and
         * R8(r14). R9(r15) spill could be made conditional, but there is only
         * one 'bpf_error' return path out of helper functions inside bpf_jit.S
@@ -841,7 +841,7 @@ common_load:                ctx->seen_ld_abs = true;
                        /* By design x64 JIT should support all BPF instructions
                         * This error will be seen if new instruction was added
                         * to interpreter, but not to JIT
-                        * or if there is junk in sk_filter
+                        * or if there is junk in bpf_prog
                         */
                        pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
                        return -EINVAL;
@@ -862,11 +862,11 @@ common_load:              ctx->seen_ld_abs = true;
        return proglen;
 }
 
-void bpf_jit_compile(struct sk_filter *prog)
+void bpf_jit_compile(struct bpf_prog *prog)
 {
 }
 
-void bpf_int_jit_compile(struct sk_filter *prog)
+void bpf_int_jit_compile(struct bpf_prog *prog)
 {
        struct bpf_binary_header *header = NULL;
        int proglen, oldproglen = 0;
@@ -932,7 +932,7 @@ out:
 
 static void bpf_jit_free_deferred(struct work_struct *work)
 {
-       struct sk_filter *fp = container_of(work, struct sk_filter, work);
+       struct bpf_prog *fp = container_of(work, struct bpf_prog, work);
        unsigned long addr = (unsigned long)fp->bpf_func & PAGE_MASK;
        struct bpf_binary_header *header = (void *)addr;
 
@@ -941,7 +941,7 @@ static void bpf_jit_free_deferred(struct work_struct *work)
        kfree(fp);
 }
 
-void bpf_jit_free(struct sk_filter *fp)
+void bpf_jit_free(struct bpf_prog *fp)
 {
        if (fp->jited) {
                INIT_WORK(&fp->work, bpf_jit_free_deferred);
index 3c0809a..61b04fe 100644 (file)
@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT)               := y
 
 # files to link into the vdso
 vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
-vobjs-nox32 := vdso-fakesections.o
 
 # files to link into kernel
 obj-y                          += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
 #
 CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
        $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-       -fno-omit-frame-pointer -foptimize-sibling-calls
+       -fno-omit-frame-pointer -foptimize-sibling-calls \
+       -DDISABLE_BRANCH_PROFILING
 
 $(vobjs): KBUILD_CFLAGS += $(CFL)
 
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
 
 targets += vdso32/vdso32.lds
 targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-targets += vdso32/vclock_gettime.o
+targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
 
 $(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
 
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
 KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
 KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
 KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
 $(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
 
 $(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
                                 $(obj)/vdso32/vdso32.lds \
                                 $(obj)/vdso32/vclock_gettime.o \
+                                $(obj)/vdso32/vdso-fakesections.o \
                                 $(obj)/vdso32/note.o \
                                 $(obj)/vdso32/%.o
        $(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO    $@
                 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
 
 VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-       -Wl,-Bsymbolic $(LTO_CFLAGS)
+       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
 GCOV_PROFILE := n
 
 #
-# Install the unstripped copies of vdso*.so.
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
 #
 quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-      cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%)
+define cmd_vdso_install
+       cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+       if readelf -n $< |grep -q 'Build ID'; then \
+         buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+         first=`echo $$buildid | cut -b-2`; \
+         last=`echo $$buildid | cut -b3-`; \
+         mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+         ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+       fi
+endef
 
 vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
 
index b2e4f49..9793322 100644 (file)
@@ -11,9 +11,6 @@
  * Check with readelf after changing.
  */
 
-/* Disable profiling for userspace code: */
-#define DISABLE_BRANCH_PROFILING
-
 #include <uapi/linux/time.h>
 #include <asm/vgtod.h>
 #include <asm/hpet.h>
index cb8a8d7..aa5fbfa 100644 (file)
@@ -2,31 +2,20 @@
  * Copyright 2014 Andy Lutomirski
  * Subject to the GNU Public License, v.2
  *
- * Hack to keep broken Go programs working.
- *
- * The Go runtime had a couple of bugs: it would read the section table to try
- * to figure out how many dynamic symbols there were (it shouldn't have looked
- * at the section table at all) and, if there were no SHT_SYNDYM section table
- * entry, it would use an uninitialized value for the number of symbols.  As a
- * workaround, we supply a minimal section table.  vdso2c will adjust the
- * in-memory image so that "vdso_fake_sections" becomes the section table.
- *
- * The bug was introduced by:
- * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
- * and is being addressed in the Go runtime in this issue:
- * https://code.google.com/p/go/issues/detail?id=8197
+ * String table for loadable section headers.  See vdso2c.h for why
+ * this exists.
  */
 
-#ifndef __x86_64__
-#error This hack is specific to the 64-bit vDSO
-#endif
-
-#include <linux/elf.h>
-
-extern const __visible struct elf64_shdr vdso_fake_sections[];
-const __visible struct elf64_shdr vdso_fake_sections[] = {
-       {
-               .sh_type = SHT_DYNSYM,
-               .sh_entsize = sizeof(Elf64_Sym),
-       }
-};
+const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
+       ".hash\0"
+       ".dynsym\0"
+       ".dynstr\0"
+       ".gnu.version\0"
+       ".gnu.version_d\0"
+       ".dynamic\0"
+       ".rodata\0"
+       ".fake_shstrtab\0"  /* Yay, self-referential code. */
+       ".note\0"
+       ".eh_frame_hdr\0"
+       ".eh_frame\0"
+       ".text";
index 2ec72f6..9197544 100644 (file)
@@ -6,6 +6,16 @@
  * This script controls its layout.
  */
 
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 13
+
 SECTIONS
 {
        . = SIZEOF_HEADERS;
@@ -18,35 +28,52 @@ SECTIONS
        .gnu.version_d  : { *(.gnu.version_d) }
        .gnu.version_r  : { *(.gnu.version_r) }
 
+       .dynamic        : { *(.dynamic) }               :text   :dynamic
+
+       .rodata         : {
+               *(.rodata*)
+               *(.data*)
+               *(.sdata*)
+               *(.got.plt) *(.got)
+               *(.gnu.linkonce.d.*)
+               *(.bss*)
+               *(.dynbss*)
+               *(.gnu.linkonce.b.*)
+
+               /*
+                * Ideally this would live in a C file, but that won't
+                * work cleanly for x32 until we start building the x32
+                * C code using an x32 toolchain.
+                */
+               VDSO_FAKE_SECTION_TABLE_START = .;
+               . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+               VDSO_FAKE_SECTION_TABLE_END = .;
+       }                                               :text
+
+       .fake_shstrtab  : { *(.fake_shstrtab) }         :text
+
+
        .note           : { *(.note.*) }                :text   :note
 
        .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
        .eh_frame       : { KEEP (*(.eh_frame)) }       :text
 
-       .dynamic        : { *(.dynamic) }               :text   :dynamic
-
-       .rodata         : { *(.rodata*) }               :text
-       .data           : {
-             *(.data*)
-             *(.sdata*)
-             *(.got.plt) *(.got)
-             *(.gnu.linkonce.d.*)
-             *(.bss*)
-             *(.dynbss*)
-             *(.gnu.linkonce.b.*)
-       }
-
-       .altinstructions        : { *(.altinstructions) }
-       .altinstr_replacement   : { *(.altinstr_replacement) }
 
        /*
-        * Align the actual code well away from the non-instruction data.
-        * This is the best thing for the I-cache.
+        * Text is well-separated from actual data: there's plenty of
+        * stuff that isn't used at runtime in between.
         */
-       . = ALIGN(0x100);
 
        .text           : { *(.text*) }                 :text   =0x90909090,
 
+       /*
+        * At the end so that eu-elflint stays happy when vdso2c strips
+        * these.  A better implementation would avoid allocating space
+        * for these.
+        */
+       .altinstructions        : { *(.altinstructions) }       :text
+       .altinstr_replacement   : { *(.altinstr_replacement) }  :text
+
        /*
         * The remainder of the vDSO consists of special pages that are
         * shared between the kernel and userspace.  It needs to be at the
@@ -75,6 +102,7 @@ SECTIONS
        /DISCARD/ : {
                *(.discard)
                *(.discard.*)
+               *(__bug_table)
        }
 }
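
The SHDR_SIZE values hard-coded at the top of this script must track the real ELF section header sizes. A quick check against <elf.h> (assuming a glibc-style header; any ELF-aware libc works) confirms the constants:

  #include <elf.h>
  #include <stdio.h>

  int main(void)
  {
          /* Should print 64 and 40, matching SHDR_SIZE above. */
          printf("Elf64_Shdr: %zu\n", sizeof(Elf64_Shdr));
          printf("Elf32_Shdr: %zu\n", sizeof(Elf32_Shdr));
          return 0;
  }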
 
index 75e3404..6807932 100644 (file)
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSO64
+
 #include "vdso-layout.lds.S"
 
 /*
index 7a6bf50..238dbe8 100644 (file)
@@ -23,6 +23,8 @@ enum {
        sym_vvar_page,
        sym_hpet_page,
        sym_end_mapping,
+       sym_VDSO_FAKE_SECTION_TABLE_START,
+       sym_VDSO_FAKE_SECTION_TABLE_END,
 };
 
 const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
        sym_hpet_page,
 };
 
-char const * const required_syms[] = {
-       [sym_vvar_page] = "vvar_page",
-       [sym_hpet_page] = "hpet_page",
-       [sym_end_mapping] = "end_mapping",
-       "VDSO32_NOTE_MASK",
-       "VDSO32_SYSENTER_RETURN",
-       "__kernel_vsyscall",
-       "__kernel_sigreturn",
-       "__kernel_rt_sigreturn",
+struct vdso_sym {
+       const char *name;
+       bool export;
+};
+
+struct vdso_sym required_syms[] = {
+       [sym_vvar_page] = {"vvar_page", true},
+       [sym_hpet_page] = {"hpet_page", true},
+       [sym_end_mapping] = {"end_mapping", true},
+       [sym_VDSO_FAKE_SECTION_TABLE_START] = {
+               "VDSO_FAKE_SECTION_TABLE_START", false
+       },
+       [sym_VDSO_FAKE_SECTION_TABLE_END] = {
+               "VDSO_FAKE_SECTION_TABLE_END", false
+       },
+       {"VDSO32_NOTE_MASK", true},
+       {"VDSO32_SYSENTER_RETURN", true},
+       {"__kernel_vsyscall", true},
+       {"__kernel_sigreturn", true},
+       {"__kernel_rt_sigreturn", true},
 };
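
The new export flag controls only what vdso2c prints into the generated image struct; unexported symbols are still resolved and usable internally (here, for locating the fake section table). A cut-down sketch of the emit loop this enables, matching the NSYMS loop later in this patch:

  #include <stdbool.h>
  #include <stdio.h>

  struct vdso_sym { const char *name; bool export; };

  static const struct vdso_sym syms[] = {
          { "vvar_page", true },
          { "VDSO_FAKE_SECTION_TABLE_START", false },  /* internal only */
  };

  int main(void)
  {
          for (unsigned int i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
                  if (syms[i].export)
                          printf("\t.sym_%s = ...,\n", syms[i].name);
          return 0;
  }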
 
 __attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
 
 #define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
 
-#define BITS 64
-#define GOFUNC go64
-#define Elf_Ehdr Elf64_Ehdr
-#define Elf_Shdr Elf64_Shdr
-#define Elf_Phdr Elf64_Phdr
-#define Elf_Sym Elf64_Sym
-#define Elf_Dyn Elf64_Dyn
+#define BITSFUNC3(name, bits) name##bits
+#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
-
-#define BITS 32
-#define GOFUNC go32
-#define Elf_Ehdr Elf32_Ehdr
-#define Elf_Shdr Elf32_Shdr
-#define Elf_Phdr Elf32_Phdr
-#define Elf_Sym Elf32_Sym
-#define Elf_Dyn Elf32_Dyn
+#undef ELF_BITS
+
+#define ELF_BITS 32
 #include "vdso2c.h"
-#undef BITS
-#undef GOFUNC
-#undef Elf_Ehdr
-#undef Elf_Shdr
-#undef Elf_Phdr
-#undef Elf_Sym
-#undef Elf_Dyn
+#undef ELF_BITS
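
The BITSFUNC/ELF macros rely on double expansion: the extra indirection lets ELF_BITS be substituted before the ## paste happens, which is why the header can simply be included twice. A self-contained illustration of the idiom (the hello* names are invented):

  #include <stdio.h>

  #define BITSFUNC3(name, bits)  name##bits
  #define BITSFUNC2(name, bits)  BITSFUNC3(name, bits)
  #define BITSFUNC(name)         BITSFUNC2(name, ELF_BITS)

  #define ELF_BITS 64
  static void BITSFUNC(hello)(void) { puts("hello64"); }  /* defines hello64() */
  #undef ELF_BITS

  #define ELF_BITS 32
  static void BITSFUNC(hello)(void) { puts("hello32"); }  /* defines hello32() */
  #undef ELF_BITS

  int main(void)
  {
          hello64();
          hello32();
          return 0;
  }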
 
 static void go(void *addr, size_t len, FILE *outfile, const char *name)
 {
index c6eefaf..11b65d4 100644 (file)
  * are built for 32-bit userspace.
  */
 
-static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
+/*
+ * We're writing a section table for a few reasons:
+ *
+ * The Go runtime had a couple of bugs: it would read the section
+ * table to try to figure out how many dynamic symbols there were (it
+ * shouldn't have looked at the section table at all) and, if there
+ * were no SHT_DYNSYM section table entry, it would use an
+ * uninitialized value for the number of symbols.  An empty DYNSYM
+ * table would work, but I see no reason not to write a valid one (and
+ * keep full performance for old Go programs).  This hack is only
+ * needed on x86_64.
+ *
+ * The bug was introduced on 2012-08-31 by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b
+ * and was fixed on 2014-06-13 by:
+ * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+struct BITSFUNC(fake_sections)
+{
+       ELF(Shdr) *table;
+       unsigned long table_offset;
+       int count, max_count;
+
+       int in_shstrndx;
+       unsigned long shstr_offset;
+       const char *shstrtab;
+       size_t shstrtab_len;
+
+       int out_shstrndx;
+};
+
+static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
+                                         const char *name)
+{
+       const char *outname = out->shstrtab;
+       while (outname - out->shstrtab < out->shstrtab_len) {
+               if (!strcmp(name, outname))
+                       return (outname - out->shstrtab) + out->shstr_offset;
+               outname += strlen(outname) + 1;
+       }
+
+       if (*name)
+               printf("Warning: could not find output name \"%s\"\n", name);
+       return out->shstr_offset + out->shstrtab_len - 1;  /* Use a null. */
+}
+
+static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
+{
+       if (!out->in_shstrndx)
+               fail("didn't find the fake shstrndx\n");
+
+       memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
+
+       if (out->max_count < 1)
+               fail("we need at least two fake output sections\n");
+
+       PUT_LE(&out->table[0].sh_type, SHT_NULL);
+       PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
+
+       out->count = 1;
+}
+
+static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
+                                  int in_idx, const ELF(Shdr) *in,
+                                  const char *name)
+{
+       uint64_t flags = GET_LE(&in->sh_flags);
+
+       bool copy = flags & SHF_ALLOC &&
+               (GET_LE(&in->sh_size) ||
+                (GET_LE(&in->sh_type) != SHT_RELA &&
+                 GET_LE(&in->sh_type) != SHT_REL)) &&
+               strcmp(name, ".altinstructions") &&
+               strcmp(name, ".altinstr_replacement");
+
+       if (!copy)
+               return;
+
+       if (out->count >= out->max_count)
+               fail("too many copied sections (max = %d)\n", out->max_count);
+
+       if (in_idx == out->in_shstrndx)
+               out->out_shstrndx = out->count;
+
+       out->table[out->count] = *in;
+       PUT_LE(&out->table[out->count].sh_name,
+              BITSFUNC(find_shname)(out, name));
+
+       /* elfutils requires that a strtab have the correct type. */
+       if (!strcmp(name, ".fake_shstrtab"))
+               PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
+
+       out->count++;
+}
+
+static void BITSFUNC(go)(void *addr, size_t len,
+                        FILE *outfile, const char *name)
 {
        int found_load = 0;
        unsigned long load_size = -1;  /* Work around bogus warning */
        unsigned long data_size;
-       Elf_Ehdr *hdr = (Elf_Ehdr *)addr;
+       ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
        int i;
        unsigned long j;
-       Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+       ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
                *alt_sec = NULL;
-       Elf_Dyn *dyn = 0, *dyn_end = 0;
+       ELF(Dyn) *dyn = 0, *dyn_end = 0;
        const char *secstrings;
        uint64_t syms[NSYMS] = {};
 
-       uint64_t fake_sections_value = 0, fake_sections_size = 0;
+       struct BITSFUNC(fake_sections) fake_sections = {};
 
-       Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff));
+       ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
 
        /* Walk the segment table. */
        for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +167,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
        for (i = 0; dyn + i < dyn_end &&
                     GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
                typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
-               if (tag == DT_REL || tag == DT_RELSZ ||
+               if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
                    tag == DT_RELENT || tag == DT_TEXTREL)
                        fail("vdso image contains dynamic relocations\n");
        }
@@ -61,7 +177,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
                GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
        secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
        for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
-               Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) +
+               ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
                        GET_LE(&hdr->e_shentsize) * i;
                if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
                        symtab_hdr = sh;
@@ -82,29 +198,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
             i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
             i++) {
                int k;
-               Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
+               ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
                        GET_LE(&symtab_hdr->sh_entsize) * i;
                const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
                        GET_LE(&sym->st_name);
 
                for (k = 0; k < NSYMS; k++) {
-                       if (!strcmp(name, required_syms[k])) {
+                       if (!strcmp(name, required_syms[k].name)) {
                                if (syms[k]) {
                                        fail("duplicate symbol %s\n",
-                                            required_syms[k]);
+                                            required_syms[k].name);
                                }
                                syms[k] = GET_LE(&sym->st_value);
                        }
                }
 
-               if (!strcmp(name, "vdso_fake_sections")) {
-                       if (fake_sections_value)
-                               fail("duplicate vdso_fake_sections\n");
-                       fake_sections_value = GET_LE(&sym->st_value);
-                       fake_sections_size = GET_LE(&sym->st_size);
+               if (!strcmp(name, "fake_shstrtab")) {
+                       ELF(Shdr) *sh;
+
+                       fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
+                       fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
+                       fake_sections.shstrtab_len = GET_LE(&sym->st_size);
+                       sh = addr + GET_LE(&hdr->e_shoff) +
+                               GET_LE(&hdr->e_shentsize) *
+                               fake_sections.in_shstrndx;
+                       fake_sections.shstr_offset = GET_LE(&sym->st_value) -
+                               GET_LE(&sh->sh_addr);
                }
        }
 
+       /* Build the output section table. */
+       if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
+           !syms[sym_VDSO_FAKE_SECTION_TABLE_END])
+               fail("couldn't find fake section table\n");
+       if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+            syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
+               fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
+       fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+       fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
+       fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
+                                  syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
+               sizeof(ELF(Shdr));
+
+       BITSFUNC(init_sections)(&fake_sections);
+       for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
+               ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
+                       GET_LE(&hdr->e_shentsize) * i;
+               BITSFUNC(copy_section)(&fake_sections, i, sh,
+                                      secstrings + GET_LE(&sh->sh_name));
+       }
+       if (!fake_sections.out_shstrndx)
+               fail("didn't generate shstrndx?!?\n");
+
+       PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
+       PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
+       PUT_LE(&hdr->e_shnum, fake_sections.count);
+       PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
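
After the copy loop, the image's ELF header is redirected at the freshly built table. A hedged userspace sketch of that header patching, assuming a little-endian host so the PUT_LE() byte-order handling can be elided (set_section_table is an invented name):

  #include <elf.h>
  #include <stdint.h>
  #include <string.h>

  /* Point a mapped image's header at a section table at shoff. */
  static void set_section_table(void *image, uint64_t shoff,
                                uint16_t shnum, uint16_t shstrndx)
  {
          Elf64_Ehdr hdr;

          memcpy(&hdr, image, sizeof(hdr));  /* keep accesses aligned */
          hdr.e_shoff = shoff;
          hdr.e_shentsize = sizeof(Elf64_Shdr);
          hdr.e_shnum = shnum;
          hdr.e_shstrndx = shstrndx;
          memcpy(image, &hdr, sizeof(hdr));
  }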
+
        /* Validate mapping addresses. */
        for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
                if (!syms[i])
@@ -112,25 +262,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
 
                if (syms[i] % 4096)
                        fail("%s must be a multiple of 4096\n",
-                            required_syms[i]);
+                            required_syms[i].name);
                if (syms[i] < data_size)
                        fail("%s must be after the text mapping\n",
-                            required_syms[i]);
+                            required_syms[i].name);
                if (syms[sym_end_mapping] < syms[i] + 4096)
-                       fail("%s overruns end_mapping\n", required_syms[i]);
+                       fail("%s overruns end_mapping\n",
+                            required_syms[i].name);
        }
        if (syms[sym_end_mapping] % 4096)
                fail("end_mapping must be a multiple of 4096\n");
 
-       /* Remove sections or use fakes */
-       if (fake_sections_size % sizeof(Elf_Shdr))
-               fail("vdso_fake_sections size is not a multiple of %ld\n",
-                    (long)sizeof(Elf_Shdr));
-       PUT_LE(&hdr->e_shoff, fake_sections_value);
-       PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
-       PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
-       PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
-
        if (!name) {
                fwrite(addr, load_size, 1, outfile);
                return;
@@ -168,9 +310,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
                        (unsigned long)GET_LE(&alt_sec->sh_size));
        }
        for (i = 0; i < NSYMS; i++) {
-               if (syms[i])
+               if (required_syms[i].export && syms[i])
                        fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
-                               required_syms[i], syms[i]);
+                               required_syms[i].name, syms[i]);
        }
        fprintf(outfile, "};\n");
 }
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
new file mode 100644 (file)
index 0000000..541468e
--- /dev/null
@@ -0,0 +1 @@
+#include "../vdso-fakesections.c"
index 46b991b..697c11e 100644 (file)
@@ -6,6 +6,8 @@
  * the DSO.
  */
 
+#define BUILD_VDSOX32
+
 #include "vdso-layout.lds.S"
 
 /*
index e1513c4..5a5176d 100644 (file)
@@ -62,6 +62,9 @@ struct linux_binprm;
    Only used for the 64-bit and x32 vdsos. */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
+#ifdef CONFIG_X86_32
+       return 0;
+#else
        unsigned long addr, end;
        unsigned offset;
        end = (start + PMD_SIZE - 1) & PMD_MASK;
@@ -83,6 +86,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
        addr = align_vdso_addr(addr);
 
        return addr;
+#endif
 }
 
 static int map_vdso(const struct vdso_image *image, bool calculate_addr)
index f17b292..ffb101e 100644 (file)
@@ -1537,7 +1537,10 @@ asmlinkage __visible void __init xen_start_kernel(void)
        if (!xen_pvh_domain())
                pv_cpu_ops = xen_cpu_ops;
 
-       x86_init.resources.memory_setup = xen_memory_setup;
+       if (xen_feature(XENFEAT_auto_translated_physmap))
+               x86_init.resources.memory_setup = xen_auto_xlated_memory_setup;
+       else
+               x86_init.resources.memory_setup = xen_memory_setup;
        x86_init.oem.arch_setup = xen_arch_setup;
        x86_init.oem.banner = xen_banner;
 
index c985835..ebfa9b2 100644 (file)
 
 #include <linux/sched.h>
 #include <linux/mm.h>
+#include <linux/slab.h>
 #include <linux/vmalloc.h>
 
 #include <xen/interface/xen.h>
 #include <xen/page.h>
 #include <xen/grant_table.h>
+#include <xen/xen.h>
 
 #include <asm/pgtable.h>
 
-static int map_pte_fn(pte_t *pte, struct page *pmd_page,
-                     unsigned long addr, void *data)
+static struct gnttab_vm_area {
+       struct vm_struct *area;
+       pte_t **ptes;
+} gnttab_shared_vm_area, gnttab_status_vm_area;
+
+int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          void **__shared)
 {
-       unsigned long **frames = (unsigned long **)data;
+       void *shared = *__shared;
+       unsigned long addr;
+       unsigned long i;
 
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
-       return 0;
-}
+       if (shared == NULL)
+               *__shared = shared = gnttab_shared_vm_area.area->addr;
 
-/*
- * This function is used to map shared frames to store grant status. It is
- * different from map_pte_fn above, the frames type here is uint64_t.
- */
-static int map_pte_fn_status(pte_t *pte, struct page *pmd_page,
-                            unsigned long addr, void *data)
-{
-       uint64_t **frames = (uint64_t **)data;
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i],
+                          mfn_pte(frames[i], PAGE_KERNEL));
+               addr += PAGE_SIZE;
+       }
 
-       set_pte_at(&init_mm, addr, pte, mfn_pte((*frames)[0], PAGE_KERNEL));
-       (*frames)++;
        return 0;
 }
 
-static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
-                       unsigned long addr, void *data)
+int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
+                          unsigned long max_nr_gframes,
+                          grant_status_t **__shared)
 {
+       grant_status_t *shared = *__shared;
+       unsigned long addr;
+       unsigned long i;
+
+       if (shared == NULL)
+               *__shared = shared = gnttab_status_vm_area.area->addr;
+
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i],
+                          mfn_pte(frames[i], PAGE_KERNEL));
+               addr += PAGE_SIZE;
+       }
 
-       set_pte_at(&init_mm, addr, pte, __pte(0));
        return 0;
 }
 
-int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes,
-                          unsigned long max_nr_gframes,
-                          void **__shared)
+void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
 {
-       int rc;
-       void *shared = *__shared;
+       pte_t **ptes;
+       unsigned long addr;
+       unsigned long i;
 
-       if (shared == NULL) {
-               struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-               BUG_ON(area == NULL);
-               shared = area->addr;
-               *__shared = shared;
-       }
+       if (shared == gnttab_status_vm_area.area->addr)
+               ptes = gnttab_status_vm_area.ptes;
+       else
+               ptes = gnttab_shared_vm_area.ptes;
 
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn, &frames);
-       return rc;
+       addr = (unsigned long)shared;
+
+       for (i = 0; i < nr_gframes; i++) {
+               set_pte_at(&init_mm, addr, ptes[i], __pte(0));
+               addr += PAGE_SIZE;
+       }
 }
 
-int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes,
-                          unsigned long max_nr_gframes,
-                          grant_status_t **__shared)
+static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames)
 {
-       int rc;
-       grant_status_t *shared = *__shared;
+       area->ptes = kmalloc(sizeof(pte_t *) * nr_frames, GFP_KERNEL);
+       if (area->ptes == NULL)
+               return -ENOMEM;
 
-       if (shared == NULL) {
-               /* No need to pass in PTE as we are going to do it
-                * in apply_to_page_range anyhow. */
-               struct vm_struct *area =
-                       alloc_vm_area(PAGE_SIZE * max_nr_gframes, NULL);
-               BUG_ON(area == NULL);
-               shared = area->addr;
-               *__shared = shared;
+       area->area = alloc_vm_area(PAGE_SIZE * nr_frames, area->ptes);
+       if (area->area == NULL) {
+               kfree(area->ptes);
+               return -ENOMEM;
        }
 
-       rc = apply_to_page_range(&init_mm, (unsigned long)shared,
-                                PAGE_SIZE * nr_gframes,
-                                map_pte_fn_status, &frames);
-       return rc;
+       return 0;
 }
 
-void arch_gnttab_unmap(void *shared, unsigned long nr_gframes)
+static void arch_gnttab_vfree(struct gnttab_vm_area *area)
+{
+       free_vm_area(area->area);
+       kfree(area->ptes);
+}
+
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status)
 {
-       apply_to_page_range(&init_mm, (unsigned long)shared,
-                           PAGE_SIZE * nr_gframes, unmap_pte_fn, NULL);
+       int ret;
+
+       if (!xen_pv_domain())
+               return 0;
+
+       ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * Always allocate the space for the status frames in case
+        * we're migrated to a host with V2 support.
+        */
+       ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status);
+       if (ret < 0)
+               goto err;
+
+       return 0;
+  err:
+       arch_gnttab_vfree(&gnttab_shared_vm_area);
+       return -ENOMEM;
 }
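
The rework trades apply_to_page_range() walks for PTE pointers captured once by alloc_vm_area(), so the later map and unmap paths are plain array walks. A userspace analogy of that reserve-once, fill-later shape (the types and names below are invented for illustration, not kernel API):

  #include <stdlib.h>

  struct vm_slots {
          void **slots;          /* stands in for the saved pte_t *[] */
          unsigned long nr;
  };

  static int slots_alloc(struct vm_slots *a, unsigned long nr)
  {
          a->slots = calloc(nr, sizeof(*a->slots));
          if (!a->slots)
                  return -1;     /* -ENOMEM in the kernel version */
          a->nr = nr;
          return 0;
  }

  static void slots_free(struct vm_slots *a)
  {
          free(a->slots);
          a->slots = NULL;
          a->nr = 0;
  }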
+
 #ifdef CONFIG_XEN_PVH
 #include <xen/balloon.h>
 #include <xen/events.h>
-#include <xen/xen.h>
 #include <linux/slab.h>
 static int __init xlated_setup_gnttab_pages(void)
 {
index 821a11a..2e55516 100644 (file)
@@ -27,7 +27,6 @@
 #include <xen/interface/memory.h>
 #include <xen/interface/physdev.h>
 #include <xen/features.h>
-#include "mmu.h"
 #include "xen-ops.h"
 #include "vdso.h"
 
@@ -82,9 +81,6 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
 
        memblock_reserve(start, size);
 
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return;
-
        xen_max_p2m_pfn = PFN_DOWN(start + size);
        for (pfn = PFN_DOWN(start); pfn < xen_max_p2m_pfn; pfn++) {
                unsigned long mfn = pfn_to_mfn(pfn);
@@ -107,7 +103,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
                .domid        = DOMID_SELF
        };
        unsigned long len = 0;
-       int xlated_phys = xen_feature(XENFEAT_auto_translated_physmap);
        unsigned long pfn;
        int ret;
 
@@ -121,7 +116,7 @@ static unsigned long __init xen_do_chunk(unsigned long start,
                                continue;
                        frame = mfn;
                } else {
-                       if (!xlated_phys && mfn != INVALID_P2M_ENTRY)
+                       if (mfn != INVALID_P2M_ENTRY)
                                continue;
                        frame = pfn;
                }
@@ -159,13 +154,6 @@ static unsigned long __init xen_do_chunk(unsigned long start,
 static unsigned long __init xen_release_chunk(unsigned long start,
                                              unsigned long end)
 {
-       /*
-        * Xen already ballooned out the E820 non RAM regions for us
-        * and set them up properly in EPT.
-        */
-       if (xen_feature(XENFEAT_auto_translated_physmap))
-               return end - start;
-
        return xen_do_chunk(start, end, true);
 }
 
@@ -234,13 +222,7 @@ static void __init xen_set_identity_and_release_chunk(
         * (except for the ISA region which must be 1:1 mapped) to
         * release the refcounts (in Xen) on the original frames.
         */
-
-       /*
-        * PVH E820 matches the hypervisor's P2M which means we need to
-        * account for the proper values of *release and *identity.
-        */
-       for (pfn = start_pfn; !xen_feature(XENFEAT_auto_translated_physmap) &&
-            pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
+       for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) {
                pte_t pte = __pte_ma(0);
 
                if (pfn < PFN_UP(ISA_END_ADDRESS))
@@ -517,6 +499,35 @@ char * __init xen_memory_setup(void)
        return "Xen";
 }
 
+/*
+ * Machine specific memory setup for auto-translated guests.
+ */
+char * __init xen_auto_xlated_memory_setup(void)
+{
+       static struct e820entry map[E820MAX] __initdata;
+
+       struct xen_memory_map memmap;
+       int i;
+       int rc;
+
+       memmap.nr_entries = E820MAX;
+       set_xen_guest_handle(memmap.buffer, map);
+
+       rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
+       if (rc < 0)
+               panic("No memory map (%d)\n", rc);
+
+       sanitize_e820_map(map, ARRAY_SIZE(map), &memmap.nr_entries);
+
+       for (i = 0; i < memmap.nr_entries; i++)
+               e820_add_region(map[i].addr, map[i].size, map[i].type);
+
+       memblock_reserve(__pa(xen_start_info->mfn_list),
+                        xen_start_info->pt_base - xen_start_info->mfn_list);
+
+       return "Xen";
+}
+
 /*
  * Set the bit indicating "nosegneg" library variants should be used.
  * We only need to bother in pure 32-bit mode; compat 32-bit processes
@@ -590,13 +601,7 @@ void xen_enable_syscall(void)
        }
 #endif /* CONFIG_X86_64 */
 }
-void xen_enable_nmi(void)
-{
-#ifdef CONFIG_X86_64
-       if (register_callback(CALLBACKTYPE_nmi, (char *)nmi))
-               BUG();
-#endif
-}
+
 void __init xen_pvmmu_arch_setup(void)
 {
        HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_4gb_segments);
@@ -611,7 +616,6 @@ void __init xen_pvmmu_arch_setup(void)
 
        xen_enable_sysenter();
        xen_enable_syscall();
-       xen_enable_nmi();
 }
 
 /* This function is not called for HVM domains */
index c834d4b..97d8765 100644 (file)
@@ -36,6 +36,7 @@ void xen_mm_unpin_all(void);
 void xen_set_pat(u64);
 
 char * __init xen_memory_setup(void);
+char * xen_auto_xlated_memory_setup(void);
 void __init xen_arch_setup(void);
 void xen_enable_sysenter(void);
 void xen_enable_syscall(void);
index f9e1ec3..8453e6e 100644 (file)
@@ -376,38 +376,42 @@ _DoubleExceptionVector_WindowOverflow:
        beqz    a2, 1f          # if at start of vector, don't restore
 
        addi    a0, a0, -128
-       bbsi    a0, 8, 1f       # don't restore except for overflow 8 and 12
-       bbsi    a0, 7, 2f
+       bbsi.l  a0, 8, 1f       # don't restore except for overflow 8 and 12
+
+       /*
+        * This fixup handler is for the extremely unlikely case where the
+        * overflow handler's reference thru a0 gets a hardware TLB refill
+        * that bumps out the (distinct, aliasing) TLB entry that mapped its
+        * prior references thru a9/a13, and where our reference now thru
+        * a9/a13 gets a 2nd-level miss exception (not hardware TLB refill).
+        */
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       bbsi.l  a0, 7, 2f
 
        /*
         * Restore a0 as saved by _WindowOverflow8().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a9, and where our reference now thru a9
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a9, -16
-       wsr     a2, depc        # replace the saved a0
-       j       1f
+       l32e    a0, a9, -16
+       wsr     a0, depc        # replace the saved a0
+       j       3f
 
 2:
        /*
         * Restore a0 as saved by _WindowOverflow12().
-        *
-        * FIXME:  we really need a fixup handler for this L32E,
-        * for the extremely unlikely case where the overflow handler's
-        * reference thru a0 gets a hardware TLB refill that bumps out
-        * the (distinct, aliasing) TLB entry that mapped its prior
-        * references thru a13, and where our reference now thru a13
-        * gets a 2nd-level miss exception (not hardware TLB refill).
         */
 
-       l32e    a2, a13, -16
-       wsr     a2, depc        # replace the saved a0
+       l32e    a0, a13, -16
+       wsr     a0, depc        # replace the saved a0
+3:
+       xsr     a3, excsave1
+       movi    a0, 0
+       s32i    a0, a3, EXC_TABLE_FIXUP
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
 1:
        /*
         * Restore WindowBase while leaving all address registers restored.
@@ -449,6 +453,7 @@ _DoubleExceptionVector_WindowOverflow:
 
        s32i    a0, a2, PT_DEPC
 
+_DoubleExceptionVector_handle_exception:
        addx4   a0, a0, a3
        l32i    a0, a0, EXC_TABLE_FAST_USER
        xsr     a3, excsave1
@@ -464,10 +469,119 @@ _DoubleExceptionVector_WindowOverflow:
        rotw    -3
        j       1b
 
-       .end literal_prefix
 
 ENDPROC(_DoubleExceptionVector)
 
+/*
+ * Fixup handler for TLB miss in double exception handler for window overflow.
+ * We get here with windowbase set to the window that was being spilled and
+ * a0 trashed. a0 bit 7 determines if this is a call8 (bit clear) or call12
+ * (bit set) window.
+ *
+ * We do the following here:
+ * - go to the original window retaining a0 value;
+ * - set up exception stack to return back to appropriate a0 restore code
+ *   (we'll need to rotate window back and there's no place to save this
+ *    information, use different return address for that);
+ * - handle the exception;
+ * - go to the window that was being spilled;
+ * - set up window_overflow_restore_a0_fixup as a fixup routine;
+ * - reload a0;
+ * - restore the original window;
+ * - reset the default fixup routine;
+ * - return to user. By the time we get to this fixup handler all information
+ *   about the conditions of the original double exception that happened in
+ *   the window overflow handler is lost, so we just return to userspace to
+ *   retry overflow from start.
+ *
+ * a0: value of depc, original value in depc
+ * a2: trashed, original value in EXC_TABLE_DOUBLE_SAVE
+ * a3: exctable, original value in excsave1
+ */
+
+ENTRY(window_overflow_restore_a0_fixup)
+
+       rsr     a0, ps
+       extui   a0, a0, PS_OWB_SHIFT, PS_OWB_WIDTH
+       rsr     a2, windowbase
+       sub     a0, a2, a0
+       extui   a0, a0, 0, 3
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+
+       _beqi   a0, 1, .Lhandle_1
+       _beqi   a0, 3, .Lhandle_3
+
+       .macro  overflow_fixup_handle_exception_pane n
+
+       rsr     a0, depc
+       rotw    -\n
+
+       xsr     a3, excsave1
+       wsr     a2, depc
+       l32i    a2, a3, EXC_TABLE_KSTK
+       s32i    a0, a2, PT_AREG0
+
+       movi    a0, .Lrestore_\n
+       s32i    a0, a2, PT_DEPC
+       rsr     a0, exccause
+       j       _DoubleExceptionVector_handle_exception
+
+       .endm
+
+       overflow_fixup_handle_exception_pane 2
+.Lhandle_1:
+       overflow_fixup_handle_exception_pane 1
+.Lhandle_3:
+       overflow_fixup_handle_exception_pane 3
+
+       .macro  overflow_fixup_restore_a0_pane n
+
+       rotw    \n
+       /* Need to preserve a0 value here to be able to handle an exception
+        * that may occur on a0 reload from stack. It may occur because
+        * TLB miss handler may not be atomic and pointer to page table
+        * may be lost before we get here. There are no free registers,
+        * so we need to use EXC_TABLE_DOUBLE_SAVE area.
+        */
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, window_overflow_restore_a0_fixup
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       bbsi.l  a0, 7, 1f
+       l32e    a0, a9, -16
+       j       2f
+1:
+       l32e    a0, a13, -16
+2:
+       rotw    -\n
+
+       .endm
+
+.Lrestore_2:
+       overflow_fixup_restore_a0_pane 2
+
+.Lset_default_fixup:
+       xsr     a3, excsave1
+       s32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       movi    a2, 0
+       s32i    a2, a3, EXC_TABLE_FIXUP
+       l32i    a2, a3, EXC_TABLE_DOUBLE_SAVE
+       xsr     a3, excsave1
+       rfe
+
+.Lrestore_1:
+       overflow_fixup_restore_a0_pane 1
+       j       .Lset_default_fixup
+.Lrestore_3:
+       overflow_fixup_restore_a0_pane 3
+       j       .Lset_default_fixup
+
+ENDPROC(window_overflow_restore_a0_fixup)
+
+       .end literal_prefix
 /*
  * Debug interrupt vector
  *
index ee32c00..d16db6d 100644 (file)
@@ -269,13 +269,13 @@ SECTIONS
                  .UserExceptionVector.literal)
   SECTION_VECTOR (_DoubleExceptionVector_literal,
                  .DoubleExceptionVector.literal,
-                 DOUBLEEXC_VECTOR_VADDR - 16,
+                 DOUBLEEXC_VECTOR_VADDR - 40,
                  SIZEOF(.UserExceptionVector.text),
                  .UserExceptionVector.text)
   SECTION_VECTOR (_DoubleExceptionVector_text,
                  .DoubleExceptionVector.text,
                  DOUBLEEXC_VECTOR_VADDR,
-                 32,
+                 40,
                  .DoubleExceptionVector.literal)
 
   . = (LOADADDR( .DoubleExceptionVector.text ) + SIZEOF( .DoubleExceptionVector.text ) + 3) & ~ 3;
index 4224256..77ed202 100644 (file)
@@ -191,7 +191,7 @@ int __init mem_reserve(unsigned long start, unsigned long end, int must_exist)
                return -EINVAL;
        }
 
-       if (it && start - it->start < bank_sz) {
+       if (it && start - it->start <= bank_sz) {
                if (start == it->start) {
                        if (end - it->start < bank_sz) {
                                it->start = end;
index 8c2e55e..0ec61c9 100644 (file)
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 
                        goto done;
                }
+
+               /*
+                * If the queue doesn't support SG gaps and adding this
+                * offset would create a gap, disallow it.
+                */
+               if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
+                   bvec_gap_to_prev(prev, offset))
+                       return 0;
        }
 
        if (bio->bi_vcnt >= bio->bi_max_vecs)
index 069bc20..28d227c 100644 (file)
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
        blkg->q = q;
        INIT_LIST_HEAD(&blkg->q_node);
        blkg->blkcg = blkcg;
-       blkg->refcnt = 1;
+       atomic_set(&blkg->refcnt, 1);
 
        /* root blkg uses @q->root_rl, init rl only for !root blkgs */
        if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
 
        /* release the blkcg and parent blkg refs this blkg has been holding */
        css_put(&blkg->blkcg->css);
-       if (blkg->parent) {
-               spin_lock_irq(blkg->q->queue_lock);
+       if (blkg->parent)
                blkg_put(blkg->parent);
-               spin_unlock_irq(blkg->q->queue_lock);
-       }
 
        blkg_free(blkg);
 }
@@ -875,6 +872,13 @@ void blkcg_drain_queue(struct request_queue *q)
 {
        lockdep_assert_held(q->queue_lock);
 
+       /*
+        * @q could be exiting and already have destroyed all blkgs as
+        * indicated by NULL root_blkg.  If so, don't confuse policies.
+        */
+       if (!q->root_blkg)
+               return;
+
        blk_throtl_drain(q);
 }
 
@@ -1093,7 +1097,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
  * Register @pol with blkcg core.  Might sleep and @pol may be modified on
  * successful registration.  Returns 0 on success and -errno on failure.
  */
-int __init blkcg_policy_register(struct blkcg_policy *pol)
+int blkcg_policy_register(struct blkcg_policy *pol)
 {
        int i, ret;
 
index cbb7f94..d3fd7aa 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/seq_file.h>
 #include <linux/radix-tree.h>
 #include <linux/blkdev.h>
+#include <linux/atomic.h>
 
 /* Max limits for throttle policy */
 #define THROTL_IOPS_MAX                UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
        struct request_list             rl;
 
        /* reference count */
-       int                             refcnt;
+       atomic_t                        refcnt;
 
        /* is this blkg online? protected by both blkcg and q locks */
        bool                            online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
 void blkcg_exit_queue(struct request_queue *q);
 
 /* Blkio controller policy registration */
-int __init blkcg_policy_register(struct blkcg_policy *pol);
+int blkcg_policy_register(struct blkcg_policy *pol);
 void blkcg_policy_unregister(struct blkcg_policy *pol);
 int blkcg_activate_policy(struct request_queue *q,
                          const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
  * blkg_get - get a blkg reference
  * @blkg: blkg to get
  *
- * The caller should be holding queue_lock and an existing reference.
+ * The caller should be holding an existing reference.
  */
 static inline void blkg_get(struct blkcg_gq *blkg)
 {
-       lockdep_assert_held(blkg->q->queue_lock);
-       WARN_ON_ONCE(!blkg->refcnt);
-       blkg->refcnt++;
+       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+       atomic_inc(&blkg->refcnt);
 }
 
 void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
 /**
  * blkg_put - put a blkg reference
  * @blkg: blkg to put
- *
- * The caller should be holding queue_lock.
  */
 static inline void blkg_put(struct blkcg_gq *blkg)
 {
-       lockdep_assert_held(blkg->q->queue_lock);
-       WARN_ON_ONCE(blkg->refcnt <= 0);
-       if (!--blkg->refcnt)
+       WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
+       if (atomic_dec_and_test(&blkg->refcnt))
                call_rcu(&blkg->rcu_head, __blkg_release_rcu);
 }
 
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
 static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
 static inline void blkcg_drain_queue(struct request_queue *q) { }
 static inline void blkcg_exit_queue(struct request_queue *q) { }
-static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
+static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
 static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
 static inline int blkcg_activate_policy(struct request_queue *q,
                                        const struct blkcg_policy *pol) { return 0; }
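
Dropping the queue_lock requirement from blkg_get()/blkg_put() works because the refcount is now self-contained in the atomic. A C11 userspace sketch of the same get/put discipline (the obj_* names are invented):

  #include <stdatomic.h>
  #include <stdio.h>

  struct obj {
          atomic_int refcnt;
  };

  static void obj_get(struct obj *o)
  {
          atomic_fetch_add(&o->refcnt, 1);
  }

  static void obj_put(struct obj *o)
  {
          /* fetch_sub returns the old value; 1 means we held the last ref. */
          if (atomic_fetch_sub(&o->refcnt, 1) == 1)
                  printf("last reference dropped, free object\n");
  }

  int main(void)
  {
          struct obj o = { .refcnt = 1 };

          obj_get(&o);
          obj_put(&o);
          obj_put(&o);
          return 0;
  }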
index f6f6b9a..6f8dba1 100644 (file)
@@ -3312,8 +3312,7 @@ int __init blk_dev_init(void)
 
        /* used for unplugging and affects IO latency/throughput - HIGHPRI */
        kblockd_workqueue = alloc_workqueue("kblockd",
-                                           WQ_MEM_RECLAIM | WQ_HIGHPRI |
-                                           WQ_POWER_EFFICIENT, 0);
+                                           WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
        if (!kblockd_workqueue)
                panic("Failed to create kblockd\n");
 
index 8ffee4b..3cb5e9e 100644 (file)
@@ -421,44 +421,6 @@ void blk_insert_flush(struct request *rq)
        blk_flush_complete_seq(rq, REQ_FSEQ_ACTIONS & ~policy, 0);
 }
 
-/**
- * blk_abort_flushes - @q is being aborted, abort flush requests
- * @q: request_queue being aborted
- *
- * To be called from elv_abort_queue().  @q is being aborted.  Prepare all
- * FLUSH/FUA requests for abortion.
- *
- * CONTEXT:
- * spin_lock_irq(q->queue_lock)
- */
-void blk_abort_flushes(struct request_queue *q)
-{
-       struct request *rq, *n;
-       int i;
-
-       /*
-        * Requests in flight for data are already owned by the dispatch
-        * queue or the device driver.  Just restore for normal completion.
-        */
-       list_for_each_entry_safe(rq, n, &q->flush_data_in_flight, flush.list) {
-               list_del_init(&rq->flush.list);
-               blk_flush_restore_request(rq);
-       }
-
-       /*
-        * We need to give away requests on flush queues.  Restore for
-        * normal completion and put them on the dispatch queue.
-        */
-       for (i = 0; i < ARRAY_SIZE(q->flush_queue); i++) {
-               list_for_each_entry_safe(rq, n, &q->flush_queue[i],
-                                        flush.list) {
-                       list_del_init(&rq->flush.list);
-                       blk_flush_restore_request(rq);
-                       list_add_tail(&rq->queuelist, &q->queue_head);
-               }
-       }
-}
-
 /**
  * blkdev_issue_flush - queue a flush
  * @bdev:      blockdev to issue flush for
index b3bf0df..5453583 100644 (file)
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 
 bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
 {
+       struct request_queue *q = rq->q;
+
        if (!rq_mergeable(rq) || !bio_mergeable(bio))
                return false;
 
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
            !blk_write_same_mergeable(rq->bio, bio))
                return false;
 
+       if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
+               struct bio_vec *bprev;
+
+               bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
+               if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
+                       return false;
+       }
+
        return true;
 }
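
This check and the __bio_add_page() one earlier gate merging on the same geometry rule: with QUEUE_FLAG_SG_GAPS set, adjacent segments must tile pages exactly. A sketch of the test, assuming bvec_gap_to_prev() reduces to the page-boundary check below (the exact in-kernel definition may differ):

  #include <stdbool.h>
  #include <stdint.h>

  #define PAGE_SIZE 4096u

  /*
   * A gap exists unless the previous segment ends on a page boundary
   * and the next one starts at offset 0 within its page.
   */
  static bool sg_gap(uint32_t prev_offset, uint32_t prev_len,
                     uint32_t next_offset)
  {
          return next_offset || ((prev_offset + prev_len) & (PAGE_SIZE - 1));
  }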
 
index 1aab39f..c1b9242 100644 (file)
@@ -43,9 +43,16 @@ bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
        return bt_has_free_tags(&tags->bitmap_tags);
 }
 
-static inline void bt_index_inc(unsigned int *index)
+static inline int bt_index_inc(int index)
 {
-       *index = (*index + 1) & (BT_WAIT_QUEUES - 1);
+       return (index + 1) & (BT_WAIT_QUEUES - 1);
+}
+
+static inline void bt_index_atomic_inc(atomic_t *index)
+{
+       int old = atomic_read(index);
+       int new = bt_index_inc(old);
+       atomic_cmpxchg(index, old, new);
 }
 
 /*
@@ -69,14 +76,14 @@ static void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags)
        int i, wake_index;
 
        bt = &tags->bitmap_tags;
-       wake_index = bt->wake_index;
+       wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];
 
                if (waitqueue_active(&bs->wait))
                        wake_up(&bs->wait);
 
-               bt_index_inc(&wake_index);
+               wake_index = bt_index_inc(wake_index);
        }
 }
 
@@ -212,12 +219,14 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt,
                                         struct blk_mq_hw_ctx *hctx)
 {
        struct bt_wait_state *bs;
+       int wait_index;
 
        if (!hctx)
                return &bt->bs[0];
 
-       bs = &bt->bs[hctx->wait_index];
-       bt_index_inc(&hctx->wait_index);
+       wait_index = atomic_read(&hctx->wait_index);
+       bs = &bt->bs[wait_index];
+       bt_index_atomic_inc(&hctx->wait_index);
        return bs;
 }
 
@@ -239,18 +248,12 @@ static int bt_get(struct blk_mq_alloc_data *data,
 
        bs = bt_wait_ptr(bt, hctx);
        do {
-               bool was_empty;
-
-               was_empty = list_empty(&wait.task_list);
                prepare_to_wait(&bs->wait, &wait, TASK_UNINTERRUPTIBLE);
 
                tag = __bt_get(hctx, bt, last_tag);
                if (tag != -1)
                        break;
 
-               if (was_empty)
-                       atomic_set(&bs->wait_cnt, bt->wake_cnt);
-
                blk_mq_put_ctx(data->ctx);
 
                io_schedule();
@@ -313,18 +316,19 @@ static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt)
 {
        int i, wake_index;
 
-       wake_index = bt->wake_index;
+       wake_index = atomic_read(&bt->wake_index);
        for (i = 0; i < BT_WAIT_QUEUES; i++) {
                struct bt_wait_state *bs = &bt->bs[wake_index];
 
                if (waitqueue_active(&bs->wait)) {
-                       if (wake_index != bt->wake_index)
-                               bt->wake_index = wake_index;
+                       int o = atomic_read(&bt->wake_index);
+                       if (wake_index != o)
+                               atomic_cmpxchg(&bt->wake_index, o, wake_index);
 
                        return bs;
                }
 
-               bt_index_inc(&wake_index);
+               wake_index = bt_index_inc(wake_index);
        }
 
        return NULL;
@@ -334,6 +338,7 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
 {
        const int index = TAG_TO_INDEX(bt, tag);
        struct bt_wait_state *bs;
+       int wait_cnt;
 
        /*
         * The unlock memory barrier need to order access to req in free
@@ -342,10 +347,19 @@ static void bt_clear_tag(struct blk_mq_bitmap_tags *bt, unsigned int tag)
        clear_bit_unlock(TAG_TO_BIT(bt, tag), &bt->map[index].word);
 
        bs = bt_wake_ptr(bt);
-       if (bs && atomic_dec_and_test(&bs->wait_cnt)) {
-               atomic_set(&bs->wait_cnt, bt->wake_cnt);
-               bt_index_inc(&bt->wake_index);
+       if (!bs)
+               return;
+
+       wait_cnt = atomic_dec_return(&bs->wait_cnt);
+       if (wait_cnt == 0) {
+wake:
+               atomic_add(bt->wake_cnt, &bs->wait_cnt);
+               bt_index_atomic_inc(&bt->wake_index);
                wake_up(&bs->wait);
+       } else if (wait_cnt < 0) {
+               wait_cnt = atomic_inc_return(&bs->wait_cnt);
+               if (!wait_cnt)
+                       goto wake;
        }
 }
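
The negative-wait_cnt branch repairs over-decrement when several completers race past zero: each one increments the counter back, and whichever increment lands exactly on zero takes responsibility for the wakeup and re-arms the counter. A condensed C11 model of that bookkeeping (should_wake is an invented name; the index bump and wake_up() are omitted):

  #include <stdatomic.h>
  #include <stdbool.h>

  static bool should_wake(atomic_int *wait_cnt, int wake_cnt)
  {
          int v = atomic_fetch_sub(wait_cnt, 1) - 1;  /* new value */

          if (v == 0) {
                  atomic_fetch_add(wait_cnt, wake_cnt);  /* re-arm */
                  return true;
          }
          if (v < 0 && atomic_fetch_add(wait_cnt, 1) + 1 == 0) {
                  atomic_fetch_add(wait_cnt, wake_cnt);
                  return true;
          }
          return false;
  }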
 
@@ -499,10 +513,13 @@ static int bt_alloc(struct blk_mq_bitmap_tags *bt, unsigned int depth,
                return -ENOMEM;
        }
 
-       for (i = 0; i < BT_WAIT_QUEUES; i++)
+       bt_update_count(bt, depth);
+
+       for (i = 0; i < BT_WAIT_QUEUES; i++) {
                init_waitqueue_head(&bt->bs[i].wait);
+               atomic_set(&bt->bs[i].wait_cnt, bt->wake_cnt);
+       }
 
-       bt_update_count(bt, depth);
        return 0;
 }
 
index 98696a6..6206ed1 100644 (file)
@@ -24,7 +24,7 @@ struct blk_mq_bitmap_tags {
        unsigned int map_nr;
        struct blk_align_bitmap *map;
 
-       unsigned int wake_index;
+       atomic_t wake_index;
        struct bt_wait_state *bs;
 };
 
index e11f5f8..ad69ef6 100644 (file)
@@ -109,7 +109,7 @@ static void blk_mq_queue_exit(struct request_queue *q)
        __percpu_counter_add(&q->mq_usage_counter, -1, 1000000);
 }
 
-static void __blk_mq_drain_queue(struct request_queue *q)
+void blk_mq_drain_queue(struct request_queue *q)
 {
        while (true) {
                s64 count;
@@ -120,7 +120,7 @@ static void __blk_mq_drain_queue(struct request_queue *q)
 
                if (count == 0)
                        break;
-               blk_mq_run_queues(q, false);
+               blk_mq_start_hw_queues(q);
                msleep(10);
        }
 }
@@ -139,12 +139,7 @@ static void blk_mq_freeze_queue(struct request_queue *q)
        spin_unlock_irq(q->queue_lock);
 
        if (drain)
-               __blk_mq_drain_queue(q);
-}
-
-void blk_mq_drain_queue(struct request_queue *q)
-{
-       __blk_mq_drain_queue(q);
+               blk_mq_drain_queue(q);
 }
 
 static void blk_mq_unfreeze_queue(struct request_queue *q)
@@ -883,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
        clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
 
        preempt_disable();
-       __blk_mq_run_hw_queue(hctx);
+       blk_mq_run_hw_queue(hctx, false);
        preempt_enable();
 }
 EXPORT_SYMBOL(blk_mq_start_hw_queue);
index 3f33d86..a185b86 100644 (file)
@@ -27,18 +27,15 @@ struct request *blk_queue_find_tag(struct request_queue *q, int tag)
 EXPORT_SYMBOL(blk_queue_find_tag);
 
 /**
- * __blk_free_tags - release a given set of tag maintenance info
+ * blk_free_tags - release a given set of tag maintenance info
  * @bqt:       the tag map to free
  *
- * Tries to free the specified @bqt.  Returns true if it was
- * actually freed and false if there are still references using it
+ * Drop the reference count on @bqt and free it when the last reference
+ * is dropped.
  */
-static int __blk_free_tags(struct blk_queue_tag *bqt)
+void blk_free_tags(struct blk_queue_tag *bqt)
 {
-       int retval;
-
-       retval = atomic_dec_and_test(&bqt->refcnt);
-       if (retval) {
+       if (atomic_dec_and_test(&bqt->refcnt)) {
                BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
                                                        bqt->max_depth);
 
@@ -50,9 +47,8 @@ static int __blk_free_tags(struct blk_queue_tag *bqt)
 
                kfree(bqt);
        }
-
-       return retval;
 }
+EXPORT_SYMBOL(blk_free_tags);
 
 /**
  * __blk_queue_free_tags - release tag maintenance info
@@ -69,27 +65,12 @@ void __blk_queue_free_tags(struct request_queue *q)
        if (!bqt)
                return;
 
-       __blk_free_tags(bqt);
+       blk_free_tags(bqt);
 
        q->queue_tags = NULL;
        queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
 }
 
-/**
- * blk_free_tags - release a given set of tag maintenance info
- * @bqt:       the tag map to free
- *
- * For externally managed @bqt frees the map.  Callers of this
- * function must guarantee to have released all the queues that
- * might have been using this tag map.
- */
-void blk_free_tags(struct blk_queue_tag *bqt)
-{
-       if (unlikely(!__blk_free_tags(bqt)))
-               BUG();
-}
-EXPORT_SYMBOL(blk_free_tags);
-
 /**
  * blk_queue_free_tags - release tag maintenance info
  * @q:  the request queue for the device
index 45385e9..6748c4f 100644 (file)
@@ -84,7 +84,6 @@ static inline void blk_clear_rq_complete(struct request *rq)
 #define ELV_ON_HASH(rq) ((rq)->cmd_flags & REQ_HASHED)
 
 void blk_insert_flush(struct request *rq);
-void blk_abort_flushes(struct request_queue *q);
 
 static inline struct request *__elv_next_request(struct request_queue *q)
 {
index fbd5a67..a0926a6 100644 (file)
@@ -690,6 +690,7 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
        case BLKROSET:
        case BLKDISCARD:
        case BLKSECDISCARD:
+       case BLKZEROOUT:
        /*
         * the ones below are implemented in blkdev_locked_ioctl,
         * but we call blkdev_ioctl, which gets the lock for us
index f35eddd..24c28b6 100644 (file)
@@ -729,26 +729,6 @@ int elv_may_queue(struct request_queue *q, int rw)
        return ELV_MQUEUE_MAY;
 }
 
-void elv_abort_queue(struct request_queue *q)
-{
-       struct request *rq;
-
-       blk_abort_flushes(q);
-
-       while (!list_empty(&q->queue_head)) {
-               rq = list_entry_rq(q->queue_head.next);
-               rq->cmd_flags |= REQ_QUIET;
-               trace_block_rq_abort(q, rq);
-               /*
-                * Mark this request as started so we don't trigger
-                * any debug logic in the end I/O path.
-                */
-               blk_start_request(rq);
-               __blk_end_request_all(rq, -EIO);
-       }
-}
-EXPORT_SYMBOL(elv_abort_queue);
-
 void elv_completed_request(struct request_queue *q, struct request *rq)
 {
        struct elevator_queue *e = q->elevator;
@@ -845,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
 }
 EXPORT_SYMBOL(elv_unregister_queue);
 
-int __init elv_register(struct elevator_type *e)
+int elv_register(struct elevator_type *e)
 {
        char *def = "";
 
index c67f6f5..36b0e61 100644
 #include <linux/types.h>
 #include <linux/dmi.h>
 #include <linux/delay.h>
+#ifdef CONFIG_ACPI_PROCFS_POWER
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#endif
 #include <linux/platform_device.h>
 #include <linux/power_supply.h>
 #include <linux/acpi.h>
@@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
 MODULE_DESCRIPTION("ACPI AC Adapter Driver");
 MODULE_LICENSE("GPL");
 
 static int acpi_ac_add(struct acpi_device *device);
 static int acpi_ac_remove(struct acpi_device *device);
 static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev);
 #endif
 static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+extern struct proc_dir_entry *acpi_lock_ac_dir(void);
+extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
+static int acpi_ac_open_fs(struct inode *inode, struct file *file);
+#endif
+
 static int ac_sleep_before_get_state_ms;
 
 static struct acpi_driver acpi_ac_driver = {
@@ -91,6 +103,16 @@ struct acpi_ac {
 
 #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+static const struct file_operations acpi_ac_fops = {
+       .owner = THIS_MODULE,
+       .open = acpi_ac_open_fs,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+};
+#endif
+
 /* --------------------------------------------------------------------------
                                AC Adapter Management
    -------------------------------------------------------------------------- */
@@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = {
        POWER_SUPPLY_PROP_ONLINE,
 };
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+/* --------------------------------------------------------------------------
+                              FS Interface (/proc)
+   -------------------------------------------------------------------------- */
+
+static struct proc_dir_entry *acpi_ac_dir;
+
+static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
+{
+       struct acpi_ac *ac = seq->private;
+
+       if (!ac)
+               return 0;
+
+       if (acpi_ac_get_state(ac)) {
+               seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
+               return 0;
+       }
+
+       seq_puts(seq, "state:                   ");
+       switch (ac->state) {
+       case ACPI_AC_STATUS_OFFLINE:
+               seq_puts(seq, "off-line\n");
+               break;
+       case ACPI_AC_STATUS_ONLINE:
+               seq_puts(seq, "on-line\n");
+               break;
+       default:
+               seq_puts(seq, "unknown\n");
+               break;
+       }
+
+       return 0;
+}
+
+static int acpi_ac_open_fs(struct inode *inode, struct file *file)
+{
+       return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
+}
+
+static int acpi_ac_add_fs(struct acpi_ac *ac)
+{
+       struct proc_dir_entry *entry = NULL;
+
+       printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
+                       " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
+       if (!acpi_device_dir(ac->device)) {
+               acpi_device_dir(ac->device) =
+                       proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
+               if (!acpi_device_dir(ac->device))
+                       return -ENODEV;
+       }
+
+       /* 'state' [R] */
+       entry = proc_create_data(ACPI_AC_FILE_STATE,
+                                S_IRUGO, acpi_device_dir(ac->device),
+                                &acpi_ac_fops, ac);
+       if (!entry)
+               return -ENODEV;
+       return 0;
+}
+
+static int acpi_ac_remove_fs(struct acpi_ac *ac)
+{
+       if (acpi_device_dir(ac->device)) {
+               remove_proc_entry(ACPI_AC_FILE_STATE,
+                                 acpi_device_dir(ac->device));
+               remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
+               acpi_device_dir(ac->device) = NULL;
+       }
+
+       return 0;
+}
+#endif
+
 /* --------------------------------------------------------------------------
                                    Driver Model
    -------------------------------------------------------------------------- */
@@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device)
                goto end;
 
        ac->charger.name = acpi_device_bid(device);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       result = acpi_ac_add_fs(ac);
+       if (result)
+               goto end;
+#endif
        ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
        ac->charger.properties = ac_props;
        ac->charger.num_properties = ARRAY_SIZE(ac_props);
@@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device)
        ac->battery_nb.notifier_call = acpi_ac_battery_notify;
        register_acpi_notifier(&ac->battery_nb);
 end:
-       if (result)
+       if (result) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_ac_remove_fs(ac);
+#endif
                kfree(ac);
+       }
 
        dmi_check_system(ac_dmi_table);
        return result;
@@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device)
                power_supply_unregister(&ac->charger);
        unregister_acpi_notifier(&ac->battery_nb);
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_ac_remove_fs(ac);
+#endif
+
        kfree(ac);
 
        return 0;
@@ -315,9 +427,20 @@ static int __init acpi_ac_init(void)
        if (acpi_disabled)
                return -ENODEV;
 
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_ac_dir = acpi_lock_ac_dir();
+       if (!acpi_ac_dir)
+               return -ENODEV;
+#endif
+
        result = acpi_bus_register_driver(&acpi_ac_driver);
-       if (result < 0)
+       if (result < 0) {
+#ifdef CONFIG_ACPI_PROCFS_POWER
+               acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
                return -ENODEV;
+       }
 
        return 0;
 }
@@ -325,6 +448,9 @@ static int __init acpi_ac_init(void)
 static void __exit acpi_ac_exit(void)
 {
        acpi_bus_unregister_driver(&acpi_ac_driver);
+#ifdef CONFIG_ACPI_PROCFS_POWER
+       acpi_unlock_ac_dir(acpi_ac_dir);
+#endif
 }
 module_init(acpi_ac_init);
 module_exit(acpi_ac_exit);
index 63407d2..9cb65b0 100644
@@ -34,6 +34,9 @@ ACPI_MODULE_NAME("acpi_lpss");
 
 /* Offsets relative to LPSS_PRIVATE_OFFSET */
 #define LPSS_CLK_DIVIDER_DEF_MASK      (BIT(1) | BIT(16))
+#define LPSS_RESETS                    0x04
+#define LPSS_RESETS_RESET_FUNC         BIT(0)
+#define LPSS_RESETS_RESET_APB          BIT(1)
 #define LPSS_GENERAL                   0x08
 #define LPSS_GENERAL_LTR_MODE_SW       BIT(2)
 #define LPSS_GENERAL_UART_RTS_OVRD     BIT(3)
@@ -99,6 +102,17 @@ static void lpss_uart_setup(struct lpss_private_data *pdata)
        writel(reg | LPSS_GENERAL_UART_RTS_OVRD, pdata->mmio_base + offset);
 }
 
+static void lpss_i2c_setup(struct lpss_private_data *pdata)
+{
+       unsigned int offset;
+       u32 val;
+
+       offset = pdata->dev_desc->prv_offset + LPSS_RESETS;
+       val = readl(pdata->mmio_base + offset);
+       val |= LPSS_RESETS_RESET_APB | LPSS_RESETS_RESET_FUNC;
+       writel(val, pdata->mmio_base + offset);
+}
+
 static struct lpss_device_desc lpt_dev_desc = {
        .clk_required = true,
        .prv_offset = 0x800,
@@ -171,6 +185,7 @@ static struct lpss_device_desc byt_i2c_dev_desc = {
        .prv_offset = 0x800,
        .save_ctx = true,
        .shared_clock = &i2c_clock,
+       .setup = lpss_i2c_setup,
 };
 
 #else
index 6703c1f..4ddb0dc 100644
@@ -14,6 +14,8 @@
 #include <linux/module.h>
 
 static const struct acpi_device_id acpi_pnp_device_ids[] = {
+       /* soc_button_array */
+       {"PNP0C40"},
        /* pata_isapnp */
        {"PNP0600"},            /* Generic ESDI/IDE/ATA compatible hard disk controller */
        /* floppy */
index e48fc98..130f513 100644
 #include <linux/jiffies.h>
 #include <linux/async.h>
 #include <linux/dmi.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/suspend.h>
 #include <asm/unaligned.h>
 
 #ifdef CONFIG_ACPI_PROCFS_POWER
@@ -70,6 +72,7 @@ MODULE_DESCRIPTION("ACPI Battery Driver");
 MODULE_LICENSE("GPL");
 
 static int battery_bix_broken_package;
+static int battery_notification_delay_ms;
 static unsigned int cache_time = 1000;
 module_param(cache_time, uint, 0644);
 MODULE_PARM_DESC(cache_time, "cache time in milliseconds");
@@ -532,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
                        " invalid.\n");
        }
 
+       /*
+        * When fully charged, some batteries wrongly report
+        * capacity_now = design_capacity instead of
+        * capacity_now = full_charge_capacity.
+        */
+       if (battery->capacity_now > battery->full_charge_capacity
+           && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
+               battery->capacity_now = battery->full_charge_capacity;
+               if (battery->capacity_now != battery->design_capacity)
+                       printk_once(KERN_WARNING FW_BUG
+                               "battery: reported current charge level (%d) "
+                               "is higher than reported maximum charge level (%d).\n",
+                               battery->capacity_now, battery->full_charge_capacity);
+       }
+
        if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
            && battery->capacity_now >= 0 && battery->capacity_now <= 100)
                battery->capacity_now = (battery->capacity_now *
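
The percentage quirk at the end of this hunk is a plain proportional
rescale: firmware hit by ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY reports
capacity_now as a percentage of full_charge_capacity. A worked example
with invented numbers:

	#include <stdio.h>

	int main(void)
	{
		int capacity_now = 47;            /* reported: 47% */
		int full_charge_capacity = 56000; /* mWh, invented value */

		capacity_now = capacity_now * full_charge_capacity / 100;
		printf("%d mWh\n", capacity_now); /* prints 26320 mWh */
		return 0;
	}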
@@ -930,7 +947,10 @@ static ssize_t acpi_battery_write_alarm(struct file *file,
                goto end;
        }
        alarm_string[count] = '\0';
-       battery->alarm = simple_strtol(alarm_string, NULL, 0);
+       if (kstrtoint(alarm_string, 0, &battery->alarm)) {
+               result = -EINVAL;
+               goto end;
+       }
        result = acpi_battery_set_alarm(battery);
       end:
        if (!result)
@@ -1062,6 +1082,14 @@ static void acpi_battery_notify(struct acpi_device *device, u32 event)
        if (!battery)
                return;
        old = battery->bat.dev;
+       /*
+        * On the Acer Aspire V5-573G notifications are sometimes triggered
+        * too early. For example, when AC is unplugged the notification
+        * fires while the battery state is still reported as "Full"; it
+        * changes to "Discharging" only after a short delay, without any
+        * further notification.
+        */
+       if (battery_notification_delay_ms > 0)
+               msleep(battery_notification_delay_ms);
        if (event == ACPI_BATTERY_NOTIFY_INFO)
                acpi_battery_refresh(battery);
        acpi_battery_update(battery, false);
@@ -1106,17 +1134,60 @@ static int battery_notify(struct notifier_block *nb,
        return 0;
 }
 
+static int battery_bix_broken_package_quirk(const struct dmi_system_id *d)
+{
+       battery_bix_broken_package = 1;
+       return 0;
+}
+
+static int battery_notification_delay_quirk(const struct dmi_system_id *d)
+{
+       battery_notification_delay_ms = 1000;
+       return 0;
+}
+
 static struct dmi_system_id bat_dmi_table[] = {
        {
+               .callback = battery_bix_broken_package_quirk,
                .ident = "NEC LZ750/LS",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "NEC"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "PC-LZ750LS"),
                },
        },
+       {
+               .callback = battery_notification_delay_quirk,
+               .ident = "Acer Aspire V5-573G",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-573G"),
+               },
+       },
        {},
 };
 
+/*
+ * Some machines' ECs (e.g. on the Lenovo Z480) are not stable during
+ * boot-up, which sometimes makes the battery driver fail to probe
+ * because battery information cannot be read from the EC. After
+ * several retries the operation may succeed, so retry here with a
+ * 20ms sleep between attempts.
+ */
+static int acpi_battery_update_retry(struct acpi_battery *battery)
+{
+       int retry, ret;
+
+       for (retry = 5; retry; retry--) {
+               ret = acpi_battery_update(battery, false);
+               if (!ret)
+                       break;
+
+               msleep(20);
+       }
+       return ret;
+}
+
 static int acpi_battery_add(struct acpi_device *device)
 {
        int result = 0;
@@ -1135,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device)
        mutex_init(&battery->sysfs_lock);
        if (acpi_has_method(battery->device->handle, "_BIX"))
                set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
-       result = acpi_battery_update(battery, false);
+
+       result = acpi_battery_update_retry(battery);
        if (result)
                goto fail;
+
 #ifdef CONFIG_ACPI_PROCFS_POWER
        result = acpi_battery_add_fs(device);
 #endif
@@ -1227,8 +1300,7 @@ static void __init acpi_battery_init_async(void *unused, async_cookie_t cookie)
        if (acpi_disabled)
                return;
 
-       if (dmi_check_system(bat_dmi_table))
-               battery_bix_broken_package = 1;
+       dmi_check_system(bat_dmi_table);
        
 #ifdef CONFIG_ACPI_PROCFS_POWER
        acpi_battery_dir = acpi_lock_battery_dir();
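
The final hunk can drop the dmi_check_system() return-value check
because each quirk now applies itself through the .callback member.
A compact model of that dispatch, with hypothetical types (the real
walk lives in dmi_check_system()):

	#include <stdbool.h>
	#include <stddef.h>

	/* Hypothetical mirror of struct dmi_system_id. */
	struct quirk {
		int (*callback)(const struct quirk *q);
		const char *ident;
		bool (*matches)(void); /* stands in for the DMI_MATCH list */
	};

	/* Fire the callback of every entry whose matches all hold,
	 * as dmi_check_system() does for bat_dmi_table. */
	static void apply_quirks(const struct quirk *table, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			if (table[i].callback && table[i].matches())
				table[i].callback(&table[i]);
	}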
index ad11ba4..a66ab65 100644
@@ -1,11 +1,14 @@
 /*
- *  ec.c - ACPI Embedded Controller Driver (v2.1)
+ *  ec.c - ACPI Embedded Controller Driver (v2.2)
  *
- *  Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de>
- *  Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com>
- *  Copyright (C) 2004 Luming Yu <luming.yu@intel.com>
- *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
- *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2001-2014 Intel Corporation
+ *    Author: 2014       Lv Zheng <lv.zheng@intel.com>
+ *            2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
+ *            2006       Denis Sadykov <denis.m.sadykov@intel.com>
+ *            2004       Luming Yu <luming.yu@intel.com>
+ *            2001, 2002 Andy Grover <andrew.grover@intel.com>
+ *            2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ *  Copyright (C) 2008      Alexey Starikovskiy <astarikovskiy@suse.de>
  *
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  *
@@ -52,6 +55,7 @@
 /* EC status register */
 #define ACPI_EC_FLAG_OBF       0x01    /* Output buffer full */
 #define ACPI_EC_FLAG_IBF       0x02    /* Input buffer full */
+#define ACPI_EC_FLAG_CMD       0x08    /* Input buffer contains a command */
 #define ACPI_EC_FLAG_BURST     0x10    /* burst mode */
 #define ACPI_EC_FLAG_SCI       0x20    /* EC-SCI occurred */
 
@@ -78,6 +82,9 @@ enum {
        EC_FLAGS_BLOCKED,               /* Transactions are blocked */
 };
 
+#define ACPI_EC_COMMAND_POLL           0x01 /* Available for command byte */
+#define ACPI_EC_COMMAND_COMPLETE       0x02 /* Completed last byte */
+
 /* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
 static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
 module_param(ec_delay, uint, 0644);
@@ -109,7 +116,7 @@ struct transaction {
        u8 ri;
        u8 wlen;
        u8 rlen;
-       bool done;
+       u8 flags;
 };
 
 struct acpi_ec *boot_ec, *first_ec;
@@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
 static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
 {
        u8 x = inb(ec->command_addr);
-       pr_debug("---> status = 0x%2.2x\n", x);
+       pr_debug("EC_SC(R) = 0x%2.2x "
+                "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
+                x,
+                !!(x & ACPI_EC_FLAG_SCI),
+                !!(x & ACPI_EC_FLAG_BURST),
+                !!(x & ACPI_EC_FLAG_CMD),
+                !!(x & ACPI_EC_FLAG_IBF),
+                !!(x & ACPI_EC_FLAG_OBF));
        return x;
 }
 
 static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
 {
        u8 x = inb(ec->data_addr);
-       pr_debug("---> data = 0x%2.2x\n", x);
+       pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
        return x;
 }
 
 static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
 {
-       pr_debug("<--- command = 0x%2.2x\n", command);
+       pr_debug("EC_SC(W) = 0x%2.2x\n", command);
        outb(command, ec->command_addr);
 }
 
 static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
 {
-       pr_debug("<--- data = 0x%2.2x\n", data);
+       pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
        outb(data, ec->data_addr);
 }
 
-static int ec_transaction_done(struct acpi_ec *ec)
+static int ec_transaction_completed(struct acpi_ec *ec)
 {
        unsigned long flags;
        int ret = 0;
        spin_lock_irqsave(&ec->lock, flags);
-       if (!ec->curr || ec->curr->done)
+       if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
                ret = 1;
        spin_unlock_irqrestore(&ec->lock, flags);
        return ret;
 }
 
-static void start_transaction(struct acpi_ec *ec)
+static bool advance_transaction(struct acpi_ec *ec)
 {
-       ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
-       ec->curr->done = false;
-       acpi_ec_write_cmd(ec, ec->curr->command);
-}
-
-static void advance_transaction(struct acpi_ec *ec, u8 status)
-{
-       unsigned long flags;
        struct transaction *t;
+       u8 status;
+       bool wakeup = false;
 
-       spin_lock_irqsave(&ec->lock, flags);
+       pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
+       status = acpi_ec_read_status(ec);
        t = ec->curr;
        if (!t)
-               goto unlock;
-       if (t->wlen > t->wi) {
-               if ((status & ACPI_EC_FLAG_IBF) == 0)
-                       acpi_ec_write_data(ec,
-                               t->wdata[t->wi++]);
-               else
-                       goto err;
-       } else if (t->rlen > t->ri) {
-               if ((status & ACPI_EC_FLAG_OBF) == 1) {
-                       t->rdata[t->ri++] = acpi_ec_read_data(ec);
-                       if (t->rlen == t->ri)
-                               t->done = true;
+               goto err;
+       if (t->flags & ACPI_EC_COMMAND_POLL) {
+               if (t->wlen > t->wi) {
+                       if ((status & ACPI_EC_FLAG_IBF) == 0)
+                               acpi_ec_write_data(ec, t->wdata[t->wi++]);
+                       else
+                               goto err;
+               } else if (t->rlen > t->ri) {
+                       if ((status & ACPI_EC_FLAG_OBF) == 1) {
+                               t->rdata[t->ri++] = acpi_ec_read_data(ec);
+                               if (t->rlen == t->ri) {
+                                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                                       wakeup = true;
+                               }
+                       } else
+                               goto err;
+               } else if (t->wlen == t->wi &&
+                          (status & ACPI_EC_FLAG_IBF) == 0) {
+                       t->flags |= ACPI_EC_COMMAND_COMPLETE;
+                       wakeup = true;
+               }
+               return wakeup;
+       } else {
+               if ((status & ACPI_EC_FLAG_IBF) == 0) {
+                       acpi_ec_write_cmd(ec, t->command);
+                       t->flags |= ACPI_EC_COMMAND_POLL;
                } else
                        goto err;
-       } else if (t->wlen == t->wi &&
-                  (status & ACPI_EC_FLAG_IBF) == 0)
-               t->done = true;
-       goto unlock;
+               return wakeup;
+       }
 err:
        /*
         * If the SCI bit is set, don't treat this as a false IRQ;
         * otherwise an unhandled IRQ would be mistaken for a false one.
         */
-       if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI))
-               ++t->irq_count;
+       if (!(status & ACPI_EC_FLAG_SCI)) {
+               if (in_interrupt() && t)
+                       ++t->irq_count;
+       }
+       return wakeup;
+}
 
-unlock:
-       spin_unlock_irqrestore(&ec->lock, flags);
+static void start_transaction(struct acpi_ec *ec)
+{
+       ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
+       ec->curr->flags = 0;
+       (void)advance_transaction(ec);
 }
 
 static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
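
The rework above replaces the single "done" flag with a small two-phase
state machine: the command byte is written once IBF clears (POLL set),
then data bytes move until the transaction is marked COMPLETE. A
compact model of that handshake in plain C, names illustrative only:

	#include <stdbool.h>
	#include <stdint.h>

	#define CMD_POLL     0x01 /* command accepted, moving data */
	#define CMD_COMPLETE 0x02 /* last byte transferred */

	struct txn {
		uint8_t flags;
		int wi, wlen; /* write index / length */
		int ri, rlen; /* read index / length  */
	};

	/* One step of the handshake, driven by the IBF/OBF status
	 * bits; this mirrors the shape of advance_transaction() only,
	 * without the error and wakeup handling. */
	static void txn_advance(struct txn *t, bool ibf, bool obf)
	{
		if (!(t->flags & CMD_POLL)) {
			if (!ibf)               /* command byte accepted */
				t->flags |= CMD_POLL;
			return;
		}
		if (t->wi < t->wlen) {
			if (!ibf)
				t->wi++;        /* write next data byte */
		} else if (t->ri < t->rlen) {
			if (obf && ++t->ri == t->rlen)
				t->flags |= CMD_COMPLETE;
		} else if (!ibf) {
			t->flags |= CMD_COMPLETE; /* write-only txn done */
		}
	}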
@@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
                        /* don't sleep with disabled interrupts */
                        if (EC_FLAGS_MSI || irqs_disabled()) {
                                udelay(ACPI_EC_MSI_UDELAY);
-                               if (ec_transaction_done(ec))
+                               if (ec_transaction_completed(ec))
                                        return 0;
                        } else {
                                if (wait_event_timeout(ec->wait,
-                                               ec_transaction_done(ec),
+                                               ec_transaction_completed(ec),
                                                msecs_to_jiffies(1)))
                                        return 0;
                        }
-                       advance_transaction(ec, acpi_ec_read_status(ec));
+                       spin_lock_irqsave(&ec->lock, flags);
+                       (void)advance_transaction(ec);
+                       spin_unlock_irqrestore(&ec->lock, flags);
                } while (time_before(jiffies, delay));
                pr_debug("controller reset, restart transaction\n");
                spin_lock_irqsave(&ec->lock, flags);
@@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
        return ret;
 }
 
-static int ec_check_ibf0(struct acpi_ec *ec)
-{
-       u8 status = acpi_ec_read_status(ec);
-       return (status & ACPI_EC_FLAG_IBF) == 0;
-}
-
-static int ec_wait_ibf0(struct acpi_ec *ec)
-{
-       unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
-       /* interrupt wait manually if GPE mode is not active */
-       while (time_before(jiffies, delay))
-               if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
-                                       msecs_to_jiffies(1)))
-                       return 0;
-       return -ETIME;
-}
-
 static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
 {
        int status;
@@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                        goto unlock;
                }
        }
-       if (ec_wait_ibf0(ec)) {
-               pr_err("input buffer is not empty, "
-                               "aborting transaction\n");
-               status = -ETIME;
-               goto end;
-       }
        pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
                        t->command, t->wdata ? t->wdata[0] : 0);
        /* disable GPE during transaction if storm is detected */
@@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
                set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
        }
        pr_debug("transaction end\n");
-end:
        if (ec->global_lock)
                acpi_release_global_lock(glk);
 unlock:
@@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
 static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
        u32 gpe_number, void *data)
 {
+       unsigned long flags;
        struct acpi_ec *ec = data;
-       u8 status = acpi_ec_read_status(ec);
 
-       pr_debug("~~~> interrupt, status:0x%02x\n", status);
-
-       advance_transaction(ec, status);
-       if (ec_transaction_done(ec) &&
-           (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
+       spin_lock_irqsave(&ec->lock, flags);
+       if (advance_transaction(ec))
                wake_up(&ec->wait);
-               ec_check_sci(ec, acpi_ec_read_status(ec));
-       }
+       spin_unlock_irqrestore(&ec->lock, flags);
+       ec_check_sci(ec, acpi_ec_read_status(ec));
        return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
 }
 
@@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
        /* fall through */
        }
 
-       if (EC_FLAGS_SKIP_DSDT_SCAN)
+       if (EC_FLAGS_SKIP_DSDT_SCAN) {
+               kfree(saved_ec);
                return -ENODEV;
+       }
 
        /* This workaround is needed only on some broken machines,
         * which require early EC, but fail to provide ECDT */
@@ -1105,6 +1110,7 @@ install:
        }
 error:
        kfree(boot_ec);
+       kfree(saved_ec);
        boot_ec = NULL;
        return -ENODEV;
 }
index 3f2bdc8..bad25b0 100644
@@ -235,7 +235,8 @@ void acpi_os_vprintf(const char *fmt, va_list args)
 static unsigned long acpi_rsdp;
 static int __init setup_acpi_rsdp(char *arg)
 {
-       acpi_rsdp = simple_strtoul(arg, NULL, 16);
+       if (kstrtoul(arg, 16, &acpi_rsdp))
+               return -EINVAL;
        return 0;
 }
 early_param("acpi_rsdp", setup_acpi_rsdp);
index 0bdacc5..2ba8f02 100644
@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_MEMORY24:
                memory24 = &ares->data.memory24;
-               if (!memory24->address_length)
+               if (!memory24->minimum && !memory24->address_length)
                        return false;
                acpi_dev_get_memresource(res, memory24->minimum,
                                         memory24->address_length,
@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
                break;
        case ACPI_RESOURCE_TYPE_MEMORY32:
                memory32 = &ares->data.memory32;
-               if (!memory32->address_length)
+               if (!memory32->minimum && !memory32->address_length)
                        return false;
                acpi_dev_get_memresource(res, memory32->minimum,
                                         memory32->address_length,
@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
                break;
        case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
                fixed_memory32 = &ares->data.fixed_memory32;
-               if (!fixed_memory32->address_length)
+               if (!fixed_memory32->address && !fixed_memory32->address_length)
                        return false;
                acpi_dev_get_memresource(res, fixed_memory32->address,
                                         fixed_memory32->address_length,
@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
        switch (ares->type) {
        case ACPI_RESOURCE_TYPE_IO:
                io = &ares->data.io;
-               if (!io->address_length)
+               if (!io->minimum && !io->address_length)
                        return false;
                acpi_dev_get_ioresource(res, io->minimum,
                                        io->address_length,
@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
                break;
        case ACPI_RESOURCE_TYPE_FIXED_IO:
                fixed_io = &ares->data.fixed_io;
-               if (!fixed_io->address_length)
+               if (!fixed_io->address && !fixed_io->address_length)
                        return false;
                acpi_dev_get_ioresource(res, fixed_io->address,
                                        fixed_io->address_length,
index 05550ba..6d5a6cd 100644
@@ -360,7 +360,8 @@ static int __init acpi_parse_apic_instance(char *str)
        if (!str)
                return -EINVAL;
 
-       acpi_apic_instance = simple_strtoul(str, NULL, 0);
+       if (kstrtoint(str, 0, &acpi_apic_instance))
+               return -EINVAL;
 
        pr_notice("Shall use APIC/MADT table %d\n", acpi_apic_instance);
 
index fb9ffe9..350d52a 100644
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
 MODULE_DESCRIPTION("ACPI Video Driver");
 MODULE_LICENSE("GPL");
 
-static bool brightness_switch_enabled;
+static bool brightness_switch_enabled = true;
 module_param(brightness_switch_enabled, bool, 0644);
 
 /*
@@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void)
                return use_native_backlight_dmi;
 }
 
-static bool acpi_video_verify_backlight_support(void)
+bool acpi_video_verify_backlight_support(void)
 {
        if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
            backlight_device_registered(BACKLIGHT_RAW))
                return false;
        return acpi_video_backlight_support();
 }
+EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
 
 /* backlight device sysfs support */
 static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -562,6 +563,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
                DMI_MATCH(DMI_PRODUCT_NAME, "Aspire V5-471G"),
                },
        },
+       {
+        .callback = video_set_use_native_backlight,
+        .ident = "Acer TravelMate B113",
+        .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
+               },
+       },
        {
        .callback = video_set_use_native_backlight,
        .ident = "HP ProBook 4340s",
@@ -572,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
        },
        {
        .callback = video_set_use_native_backlight,
+       .ident = "HP ProBook 4540s",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
+               },
+       },
+       {
+       .callback = video_set_use_native_backlight,
        .ident = "HP ProBook 2013 models",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
index 33e3db5..c42feb2 100644
@@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
                DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
                },
        },
+       {
+       .callback = video_detect_force_vendor,
+       .ident = "Dell Inspiron 5737",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
+               },
+       },
        { },
 };
 
index dae5607..4cd52a4 100644
@@ -456,6 +456,7 @@ static const struct pci_device_id ahci_pci_tbl[] = {
 
        /* Promise */
        { PCI_VDEVICE(PROMISE, 0x3f20), board_ahci },   /* PDC42819 */
+       { PCI_VDEVICE(PROMISE, 0x3781), board_ahci },   /* FastTrak TX8660 ahci-mode */
 
        /* Asmedia */
        { PCI_VDEVICE(ASMEDIA, 0x0601), board_ahci },   /* ASM1060 */
index 05882e4..5513296 100644
@@ -371,7 +371,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
                      int pmp, unsigned long deadline,
                      int (*check_ready)(struct ata_link *link));
 
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 int ahci_stop_engine(struct ata_port *ap);
+void ahci_start_fis_rx(struct ata_port *ap);
 void ahci_start_engine(struct ata_port *ap);
 int ahci_check_ready(struct ata_link *link);
 int ahci_kick_engine(struct ata_port *ap);
index 3a90152..cac4360 100644
@@ -58,6 +58,8 @@ enum ahci_imx_type {
 struct imx_ahci_priv {
        struct platform_device *ahci_pdev;
        enum ahci_imx_type type;
+       struct clk *sata_clk;
+       struct clk *sata_ref_clk;
        struct clk *ahb_clk;
        struct regmap *gpr;
        bool no_device;
@@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
                        return ret;
        }
 
-       ret = ahci_platform_enable_clks(hpriv);
+       ret = clk_prepare_enable(imxpriv->sata_ref_clk);
        if (ret < 0)
                goto disable_regulator;
 
@@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
                                   !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
        }
 
-       ahci_platform_disable_clks(hpriv);
+       clk_disable_unprepare(imxpriv->sata_ref_clk);
 
        if (hpriv->target_pwr)
                regulator_disable(hpriv->target_pwr);
@@ -324,6 +326,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
        writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
        imx_sata_disable(hpriv);
        imxpriv->no_device = true;
+
+       dev_info(ap->dev, "no device found, disabling link.\n");
+       dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
 }
 
 static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
@@ -385,6 +390,19 @@ static int imx_ahci_probe(struct platform_device *pdev)
        imxpriv->no_device = false;
        imxpriv->first_time = true;
        imxpriv->type = (enum ahci_imx_type)of_id->data;
+
+       imxpriv->sata_clk = devm_clk_get(dev, "sata");
+       if (IS_ERR(imxpriv->sata_clk)) {
+               dev_err(dev, "can't get sata clock.\n");
+               return PTR_ERR(imxpriv->sata_clk);
+       }
+
+       imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
+       if (IS_ERR(imxpriv->sata_ref_clk)) {
+               dev_err(dev, "can't get sata_ref clock.\n");
+               return PTR_ERR(imxpriv->sata_ref_clk);
+       }
+
        imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
        if (IS_ERR(imxpriv->ahb_clk)) {
                dev_err(dev, "can't get ahb clock.\n");
@@ -407,10 +425,14 @@ static int imx_ahci_probe(struct platform_device *pdev)
 
        hpriv->plat_data = imxpriv;
 
-       ret = imx_sata_enable(hpriv);
+       ret = clk_prepare_enable(imxpriv->sata_clk);
        if (ret)
                return ret;
 
+       ret = imx_sata_enable(hpriv);
+       if (ret)
+               goto disable_clk;
+
        /*
         * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
         * and IP vendor specific register IMX_TIMER1MS.
@@ -435,16 +457,24 @@ static int imx_ahci_probe(struct platform_device *pdev)
        ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
                                      0, 0, 0);
        if (ret)
-               imx_sata_disable(hpriv);
+               goto disable_sata;
 
+       return 0;
+
+disable_sata:
+       imx_sata_disable(hpriv);
+disable_clk:
+       clk_disable_unprepare(imxpriv->sata_clk);
        return ret;
 }
 
 static void ahci_imx_host_stop(struct ata_host *host)
 {
        struct ahci_host_priv *hpriv = host->private_data;
+       struct imx_ahci_priv *imxpriv = hpriv->plat_data;
 
        imx_sata_disable(hpriv);
+       clk_disable_unprepare(imxpriv->sata_clk);
 }
 
 #ifdef CONFIG_PM_SLEEP
index ebe505c..b10d81d 100644
@@ -58,7 +58,7 @@ static int ahci_probe(struct platform_device *pdev)
        }
 
        if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
-               hflags |= AHCI_HFLAG_NO_FBS;
+               hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
 
        rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
                                     hflags, 0, 0);
index 042a9bb..ee3a365 100644
@@ -78,6 +78,7 @@
 struct xgene_ahci_context {
        struct ahci_host_priv *hpriv;
        struct device *dev;
+       u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued */
        void __iomem *csr_core;         /* Core CSR address of IP */
        void __iomem *csr_diag;         /* Diag CSR address of IP */
        void __iomem *csr_axi;          /* AXI CSR address of IP */
@@ -97,6 +98,50 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
        return 0;
 }
 
+/**
+ * xgene_ahci_restart_engine - Restart the dma engine.
+ * @ap : ATA port of interest
+ *
+ * Restarts the dma engine inside the controller.
+ */
+static int xgene_ahci_restart_engine(struct ata_port *ap)
+{
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+
+       ahci_stop_engine(ap);
+       ahci_start_fis_rx(ap);
+       hpriv->start_engine(ap);
+
+       return 0;
+}
+
+/**
+ * xgene_ahci_qc_issue - Issue commands to the device
+ * @qc: Command to issue
+ *
+ * Due to a hardware errata affecting the IDENTIFY DEVICE command, the
+ * controller cannot clear the BSY bit after receiving the PIO setup FIS,
+ * which sends the DMA state machine into the CMFatalErrorUpdate state and
+ * locks it up. Restarting the DMA engine brings the controller out of the
+ * locked-up state.
+ */
+static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
+{
+       struct ata_port *ap = qc->ap;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       struct xgene_ahci_context *ctx = hpriv->plat_data;
+       int rc = 0;
+
+       if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
+               xgene_ahci_restart_engine(ap);
+
+       rc = ahci_qc_issue(qc);
+
+       /* Save the last command issued */
+       ctx->last_cmd[ap->port_no] = qc->tf.command;
+
+       return rc;
+}
+
 /**
  * xgene_ahci_read_id - Read ID data from the specified device
  * @dev: device
@@ -104,14 +149,12 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
  * @id: data buffer
  *
  * This custom read ID function is required due to the fact that the HW
- * does not support DEVSLP and the controller state machine may get stuck
- * after processing the ID query command.
+ * does not support DEVSLP.
  */
 static unsigned int xgene_ahci_read_id(struct ata_device *dev,
                                       struct ata_taskfile *tf, u16 *id)
 {
        u32 err_mask;
-       void __iomem *port_mmio = ahci_port_base(dev->link->ap);
 
        err_mask = ata_do_dev_read_id(dev, tf, id);
        if (err_mask)
@@ -133,16 +176,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
         */
        id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
 
-       /*
-        * Due to HW errata, restart the port if no other command active.
-        * Otherwise the controller may get stuck.
-        */
-       if (!readl(port_mmio + PORT_CMD_ISSUE)) {
-               writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
-               readl(port_mmio + PORT_CMD);    /* Force a barrier */
-               writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
-               readl(port_mmio + PORT_CMD);    /* Force a barrier */
-       }
        return 0;
 }
 
@@ -300,6 +333,7 @@ static struct ata_port_operations xgene_ahci_ops = {
        .host_stop = xgene_ahci_host_stop,
        .hardreset = xgene_ahci_hardreset,
        .read_id = xgene_ahci_read_id,
+       .qc_issue = xgene_ahci_qc_issue,
 };
 
 static const struct ata_port_info xgene_ahci_port_info = {
index 40ea583..d72ce04 100644
@@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
 
 static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
 static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
 static int ahci_port_start(struct ata_port *ap);
 static void ahci_port_stop(struct ata_port *ap);
@@ -620,7 +619,7 @@ int ahci_stop_engine(struct ata_port *ap)
 }
 EXPORT_SYMBOL_GPL(ahci_stop_engine);
 
-static void ahci_start_fis_rx(struct ata_port *ap)
+void ahci_start_fis_rx(struct ata_port *ap)
 {
        void __iomem *port_mmio = ahci_port_base(ap);
        struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -646,6 +645,7 @@ static void ahci_start_fis_rx(struct ata_port *ap)
        /* flush */
        readl(port_mmio + PORT_CMD);
 }
+EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
 
 static int ahci_stop_fis_rx(struct ata_port *ap)
 {
@@ -1945,7 +1945,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
 }
 EXPORT_SYMBOL_GPL(ahci_interrupt);
 
-static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
+unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 {
        struct ata_port *ap = qc->ap;
        void __iomem *port_mmio = ahci_port_base(ap);
@@ -1974,6 +1974,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
 
        return 0;
 }
+EXPORT_SYMBOL_GPL(ahci_qc_issue);
 
 static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
 {
index 3a5b4ed..b007758 100644
@@ -250,8 +250,13 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
        if (IS_ERR(hpriv->phy)) {
                rc = PTR_ERR(hpriv->phy);
                switch (rc) {
-               case -ENODEV:
                case -ENOSYS:
+                       /* No PHY support. Check if PHY is required. */
+                       if (of_find_property(dev->of_node, "phys", NULL)) {
+                               dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
+                               goto err_out;
+                       }
+               case -ENODEV:
                        /* continue normally */
                        hpriv->phy = NULL;
                        break;
index 18d97d5..677c0c1 100644
@@ -4787,6 +4787,10 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
  *     ata_qc_new - Request an available ATA command, for queueing
  *     @ap: target port
  *
+ *     Some ATA host controllers implement a queue depth that is less
+ *     than ATA_MAX_QUEUE, so we must not allocate a tag beyond that
+ *     hardware limit.
+ *
  *     LOCKING:
  *     None.
  */
@@ -4794,14 +4798,15 @@ void swap_buf_le16(u16 *buf, unsigned int buf_words)
 static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
 {
        struct ata_queued_cmd *qc = NULL;
+       unsigned int max_queue = ap->host->n_tags;
        unsigned int i, tag;
 
        /* no command while frozen */
        if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
                return NULL;
 
-       for (i = 0; i < ATA_MAX_QUEUE; i++) {
-               tag = (i + ap->last_tag + 1) % ATA_MAX_QUEUE;
+       for (i = 0, tag = ap->last_tag + 1; i < max_queue; i++, tag++) {
+               tag = tag < max_queue ? tag : 0;
 
                /* the last tag is reserved for internal command. */
                if (tag == ATA_TAG_INTERNAL)
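
The rewritten loop scans tags circularly, starting just after the last
tag handed out and wrapping at the per-host limit instead of the
compile-time ATA_MAX_QUEUE. The same scan, extracted into a
self-contained sketch (MAX_QUEUE stands in for host->n_tags, and the
real loop additionally reserves ATA_TAG_INTERNAL):

	#include <stdbool.h>

	#define MAX_QUEUE 8 /* stand-in for host->n_tags */

	/* Find the next free tag, wrapping at MAX_QUEUE, as
	 * ata_qc_new() now does. */
	static int next_free_tag(const bool busy[MAX_QUEUE], int last_tag)
	{
		int i, tag;

		for (i = 0, tag = last_tag + 1; i < MAX_QUEUE; i++, tag++) {
			tag = tag < MAX_QUEUE ? tag : 0; /* wrap around */
			if (!busy[tag])
				return tag;
		}
		return -1; /* all tags in use */
	}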
@@ -6088,6 +6093,7 @@ void ata_host_init(struct ata_host *host, struct device *dev,
 {
        spin_lock_init(&host->lock);
        mutex_init(&host->eh_mutex);
+       host->n_tags = ATA_MAX_QUEUE - 1;
        host->dev = dev;
        host->ops = ops;
 }
@@ -6169,6 +6175,8 @@ int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
 {
        int i, rc;
 
+       host->n_tags = clamp(sht->can_queue, 1, ATA_MAX_QUEUE - 1);
+
        /* host must have been started */
        if (!(host->flags & ATA_HOST_STARTED)) {
                dev_err(host->dev, "BUG: trying to register unstarted host\n");
index 6760fc4..dad83df 100644
@@ -1811,7 +1811,7 @@ static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
        case ATA_DEV_ATA:
                if (err & ATA_ICRC)
                        qc->err_mask |= AC_ERR_ATA_BUS;
-               if (err & ATA_UNC)
+               if (err & (ATA_UNC | ATA_AMNF))
                        qc->err_mask |= AC_ERR_MEDIA;
                if (err & ATA_IDNF)
                        qc->err_mask |= AC_ERR_INVALID;
@@ -2556,11 +2556,12 @@ static void ata_eh_link_report(struct ata_link *link)
                }
 
                if (cmd->command != ATA_CMD_PACKET &&
-                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
-                                    ATA_ABORTED)))
-                       ata_dev_err(qc->dev, "error: { %s%s%s%s}\n",
+                   (res->feature & (ATA_ICRC | ATA_UNC | ATA_AMNF |
+                                    ATA_IDNF | ATA_ABORTED)))
+                       ata_dev_err(qc->dev, "error: { %s%s%s%s%s}\n",
                          res->feature & ATA_ICRC ? "ICRC " : "",
                          res->feature & ATA_UNC ? "UNC " : "",
+                         res->feature & ATA_AMNF ? "AMNF " : "",
                          res->feature & ATA_IDNF ? "IDNF " : "",
                          res->feature & ATA_ABORTED ? "ABRT " : "");
 #endif
index 6ad5c07..4d37c54 100644
@@ -915,7 +915,7 @@ static int ep93xx_pata_probe(struct platform_device *pdev)
        struct ep93xx_pata_data *drv_data;
        struct ata_host *host;
        struct ata_port *ap;
-       unsigned int irq;
+       int irq;
        struct resource *mem_res;
        void __iomem *ide_base;
        int err;
index b1955ba..d65975a 100644
@@ -2155,7 +2155,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
 
                if (!tx->send) continue;
                if (!--left) {
-                       return sprintf(page,"tx[%d]:    0x%ld-0x%ld "
+                       return sprintf(page, "tx[%d]:    0x%lx-0x%lx "
                            "(%6ld bytes), rsv %d cps, shp %d cps%s\n",i,
                            (unsigned long) (tx->send - eni_dev->ram),
                            tx->send-eni_dev->ram+tx->words*4-1,tx->words*4,
@@ -2181,7 +2181,7 @@ static int eni_proc_read(struct atm_dev *dev,loff_t *pos,char *page)
                        if (--left) continue;
                        length = sprintf(page,"vcc %4d: ",vcc->vci);
                        if (eni_vcc->rx) {
-                               length += sprintf(page+length,"0x%ld-0x%ld "
+                               length += sprintf(page+length, "0x%lx-0x%lx "
                                    "(%6ld bytes)",
                                    (unsigned long) (eni_vcc->recv - eni_dev->ram),
                                    eni_vcc->recv-eni_dev->ram+eni_vcc->words*4-1,
index 83969f8..6467c91 100644
@@ -176,14 +176,24 @@ static int __init cma_activate_area(struct cma *cma)
                base_pfn = pfn;
                for (j = pageblock_nr_pages; j; --j, pfn++) {
                        WARN_ON_ONCE(!pfn_valid(pfn));
+                       /*
+                        * alloc_contig_range requires the pfn range
+                        * specified to be in the same zone. Make this
+                        * simple by forcing the entire CMA resv range
+                        * to be in the same zone.
+                        */
                        if (page_zone(pfn_to_page(pfn)) != zone)
-                               return -EINVAL;
+                               goto err;
                }
                init_cma_reserved_pageblock(pfn_to_page(base_pfn));
        } while (--i);
 
        mutex_init(&cma->lock);
        return 0;
+
+err:
+       kfree(cma->bitmap);
+       return -EINVAL;
 }
 
 static struct cma cma_areas[MAX_CMA_AREAS];
index 9e9227e..eee48c4 100644
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
        return dev->archdata.irqs[num];
 #else
        struct resource *r;
-       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-               return of_irq_get(dev->dev.of_node, num);
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+               int ret;
+
+               ret = of_irq_get(dev->dev.of_node, num);
+               if (ret >= 0 || ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        r = platform_get_resource(dev, IORESOURCE_IRQ, num);
 
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
 {
        struct resource *r;
 
-       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node)
-               return of_irq_get_byname(dev->dev.of_node, name);
+       if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
+               int ret;
+
+               ret = of_irq_get_byname(dev->dev.of_node, name);
+               if (ret >= 0 || ret == -EPROBE_DEFER)
+                       return ret;
+       }
 
        r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
        return r ? r->start : -ENXIO;
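
With these two hunks, an of_irq_get() failure other than -EPROBE_DEFER
no longer ends the lookup; it falls back to the IORESOURCE_IRQ
resource, while a deferral is passed straight to the caller. A consumer
only has to propagate negative values; a hypothetical probe sketch
(kernel context assumed, not compilable standalone):

	/* Hypothetical driver: hand -EPROBE_DEFER back to the driver
	 * core so probing is retried once the irqchip is ready. */
	static int foo_probe(struct platform_device *pdev)
	{
		int irq = platform_get_irq(pdev, 0);

		if (irq < 0)
			return irq;

		/* ... devm_request_irq(&pdev->dev, irq, ...) ... */
		return 0;
	}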
index 1b35c45..3f2e167 100644
@@ -544,6 +544,12 @@ void conn_try_outdate_peer_async(struct drbd_connection *connection)
        struct task_struct *opa;
 
        kref_get(&connection->kref);
+       /* We may just have force_sig()'ed this thread
+        * to get it out of some blocking network function.
+        * Clear signals; otherwise kthread_run(), which internally uses
+        * wait_on_completion_killable(), will mistake our pending signal
+        * for a new fatal signal and fail. */
+       flush_signals(current);
        opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
        if (IS_ERR(opa)) {
                drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
index b6c8aaf..5b17ec8 100644
@@ -1337,8 +1337,11 @@ int drbd_submit_peer_request(struct drbd_device *device,
                return 0;
        }
 
+       /* Discards don't have any payload.
+        * But the scsi layer still expects a bio_vec it can use internally,
+        * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
        if (peer_req->flags & EE_IS_TRIM)
-               nr_pages = 0; /* discards don't have any payload. */
+               nr_pages = 1;
 
        /* In most cases, we will only need one bio.  But in case the lower
         * level restrictions happen to be different at this offset on this
index 677db04..56d46ff 100644
@@ -3777,7 +3777,7 @@ static void floppy_rb0_cb(struct bio *bio, int err)
        int drive = cbdata->drive;
 
        if (err) {
-               pr_info("floppy: error %d while reading block 0", err);
+               pr_info("floppy: error %d while reading block 0\n", err);
                set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
        }
        complete(&cbdata->complete);
index 77087a2..a3b042c 100644
@@ -79,7 +79,7 @@ MODULE_PARM_DESC(home_node, "Home node for the device");
 
 static int queue_mode = NULL_Q_MQ;
 module_param(queue_mode, int, S_IRUGO);
-MODULE_PARM_DESC(use_mq, "Use blk-mq interface (0=bio,1=rq,2=multiqueue)");
+MODULE_PARM_DESC(queue_mode, "Block interface to use (0=bio,1=rq,2=multiqueue)");
 
 static int gb = 250;
 module_param(gb, int, S_IRUGO);
@@ -227,7 +227,10 @@ static void null_cmd_end_timer(struct nullb_cmd *cmd)
 
 static void null_softirq_done_fn(struct request *rq)
 {
-       end_cmd(blk_mq_rq_to_pdu(rq));
+       if (queue_mode == NULL_Q_MQ)
+               end_cmd(blk_mq_rq_to_pdu(rq));
+       else
+               end_cmd(rq->special);
 }
 
 static inline void null_handle_cmd(struct nullb_cmd *cmd)
index bbeb404..b2c98c1 100644
@@ -1431,6 +1431,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
 }
 
+static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
+{
+       struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
+
+       return obj_request->img_offset <
+           round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
+}
+
 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
 {
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
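
obj_request_overlaps_parent() rounds parent_overlap up to a whole
object so that a request landing in a partially covered object still
goes through the parent (copy-up) path. Worked numbers, assuming 4 MiB
objects (invented values):

	#include <stdio.h>

	int main(void)
	{
		unsigned long obj_bytes = 4UL << 20; /* object size */
		unsigned long overlap   = 5UL << 20; /* parent_overlap */

		/* round_up(overlap, obj_bytes): the second object is only
		 * half covered, but must still consult the parent. */
		unsigned long limit =
			(overlap + obj_bytes - 1) / obj_bytes * obj_bytes;

		printf("%lu MiB\n", limit >> 20); /* prints 8 */
		return 0;
	}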
@@ -2748,7 +2756,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
         */
        if (!img_request_write_test(img_request) ||
                !img_request_layered_test(img_request) ||
-               rbd_dev->parent_overlap <= obj_request->img_offset ||
+               !obj_request_overlaps_parent(obj_request) ||
                ((known = obj_request_known_test(obj_request)) &&
                        obj_request_exists_test(obj_request))) {
 
index 48eccb3..36e54be 100644
@@ -624,7 +624,16 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
        zram->disksize = 0;
        if (reset_capacity)
                set_capacity(zram->disk, 0);
+
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate the disk outside of init_lock to avoid a lockdep
+        * splat. This is safe because the disk's capacity is protected
+        * by init_lock, so revalidate_disk() always sees an up-to-date
+        * capacity.
+        */
+       if (reset_capacity)
+               revalidate_disk(zram->disk);
 }
 
 static ssize_t disksize_store(struct device *dev,
@@ -665,6 +674,14 @@ static ssize_t disksize_store(struct device *dev,
        zram->disksize = disksize;
        set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
        up_write(&zram->init_lock);
+
+       /*
+        * Revalidate the disk outside of init_lock to avoid a lockdep
+        * splat. This is safe because the disk's capacity is protected
+        * by init_lock, so revalidate_disk() always sees an up-to-date
+        * capacity.
+        */
+       revalidate_disk(zram->disk);
+
        return len;
 
 out_destroy_comp:
index a118ec1..1f37d98 100644
@@ -45,7 +45,7 @@ config OMAP_INTERCONNECT
 
 config ARM_CCI
        bool "ARM CCI driver support"
-       depends on ARM
+       depends on ARM && OF && CPU_V7
        help
          Driver supporting the CCI cache coherent interconnect for ARM
          platforms.
index 334601c..c4419ea 100644
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
 static int data_avail;
 static u8 *rng_buffer;
 
+static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
+                              int wait);
+
 static size_t rng_buffer_size(void)
 {
        return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
 }
 
+static void add_early_randomness(struct hwrng *rng)
+{
+       unsigned char bytes[16];
+       int bytes_read;
+
+       /*
+        * Currently only virtio-rng cannot return data during device
+        * probe, and that's handled in virtio-rng.c itself.  If there
+        * are more such devices, this call to rng_get_data can be
+        * made conditional here instead of doing it per-device.
+        */
+       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
+       if (bytes_read > 0)
+               add_device_randomness(bytes, bytes_read);
+}
+
 static inline int hwrng_init(struct hwrng *rng)
 {
-       if (!rng->init)
-               return 0;
-       return rng->init(rng);
+       if (rng->init) {
+               int ret;
+
+               ret = rng->init(rng);
+               if (ret)
+                       return ret;
+       }
+       add_early_randomness(rng);
+       return 0;
 }
 
 static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
 {
        int err = -EINVAL;
        struct hwrng *old_rng, *tmp;
-       unsigned char bytes[16];
-       int bytes_read;
 
        if (rng->name == NULL ||
            (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
        INIT_LIST_HEAD(&rng->list);
        list_add_tail(&rng->list, &rng_list);
 
-       bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
-       if (bytes_read > 0)
-               add_device_randomness(bytes, bytes_read);
+       if (old_rng && !rng->init) {
+               /*
+                * Use a new device's input to add some randomness to
+                * the system.  If this rng device isn't going to be
+                * used right away, its init function hasn't been
+                * called yet; so only use the randomness from devices
+                * that don't need an init callback.
+                */
+               add_early_randomness(rng);
+       }
+
 out_unlock:
        mutex_unlock(&rng_mutex);
 out:
index f3e7150..e9b15bc 100644
@@ -38,6 +38,8 @@ struct virtrng_info {
        int index;
 };
 
+static bool probe_done;
+
 static void random_recv_done(struct virtqueue *vq)
 {
        struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
        int ret;
        struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
 
+       /*
+        * Don't ask the host for data until we're set up.  This call
+        * can happen during hwrng_register(), after commit d9e7972619.
+        */
+       if (unlikely(!probe_done))
+               return 0;
+
        if (!vi->busy) {
                vi->busy = true;
                init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
                return err;
        }
 
+       probe_done = true;
        return 0;
 }
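
The probe_done flag orders a single probe against reads that can arrive
from hwrng_register() before setup finishes. A minimal sketch of the
pattern in isolation; ready and do_hw_read() are illustrative names:

static bool ready;      /* set once probe has completed */

static int guarded_read(void *buf, size_t size)
{
        if (unlikely(!ready))
                return 0;       /* early caller: report "no data yet" */
        return do_hw_read(buf, size);
}
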
 
index d915707..93dcad0 100644 (file)
@@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs)
        if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
                return -ENOMEM;
        cpumask_copy(old_mask, &current->cpus_allowed);
-       set_cpus_allowed_ptr(current, cpumask_of(0));
+       rc = set_cpus_allowed_ptr(current, cpumask_of(0));
+       if (rc)
+               goto out;
        if (smp_processor_id() != 0) {
                rc = -EBUSY;
                goto out;
index 4ad71ef..71529e1 100644 (file)
@@ -641,7 +641,7 @@ retry:
                } while (unlikely(entropy_count < pool_size-2 && pnfrac));
        }
 
-       if (entropy_count < 0) {
+       if (unlikely(entropy_count < 0)) {
                pr_warn("random: negative entropy/overflow: pool %s count %d\n",
                        r->name, entropy_count);
                WARN_ON(1);
@@ -980,26 +980,37 @@ static void push_to_pool(struct work_struct *work)
 static size_t account(struct entropy_store *r, size_t nbytes, int min,
                      int reserved)
 {
-       int have_bytes;
        int entropy_count, orig;
-       size_t ibytes;
+       size_t ibytes, nfrac;
 
        BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
 
        /* Can we pull enough? */
 retry:
        entropy_count = orig = ACCESS_ONCE(r->entropy_count);
-       have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
        ibytes = nbytes;
        /* If limited, never pull more than available */
-       if (r->limit)
-               ibytes = min_t(size_t, ibytes, have_bytes - reserved);
+       if (r->limit) {
+               int have_bytes = entropy_count >> (ENTROPY_SHIFT + 3);
+
+               if ((have_bytes -= reserved) < 0)
+                       have_bytes = 0;
+               ibytes = min_t(size_t, ibytes, have_bytes);
+       }
        if (ibytes < min)
                ibytes = 0;
-       if (have_bytes >= ibytes + reserved)
-               entropy_count -= ibytes << (ENTROPY_SHIFT + 3);
+
+       if (unlikely(entropy_count < 0)) {
+               pr_warn("random: negative entropy count: pool %s count %d\n",
+                       r->name, entropy_count);
+               WARN_ON(1);
+               entropy_count = 0;
+       }
+       nfrac = ibytes << (ENTROPY_SHIFT + 3);
+       if ((size_t) entropy_count > nfrac)
+               entropy_count -= nfrac;
        else
-               entropy_count = reserved << (ENTROPY_SHIFT + 3);
+               entropy_count = 0;
 
        if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
                goto retry;
@@ -1375,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
                            "with %d bits of entropy available\n",
                            current->comm, nonblocking_pool.entropy_total);
 
+       nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
        ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
 
        trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
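
Both hunks above use the same unit convention: the pool counter stores
entropy in fractional bits, 1/8th of a bit each with ENTROPY_SHIFT = 3, so
a byte count converts with a shift of ENTROPY_SHIFT + 3, and the new clamp
in urandom_read() keeps that shift inside a signed int. A hedged sketch of
the arithmetic, with illustrative helper names:

#define ENTROPY_SHIFT 3

/* 1 byte = 8 bits = (8 << ENTROPY_SHIFT) = 64 fracbits */
static inline size_t bytes_to_fracbits(size_t nbytes)
{
        return nbytes << (ENTROPY_SHIFT + 3);
}

/* Largest request whose fracbit count still fits in an int; this is
 * the bound urandom_read() now applies to nbytes. */
static inline size_t max_safe_nbytes(void)
{
        return (size_t)INT_MAX >> (ENTROPY_SHIFT + 3);
}
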
index 9b7b585..3757e9e 100644 (file)
@@ -230,16 +230,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
                        goto err_reg;
                }
 
-               s2mps11_clk->lookup = devm_kzalloc(&pdev->dev,
-                                       sizeof(struct clk_lookup), GFP_KERNEL);
+               s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
+                                       s2mps11_name(s2mps11_clk), NULL);
                if (!s2mps11_clk->lookup) {
                        ret = -ENOMEM;
                        goto err_lup;
                }
 
-               s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
-               s2mps11_clk->lookup->clk = s2mps11_clk->clk;
-
                clkdev_add(s2mps11_clk->lookup);
        }
 
index 12f3c0b..4c449b3 100644 (file)
@@ -1209,7 +1209,7 @@ static struct clk_branch rot_clk = {
 
 static u8 mmcc_pxo_hdmi_map[] = {
        [P_PXO]         = 0,
-       [P_HDMI_PLL]    = 2,
+       [P_HDMI_PLL]    = 3,
 };
 
 static const char *mmcc_pxo_hdmi[] = {
index 4f150c9..7f4a473 100644 (file)
@@ -925,21 +925,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
        GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
                        0, 0),
        GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
-       GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp",
-                       E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
-                       E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
-                       E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
-                       E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
-       GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
+       GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp",
                        E4X12_GATE_IP_ISP, 0, 0, 0),
-       GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp",
+       GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre",
                        E4X12_GATE_IP_ISP, 1, 0, 0),
-       GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp",
+       GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre",
                        E4X12_GATE_IP_ISP, 2, 0, 0),
-       GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp",
+       GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp",
                        E4X12_GATE_IP_ISP, 3, 0, 0),
        GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
        GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
index 1fad4c5..184f642 100644 (file)
@@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
        GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
        GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
        GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
-                       GATE_IP_DISP1, 2, 0, 0),
+                       GATE_IP_DISP1, 9, 0, 0),
        GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
                        GATE_IP_DISP1, 8, 0, 0),
        GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
index 9d7d7ee..a4e6cc7 100644 (file)
@@ -631,7 +631,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
                        SRC_TOP4, 16, 1),
        MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
        MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
-       MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1),
+       MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p,
+                       SRC_TOP4, 28, 1),
 
        MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
                        SRC_TOP5, 0, 1),
@@ -684,7 +685,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
                        SRC_TOP11, 12, 1),
        MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
        MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
-       MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1),
+       MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p,
+                       SRC_TOP11, 28, 1),
 
        MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
                        SRC_TOP12, 4, 1),
@@ -890,8 +892,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
                        GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
                        GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
-       GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
-                       GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
        GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
                        GATE_BUS_TOP, 13, 0, 0),
        GATE(0, "aclk166", "mout_user_aclk166",
@@ -994,34 +994,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
                        SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
 
        /* PERIC Block */
-       GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0),
-       GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0),
-       GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0),
-       GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0),
-       GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0),
-       GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0),
-       GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0),
-       GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0),
-       GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0),
-       GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0),
-       GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0),
-       GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0),
-       GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0),
-       GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0),
-       GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0),
-       GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0),
-       GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0),
-       GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0),
-       GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0),
-       GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0),
-       GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0),
-       GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0),
-       GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0),
-       GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0),
-       GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0),
-       GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0),
-
-       GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0),
+       GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 0, 0, 0),
+       GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 1, 0, 0),
+       GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 2, 0, 0),
+       GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 3, 0, 0),
+       GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 6, 0, 0),
+       GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 7, 0, 0),
+       GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 8, 0, 0),
+       GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 9, 0, 0),
+       GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 10, 0, 0),
+       GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 11, 0, 0),
+       GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 12, 0, 0),
+       GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 13, 0, 0),
+       GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 14, 0, 0),
+       GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 15, 0, 0),
+       GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 16, 0, 0),
+       GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 17, 0, 0),
+       GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 18, 0, 0),
+       GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 20, 0, 0),
+       GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 21, 0, 0),
+       GATE(CLK_PCM1, "pcm1", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 22, 0, 0),
+       GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 23, 0, 0),
+       GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 24, 0, 0),
+       GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 26, 0, 0),
+       GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 28, 0, 0),
+       GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 30, 0, 0),
+       GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric",
+                       GATE_IP_PERIC, 31, 0, 0),
+
+       GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric",
+                       GATE_BUS_PERIC, 22, 0, 0),
 
        /* PERIS Block */
        GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
index ba07168..140f473 100644 (file)
@@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
        ALIAS(HCLK, NULL, "hclk"),
        ALIAS(MPLL, NULL, "mpll"),
        ALIAS(FCLK, NULL, "fclk"),
+       ALIAS(PCLK, NULL, "watchdog"),
+       ALIAS(PCLK_SDI, NULL, "sdi"),
+       ALIAS(HCLK_NAND, NULL, "nand"),
+       ALIAS(PCLK_I2S, NULL, "iis"),
+       ALIAS(PCLK_I2C, NULL, "i2c"),
 };
 
 /* S3C2410 specific clocks */
@@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
        if (!np)
                s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
 
-       if (current_soc == 2410) {
+       if (current_soc == S3C2410) {
                if (_get_rate("xti") == 12 * MHZ) {
                        s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
                        s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
@@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
                samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
                                ARRAY_SIZE(s3c2410_ffactor));
                samsung_clk_register_alias(ctx, s3c2410_aliases,
-                       ARRAY_SIZE(s3c2410_common_aliases));
+                       ARRAY_SIZE(s3c2410_aliases));
                break;
        case S3C2440:
                samsung_clk_register_mux(ctx, s3c2440_muxes,
index efa16ee..8889ff1 100644 (file)
@@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
        ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
        ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
        ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
-       ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"),
-       ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"),
+       ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"),
+       ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"),
+       ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"),
+       ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"),
        ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
        ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
        ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
index c2d2043..bb5f387 100644 (file)
@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
 /* array of all spear 320 clock lookups */
 #ifdef CONFIG_MACH_SPEAR320
 
-#define SPEAR320_CONTROL_REG           (soc_config_base + 0x0000)
+#define SPEAR320_CONTROL_REG           (soc_config_base + 0x0010)
 #define SPEAR320_EXT_CTRL_REG          (soc_config_base + 0x0018)
 
        #define SPEAR320_UARTX_PCLK_MASK                0x1
@@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
        "ras_syn0_gclk", };
 static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
 
-static void __init spear320_clk_init(void __iomem *soc_config_base)
+static void __init spear320_clk_init(void __iomem *soc_config_base,
+                                    struct clk *ras_apb_clk)
 {
        struct clk *clk;
 
@@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
                        SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
                        0, &_lock);
        clk_register_clkdev(clk, NULL, "a3000000.serial");
+       /* Force the parent to ras_apb_clk */
+       clk_set_parent(clk, ras_apb_clk);
 
        clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
                        ARRAY_SIZE(uartx_parents),
@@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
                        SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
                        SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
        clk_register_clkdev(clk, NULL, "a4000000.serial");
+       /* Force the parent to ras_apb_clk */
+       clk_set_parent(clk, ras_apb_clk);
 
        clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
                        ARRAY_SIZE(uartx_parents),
@@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
        clk_register_clkdev(clk, NULL, "60100000.serial");
 }
 #else
-static inline void spear320_clk_init(void __iomem *soc_config_base) { }
+static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { }
 #endif
 
 void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
 {
-       struct clk *clk, *clk1;
+       struct clk *clk, *clk1, *ras_apb_clk;
 
        clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
                        32000);
@@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
        clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
                        RAS_APB_CLK_ENB, 0, &_lock);
        clk_register_clkdev(clk, "ras_apb_clk", NULL);
+       ras_apb_clk = clk;
 
        clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
                        RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
@@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
        else if (of_machine_is_compatible("st,spear310"))
                spear310_clk_init();
        else if (of_machine_is_compatible("st,spear320"))
-               spear320_clk_init(soc_config_base);
+               spear320_clk_init(soc_config_base, ras_apb_clk);
 }
index 44cd27c..670f90d 100644 (file)
@@ -29,7 +29,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
 
        r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        reg = devm_ioremap_resource(&pdev->dev, r);
-       if (!reg)
+       if (IS_ERR(reg))
                return PTR_ERR(reg);
 
        clk_parent = of_clk_get_parent_name(np, 0);
index 5428c9c..72d9727 100644 (file)
@@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw)
        if (i == MAX_APLL_WAIT_TRIES) {
                pr_warn("clock: %s failed transition to '%s'\n",
                        clk_name, (state) ? "locked" : "bypassed");
-       } else {
+               r = -EBUSY;
+       } else
                pr_debug("clock: %s transition to '%s' in %d loops\n",
                         clk_name, (state) ? "locked" : "bypassed", i);
 
-               r = 0;
-       }
-
        return r;
 }
 
@@ -338,7 +336,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
        const char *parent_name;
        u32 val;
 
-       ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
+       ad = kzalloc(sizeof(*ad), GFP_KERNEL);
        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        init = kzalloc(sizeof(*init), GFP_KERNEL);
 
index abd956d..79791e1 100644 (file)
@@ -161,7 +161,8 @@ cleanup:
 }
 
 #if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
-       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX)
+       defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
+       defined(CONFIG_SOC_AM43XX)
 /**
  * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
  * @node: device node for this clock
@@ -322,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
               of_ti_omap4_dpll_x2_setup);
 #endif
 
-#ifdef CONFIG_SOC_AM33XX
+#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
 static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
 {
        ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
index 0197a47..e9d650e 100644 (file)
@@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node)
        u8 clk_mux_flags = 0;
        u32 mask = 0;
        u32 shift = 0;
-       u32 flags = 0;
+       u32 flags = CLK_SET_RATE_NO_REPARENT;
 
        num_parents = of_clk_get_parent_count(node);
        if (num_parents < 2) {
index 8d64200..ab51bf2 100644 (file)
@@ -153,19 +153,16 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
 }
 
 /* Clocksource handling */
-static void exynos4_mct_frc_start(u32 hi, u32 lo)
+static void exynos4_mct_frc_start(void)
 {
        u32 reg;
 
-       exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
-       exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
-
        reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
        reg |= MCT_G_TCON_START;
        exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
 }
 
-static cycle_t exynos4_frc_read(struct clocksource *cs)
+static cycle_t notrace _exynos4_frc_read(void)
 {
        unsigned int lo, hi;
        u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
@@ -179,9 +176,14 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
        return ((cycle_t)hi << 32) | lo;
 }
 
+static cycle_t exynos4_frc_read(struct clocksource *cs)
+{
+       return _exynos4_frc_read();
+}
+
 static void exynos4_frc_resume(struct clocksource *cs)
 {
-       exynos4_mct_frc_start(0, 0);
+       exynos4_mct_frc_start();
 }
 
 struct clocksource mct_frc = {
@@ -195,12 +197,23 @@ struct clocksource mct_frc = {
 
 static u64 notrace exynos4_read_sched_clock(void)
 {
-       return exynos4_frc_read(&mct_frc);
+       return _exynos4_frc_read();
+}
+
+static struct delay_timer exynos4_delay_timer;
+
+static cycles_t exynos4_read_current_timer(void)
+{
+       return _exynos4_frc_read();
 }
 
 static void __init exynos4_clocksource_init(void)
 {
-       exynos4_mct_frc_start(0, 0);
+       exynos4_mct_frc_start();
+
+       exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
+       exynos4_delay_timer.freq = clk_rate;
+       register_current_timer_delay(&exynos4_delay_timer);
 
        if (clocksource_register_hz(&mct_frc, clk_rate))
                panic("%s: can't register clocksource\n", mct_frc.name);
index e473d65..ffe350f 100644 (file)
@@ -186,6 +186,8 @@ config CPU_FREQ_GOV_CONSERVATIVE
 config GENERIC_CPUFREQ_CPU0
        tristate "Generic CPU0 cpufreq driver"
        depends on HAVE_CLK && OF
+       # if CPU_THERMAL is on and THERMAL=m, CPU0 cannot be =y:
+       depends on !CPU_THERMAL || THERMAL
        select PM_OPP
        help
          This adds a generic cpufreq driver for CPU0 frequency management.
index ebac671..7364a53 100644 (file)
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
        tristate "Freescale i.MX6 cpufreq support"
        depends on ARCH_MXC
        depends on REGULATOR_ANATOP
+       select PM_OPP
        help
          This adds cpufreq driver support for Freescale i.MX6 series SoCs.
 
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
          If in doubt, say Y.
 
 config ARM_KIRKWOOD_CPUFREQ
-       def_bool MACH_KIRKWOOD
+       def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
        help
          This adds the CPUFreq driver for Marvell Kirkwood
          SoCs.
index 738c8b7..db6d9a2 100644 (file)
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ)  += arm_big_little.o
 # LITTLE drivers, so that it is probed last.
 obj-$(CONFIG_ARM_DT_BL_CPUFREQ)                += arm_big_little_dt.o
 
-obj-$(CONFIG_ARCH_DAVINCI_DA850)       += davinci-cpufreq.o
+obj-$(CONFIG_ARCH_DAVINCI)             += davinci-cpufreq.o
 obj-$(CONFIG_UX500_SOC_DB8500)         += dbx500-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS_CPUFREQ)       += exynos-cpufreq.o
 obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ)   += exynos4210-cpufreq.o
index ee1ae30..86beda9 100644 (file)
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
                goto out_put_reg;
        }
 
-       ret = of_init_opp_table(cpu_dev);
-       if (ret) {
-               pr_err("failed to init OPP table: %d\n", ret);
-               goto out_put_clk;
-       }
+       /* OPPs might be populated at runtime; don't check for an error here */
+       of_init_opp_table(cpu_dev);
 
        ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
        if (ret) {
index aed2b0c..6f02485 100644 (file)
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
         * the creation of a brand new one. So we need to perform this update
         * by invoking update_policy_cpu().
         */
-       if (recover_policy && cpu != policy->cpu)
+       if (recover_policy && cpu != policy->cpu) {
                update_policy_cpu(policy, cpu);
-       else
+               WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
+       } else {
                policy->cpu = cpu;
+       }
 
        cpumask_copy(policy->cpus, cpumask_of(cpu));
 
@@ -2242,10 +2244,8 @@ int cpufreq_update_policy(unsigned int cpu)
        struct cpufreq_policy new_policy;
        int ret;
 
-       if (!policy) {
-               ret = -ENODEV;
-               goto no_policy;
-       }
+       if (!policy)
+               return -ENODEV;
 
        down_write(&policy->rwsem);
 
@@ -2264,7 +2264,7 @@ int cpufreq_update_policy(unsigned int cpu)
                new_policy.cur = cpufreq_driver->get(cpu);
                if (WARN_ON(!new_policy.cur)) {
                        ret = -EIO;
-                       goto no_policy;
+                       goto unlock;
                }
 
                if (!policy->cur) {
@@ -2279,10 +2279,10 @@ int cpufreq_update_policy(unsigned int cpu)
 
        ret = cpufreq_set_policy(policy, &new_policy);
 
+unlock:
        up_write(&policy->rwsem);
 
        cpufreq_cpu_put(policy);
-no_policy:
        return ret;
 }
 EXPORT_SYMBOL(cpufreq_update_policy);
index 4e7f492..86631cb 100644 (file)
@@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs;
 
 struct perf_limits {
        int no_turbo;
+       int turbo_disabled;
        int max_perf_pct;
        int min_perf_pct;
        int32_t max_perf;
@@ -196,10 +197,7 @@ static signed int pid_calc(struct _pid *pid, int32_t busy)
        pid->last_err = fp_error;
 
        result = pterm + mul_fp(pid->integral, pid->i_gain) + dterm;
-       if (result >= 0)
-               result = result + (1 << (FRAC_BITS-1));
-       else
-               result = result - (1 << (FRAC_BITS-1));
+       result = result + (1 << (FRAC_BITS-1));
        return (signed int)fp_toint(result);
 }
 
@@ -290,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
        if (ret != 1)
                return -EINVAL;
        limits.no_turbo = clamp_t(int, input, 0 , 1);
-
+       if (limits.turbo_disabled) {
+               pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
+               limits.no_turbo = limits.turbo_disabled;
+       }
        return count;
 }
 
@@ -360,21 +361,21 @@ static int byt_get_min_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 8) & 0x3F;
+       return (value >> 8) & 0x7F;
 }
 
 static int byt_get_max_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_RATIOS, value);
-       return (value >> 16) & 0x3F;
+       return (value >> 16) & 0x7F;
 }
 
 static int byt_get_turbo_pstate(void)
 {
        u64 value;
        rdmsrl(BYT_TURBO_RATIOS, value);
-       return value & 0x3F;
+       return value & 0x7F;
 }
 
 static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -384,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
        u32 vid;
 
        val = pstate << 8;
-       if (limits.no_turbo)
+       if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
        vid_fp = cpudata->vid.min + mul_fp(
@@ -408,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata)
 
 
        rdmsrl(BYT_VIDS, value);
-       cpudata->vid.min = int_tofp((value >> 8) & 0x3f);
-       cpudata->vid.max = int_tofp((value >> 16) & 0x3f);
+       cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
+       cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
        cpudata->vid.ratio = div_fp(
                cpudata->vid.max - cpudata->vid.min,
                int_tofp(cpudata->pstate.max_pstate -
@@ -451,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
        u64 val;
 
        val = pstate << 8;
-       if (limits.no_turbo)
+       if (limits.no_turbo && !limits.turbo_disabled)
                val |= (u64)1 << 32;
 
        wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -699,9 +700,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
 
        cpu = all_cpu_data[cpunum];
 
-       intel_pstate_get_cpu_pstates(cpu);
-
        cpu->cpu = cpunum;
+       intel_pstate_get_cpu_pstates(cpu);
 
        init_timer_deferrable(&cpu->timer);
        cpu->timer.function = intel_pstate_timer_func;
@@ -744,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
                limits.min_perf = int_tofp(1);
                limits.max_perf_pct = 100;
                limits.max_perf = int_tofp(1);
-               limits.no_turbo = 0;
+               limits.no_turbo = limits.turbo_disabled;
                return 0;
        }
        limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -787,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 {
        struct cpudata *cpu;
        int rc;
+       u64 misc_en;
 
        rc = intel_pstate_init_cpu(policy->cpu);
        if (rc)
@@ -794,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
 
        cpu = all_cpu_data[policy->cpu];
 
-       if (!limits.no_turbo &&
-               limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
+       rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
+       if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
+               cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
+               limits.turbo_disabled = 1;
+               limits.no_turbo = 1;
+       }
+       if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
                policy->policy = CPUFREQ_POLICY_PERFORMANCE;
        else
                policy->policy = CPUFREQ_POLICY_POWERSAVE;
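
Distilled, the init-time check above treats turbo as unusable in exactly
two cases. A hedged sketch; the helper name is illustrative:

static bool turbo_unavailable(struct cpudata *cpu)
{
        u64 misc_en;

        rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
        if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE)
                return true;    /* BIOS locked turbo off */

        /* ... or the part has no turbo range at all. */
        return cpu->pstate.max_pstate == cpu->pstate.turbo_pstate;
}
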
index 5463767..b5befc2 100644 (file)
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
                        name = "K4S641632D";
                if (machine_is_h3100())
                        name = "KM416S4030CT";
-               if (machine_is_jornada720())
+               if (machine_is_jornada720() || machine_is_h3600())
                        name = "K4S281632B-1H";
                if (machine_is_nanoengine())
                        name = "MT48LC8M16A2TG-75";
index 28587d0..a5fba02 100644 (file)
@@ -55,7 +55,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
                .power_usage            = 50,
                .target_residency       = 100,
                .flags                  = CPUIDLE_FLAG_TIME_VALID,
-               .name                   = "MV CPU IDLE",
+               .name                   = "Idle",
                .desc                   = "CPU power down",
        },
        .states[2]              = {
@@ -65,7 +65,7 @@ static struct cpuidle_driver armada_370_xp_idle_driver = {
                .target_residency       = 1000,
                .flags                  = CPUIDLE_FLAG_TIME_VALID |
                                                ARMADA_370_XP_FLAG_DEEP_IDLE,
-               .name                   = "MV CPU DEEP IDLE",
+               .name                   = "Deep idle",
                .desc                   = "CPU and L2 Fabric power down",
        },
        .state_count = ARMADA_370_XP_MAX_STATES,
index 1d80bd3..b512a4b 100644 (file)
@@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev)
        int error;
 
        jrdev = &pdev->dev;
-       jrpriv = kmalloc(sizeof(struct caam_drv_private_jr),
-                        GFP_KERNEL);
+       jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
+                             GFP_KERNEL);
        if (!jrpriv)
                return -ENOMEM;
 
@@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev)
 
        /* Now do the platform independent part */
        error = caam_jr_init(jrdev); /* now turn on hardware */
-       if (error) {
-               kfree(jrpriv);
+       if (error)
                return error;
-       }
 
        jrpriv->dev = jrdev;
        spin_lock(&driver_data.jr_alloc_lock);
index d028f36..8f8b0b6 100644 (file)
@@ -86,6 +86,9 @@
 
 #define USBSS_IRQ_PD_COMP      (1 <<  2)
 
+/* Packet Descriptor */
+#define PD2_ZERO_LENGTH                (1 << 19)
+
 struct cppi41_channel {
        struct dma_chan chan;
        struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                        __iormb();
 
                while (val) {
-                       u32 desc;
+                       u32 desc, len;
 
                        q_num = __fls(val);
                        val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
                                                q_num, desc);
                                continue;
                        }
-                       c->residue = pd_trans_len(c->desc->pd6) -
-                               pd_trans_len(c->desc->pd0);
 
+                       if (c->desc->pd2 & PD2_ZERO_LENGTH)
+                               len = 0;
+                       else
+                               len = pd_trans_len(c->desc->pd0);
+
+                       c->residue = pd_trans_len(c->desc->pd6) - len;
                        dma_cookie_complete(&c->txd);
                        c->txd.callback(c->txd.callback_param);
                }
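
The residue rule in this hunk is "requested minus transferred", with a
zero-length completion contributing nothing. A hedged restatement; the
helper name is illustrative:

static u32 desc_residue(struct cppi41_desc *d)
{
        /* A descriptor flagged PD2_ZERO_LENGTH moved no data. */
        u32 len = (d->pd2 & PD2_ZERO_LENGTH) ? 0 : pd_trans_len(d->pd0);

        return pd_trans_len(d->pd6) - len;      /* requested - done */
}
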
index 1287146..14867e3 100644 (file)
@@ -255,6 +255,7 @@ struct sdma_channel {
        enum dma_slave_buswidth         word_size;
        unsigned int                    buf_tail;
        unsigned int                    num_bd;
+       unsigned int                    period_len;
        struct sdma_buffer_descriptor   *bd;
        dma_addr_t                      bd_phys;
        unsigned int                    pc_from_device, pc_to_device;
@@ -592,6 +593,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
 }
 
 static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
+{
+       if (sdmac->desc.callback)
+               sdmac->desc.callback(sdmac->desc.callback_param);
+}
+
+static void sdma_update_channel_loop(struct sdma_channel *sdmac)
 {
        struct sdma_buffer_descriptor *bd;
 
@@ -611,9 +618,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
                bd->mode.status |= BD_DONE;
                sdmac->buf_tail++;
                sdmac->buf_tail %= sdmac->num_bd;
-
-               if (sdmac->desc.callback)
-                       sdmac->desc.callback(sdmac->desc.callback_param);
        }
 }
 
@@ -669,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
                int channel = fls(stat) - 1;
                struct sdma_channel *sdmac = &sdma->channel[channel];
 
+               if (sdmac->flags & IMX_DMA_SG_LOOP)
+                       sdma_update_channel_loop(sdmac);
+
                tasklet_schedule(&sdmac->tasklet);
 
                __clear_bit(channel, &stat);
@@ -1129,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
        sdmac->status = DMA_IN_PROGRESS;
 
        sdmac->buf_tail = 0;
+       sdmac->period_len = period_len;
 
        sdmac->flags |= IMX_DMA_SG_LOOP;
        sdmac->direction = direction;
@@ -1225,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
                                      struct dma_tx_state *txstate)
 {
        struct sdma_channel *sdmac = to_sdma_chan(chan);
+       u32 residue;
+
+       if (sdmac->flags & IMX_DMA_SG_LOOP)
+               residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
+       else
+               residue = sdmac->chn_count - sdmac->chn_real_count;
 
        dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
-                       sdmac->chn_count - sdmac->chn_real_count);
+                        residue);
 
        return sdmac->status;
 }
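
For the cyclic case the residue counts whole periods still outstanding. A
hedged sketch with a worked value; the helper name is illustrative:

/* e.g. num_bd = 4, buf_tail = 1, period_len = 1024 -> 3072 bytes left */
static u32 cyclic_residue(u32 num_bd, u32 buf_tail, u32 period_len)
{
        return (num_bd - buf_tail) * period_len;
}
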
index 4199849..145974f 100644 (file)
@@ -1,4 +1,5 @@
 menu "IEEE 1394 (FireWire) support"
+       depends on HAS_DMA
        depends on PCI || COMPILE_TEST
        # firewire-core does not depend on PCI but is
        # not useful without PCI controller driver
index c398645..2c68da1 100644 (file)
@@ -1460,7 +1460,8 @@ static int fwnet_probe(struct fw_unit *unit,
                goto have_dev;
        }
 
-       net = alloc_netdev(sizeof(*dev), "firewire%d", fwnet_init_dev);
+       net = alloc_netdev(sizeof(*dev), "firewire%d", NET_NAME_UNKNOWN,
+                          fwnet_init_dev);
        if (net == NULL) {
                mutex_unlock(&fwnet_device_mutex);
                return -ENOMEM;
index 5798541..a66a321 100644 (file)
@@ -336,10 +336,10 @@ static const struct {
                QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, 0,
-               QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
+               QUIRK_CYCLE_TIMER /* FIXME: necessary? */ | QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT6315, PCI_ANY_ID,
-               0},
+               QUIRK_NO_MSI},
 
        {PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
                QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
index 4b9dc83..e992abc 100644 (file)
@@ -40,7 +40,7 @@ struct pstore_read_data {
 static inline u64 generic_id(unsigned long timestamp,
                             unsigned int part, int count)
 {
-       return (timestamp * 100 + part) * 1000 + count;
+       return ((u64) timestamp * 100 + part) * 1000 + count;
 }
 
 static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
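
The cast matters on 32-bit builds, where timestamp * 100 would otherwise
be computed in 32-bit arithmetic and wrap before the widening assignment.
An illustration assuming a 32-bit unsigned long; the function is
hypothetical:

static u64 widening_example(void)
{
        unsigned long ts = 1406851200UL;        /* plausible timestamp */
        u64 wraps  = ts * 100;                  /* 32-bit multiply wraps */
        u64 widens = (u64)ts * 100;             /* always 140685120000 */

        return widens - wraps;  /* nonzero on 32-bit builds */
}
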
index cd36deb..dc79346 100644 (file)
@@ -346,6 +346,7 @@ static __initdata struct {
 
 struct param_info {
        int verbose;
+       int found;
        void *params;
 };
 
@@ -353,25 +354,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
                                       int depth, void *data)
 {
        struct param_info *info = data;
-       void *prop, *dest;
-       unsigned long len;
+       const void *prop;
+       void *dest;
        u64 val;
-       int i;
+       int i, len;
 
        if (depth != 1 ||
            (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
                return 0;
 
-       pr_info("Getting parameters from FDT:\n");
-
        for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
                prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
-               if (!prop) {
-                       pr_err("Can't find %s in device tree!\n",
-                              dt_params[i].name);
+               if (!prop)
                        return 0;
-               }
                dest = info->params + dt_params[i].offset;
+               info->found++;
 
                val = of_read_number(prop, len / sizeof(u32));
 
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
 int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
 {
        struct param_info info;
+       int ret;
+
+       pr_info("Getting EFI parameters from FDT:\n");
 
        info.verbose = verbose;
+       info.found = 0;
        info.params = params;
 
-       return of_scan_flat_dt(fdt_find_uefi_params, &info);
+       ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
+       if (!info.found)
+               pr_info("UEFI not found.\n");
+       else if (!ret)
+               pr_err("Can't find '%s' in device tree!\n",
+                      dt_params[info.found].name);
+
+       return ret;
 }
 #endif /* CONFIG_EFI_PARAMS_FROM_FDT */
index 5c6a8e8..507a3df 100644 (file)
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
        u32 fdt_val32;
        u64 fdt_val64;
 
-       /*
-        * Copy definition of linux_banner here.  Since this code is
-        * built as part of the decompressor for ARM v7, pulling
-        * in version.c where linux_banner is defined for the
-        * kernel brings other kernel dependencies with it.
-        */
-       const char linux_banner[] =
-           "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
-           LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
-
        /* Do some checks on the provided FDT, if it exists */
        if (orig_fdt) {
                if (fdt_check_header(orig_fdt)) {
@@ -63,7 +53,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
         */
        prev = 0;
        for (;;) {
-               const char *type, *name;
+               const char *type;
                int len;
 
                node = fdt_next_node(fdt, prev, NULL);
index fe7c0e2..57adbc9 100644 (file)
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
                        if (spi_present_mask & (1 << addr))
                                chips++;
                }
-               if (!chips)
-                       return -ENODEV;
        } else {
                type = spi_get_device_id(spi)->driver_data;
                pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
                if (!(spi_present_mask & (1 << addr)))
                        continue;
                chips--;
-               if (chips < 0) {
-                       dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
-                       goto fail;
-               }
                data->mcp[addr] = &data->chip[chips];
                status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
                                            0x40 | (addr << 1), type, base,
index 0c9f803..b6ae89e 100644 (file)
@@ -284,6 +284,7 @@ static int gpio_rcar_irq_domain_map(struct irq_domain *h, unsigned int irq,
 
 static struct irq_domain_ops gpio_rcar_irq_domain_ops = {
        .map    = gpio_rcar_irq_domain_map,
+       .xlate  = irq_domain_xlate_twocell,
 };
 
 struct gpio_rcar_info {
index 03711d0..8218078 100644 (file)
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
                        retcode = -EFAULT;
                        goto err_i1;
                }
-       } else
+       } else if (cmd & IOC_OUT) {
                memset(kdata, 0, usize);
+       }
 
        if (ioctl->flags & DRM_UNLOCKED)
                retcode = func(dev, kdata, file_priv);
index 7c2497d..0dc57d5 100644 (file)
@@ -64,6 +64,7 @@
 void drm_modeset_acquire_init(struct drm_modeset_acquire_ctx *ctx,
                uint32_t flags)
 {
+       memset(ctx, 0, sizeof(*ctx));
        ww_acquire_init(&ctx->ww_ctx, &crtc_ww_class);
        INIT_LIST_HEAD(&ctx->locked);
 }
index 482127f..9e530f2 100644 (file)
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
 {
        struct exynos_dpi *ctx = connector_to_dpi(connector);
 
-       if (!ctx->panel->connector)
+       if (ctx->panel && !ctx->panel->connector)
                drm_panel_attach(ctx->panel, &ctx->connector);
 
        return connector_status_connected;
index d91f277..ab7d182 100644 (file)
@@ -765,24 +765,24 @@ static int exynos_drm_init(void)
 
        return 0;
 
-err_unregister_pd:
-       platform_device_unregister(exynos_drm_pdev);
-
 err_remove_vidi:
 #ifdef CONFIG_DRM_EXYNOS_VIDI
        exynos_drm_remove_vidi();
+
+err_unregister_pd:
 #endif
+       platform_device_unregister(exynos_drm_pdev);
 
        return ret;
 }
 
 static void exynos_drm_exit(void)
 {
+       platform_driver_unregister(&exynos_drm_platform_driver);
 #ifdef CONFIG_DRM_EXYNOS_VIDI
        exynos_drm_remove_vidi();
 #endif
        platform_device_unregister(exynos_drm_pdev);
-       platform_driver_unregister(&exynos_drm_platform_driver);
 }
 
 module_init(exynos_drm_init);
index 36535f3..06cde45 100644 (file)
@@ -343,7 +343,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
 int exynos_dpi_remove(struct device *dev);
 #else
 static inline struct exynos_drm_display *
-exynos_dpi_probe(struct device *dev) { return 0; }
+exynos_dpi_probe(struct device *dev) { return NULL; }
 static inline int exynos_dpi_remove(struct device *dev) { return 0; }
 #endif
 
index bb45ab2..33161ad 100644 (file)
@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
                win_data = &ctx->win_data[i];
                if (win_data->enabled)
                        fimd_win_commit(mgr, i);
+               else
+                       fimd_win_disable(mgr, i);
        }
 
        fimd_commit(mgr);
index c104d0c..aa259b0 100644 (file)
@@ -2090,6 +2090,11 @@ out:
 
 static void hdmi_dpms(struct exynos_drm_display *display, int mode)
 {
+       struct hdmi_context *hdata = display->ctx;
+       struct drm_encoder *encoder = hdata->encoder;
+       struct drm_crtc *crtc = encoder->crtc;
+       struct drm_crtc_helper_funcs *funcs = NULL;
+
        DRM_DEBUG_KMS("mode %d\n", mode);
 
        switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
        case DRM_MODE_DPMS_STANDBY:
        case DRM_MODE_DPMS_SUSPEND:
        case DRM_MODE_DPMS_OFF:
+               /*
+                * The SFRs of VP and Mixer are updated by the vertical sync
+                * of the timing generator, which is part of HDMI, so the
+                * sequence to disable the TV subsystem should be:
+                *      VP -> Mixer -> HDMI
+                *
+                * The code below tries to disable the Mixer and VP (if
+                * used) prior to disabling HDMI.
+                */
+               if (crtc)
+                       funcs = crtc->helper_private;
+               if (funcs && funcs->dpms)
+                       (*funcs->dpms)(crtc, mode);
+
                hdmi_poweroff(display);
                break;
        default:
index 4c5aed7..7529946 100644 (file)
@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
        mixer_regs_dump(ctx);
 }
 
+static void mixer_stop(struct mixer_context *ctx)
+{
+       struct mixer_resources *res = &ctx->mixer_res;
+       int timeout = 20;
+
+       mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
+
+       while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
+                       --timeout)
+               usleep_range(10000, 12000);
+
+       mixer_regs_dump(ctx);
+}
+
 static void vp_video_buffer(struct mixer_context *ctx, int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
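
mixer_stop() above is the usual clear-then-poll shutdown with a bounded
wait. A hedged generic form that also reports a wedged engine; the helper
name and return convention are illustrative:

static int stop_and_wait_idle(struct mixer_resources *res)
{
        int timeout = 20;

        mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);

        /* Poll IDLE with sleeps so a stuck mixer cannot hang the
         * caller indefinitely. */
        while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
               --timeout)
                usleep_range(10000, 12000);

        return timeout ? 0 : -ETIMEDOUT;
}
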
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
 static void mixer_layer_update(struct mixer_context *ctx)
 {
        struct mixer_resources *res = &ctx->mixer_res;
-       u32 val;
-
-       val = mixer_reg_read(res, MXR_CFG);
 
-       /* allow one update per vsync only */
-       if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
-               mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
+       mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
 }
 
 static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
        }
        mutex_unlock(&mixer_ctx->mixer_mutex);
 
+       drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
+
        atomic_set(&mixer_ctx->wait_vsync_event, 1);
 
        /*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
                                !atomic_read(&mixer_ctx->wait_vsync_event),
                                HZ/20))
                DRM_DEBUG_KMS("vblank wait timed out.\n");
+
+       drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
 }
 
 static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
                mutex_unlock(&ctx->mixer_mutex);
                return;
        }
-       ctx->powered = true;
+
        mutex_unlock(&ctx->mixer_mutex);
 
        pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
                clk_prepare_enable(res->sclk_mixer);
        }
 
+       mutex_lock(&ctx->mixer_mutex);
+       ctx->powered = true;
+       mutex_unlock(&ctx->mixer_mutex);
+
+       mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
+
        mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
        mixer_win_reset(ctx);
 
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
        struct mixer_resources *res = &ctx->mixer_res;
 
        mutex_lock(&ctx->mixer_mutex);
-       if (!ctx->powered)
-               goto out;
+       if (!ctx->powered) {
+               mutex_unlock(&ctx->mixer_mutex);
+               return;
+       }
        mutex_unlock(&ctx->mixer_mutex);
 
+       mixer_stop(ctx);
        mixer_window_suspend(mgr);
 
        ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
 
+       mutex_lock(&ctx->mixer_mutex);
+       ctx->powered = false;
+       mutex_unlock(&ctx->mixer_mutex);
+
        clk_disable_unprepare(res->mixer);
        if (ctx->vp_enabled) {
                clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
        }
 
        pm_runtime_put_sync(ctx->dev);
-
-       mutex_lock(&ctx->mixer_mutex);
-       ctx->powered = false;
-
-out:
-       mutex_unlock(&ctx->mixer_mutex);
 }
 
 static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
index 4537026..5f32e1a 100644 (file)
@@ -78,6 +78,7 @@
 #define MXR_STATUS_BIG_ENDIAN          (1 << 3)
 #define MXR_STATUS_ENDIAN_MASK         (1 << 3)
 #define MXR_STATUS_SYNC_ENABLE         (1 << 2)
+#define MXR_STATUS_REG_IDLE            (1 << 1)
 #define MXR_STATUS_REG_RUN             (1 << 0)
 
 /* bits for MXR_CFG */
index 240c331..ac357b0 100644 (file)
@@ -810,6 +810,12 @@ static int
 tda998x_encoder_mode_valid(struct drm_encoder *encoder,
                          struct drm_display_mode *mode)
 {
+       if (mode->clock > 150000)
+               return MODE_CLOCK_HIGH;
+       if (mode->htotal >= BIT(13))
+               return MODE_BAD_HVALUE;
+       if (mode->vtotal >= BIT(11))
+               return MODE_BAD_VVALUE;
        return MODE_OK;
 }
 
@@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
                        return i;
                }
        } else {
-               for (i = 10; i > 0; i--) {
-                       msleep(10);
+               for (i = 100; i > 0; i--) {
+                       msleep(1);
                        ret = reg_read(priv, REG_INT_FLAGS_2);
                        if (ret < 0)
                                return ret;
@@ -1183,7 +1189,6 @@ static void
 tda998x_encoder_destroy(struct drm_encoder *encoder)
 {
        struct tda998x_priv *priv = to_tda998x_priv(encoder);
-       drm_i2c_encoder_destroy(encoder);
 
        /* disable all IRQs and free the IRQ handler */
        cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
@@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
 
        if (priv->cec)
                i2c_unregister_device(priv->cec);
+       drm_i2c_encoder_destroy(encoder);
        kfree(priv);
 }
 
index 601caa8..b8c6892 100644 (file)
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
 
                memset(&stats, 0, sizeof(stats));
                stats.file_priv = file->driver_priv;
+               spin_lock(&file->table_lock);
                idr_for_each(&file->object_idr, per_file_stats, &stats);
+               spin_unlock(&file->table_lock);
                /*
                 * Although we have a valid reference on file->pid, that does
                 * not guarantee that the task_struct who called get_pid() is
index 4c22a5b..d443441 100644 (file)
@@ -36,6 +36,8 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include <linux/pci.h>
+#include <linux/console.h>
+#include <linux/vt.h>
 #include <linux/vgaarb.h>
 #include <linux/acpi.h>
 #include <linux/pnp.h>
@@ -1386,7 +1388,6 @@ cleanup_gem:
        i915_gem_context_fini(dev);
        mutex_unlock(&dev->struct_mutex);
        WARN_ON(dev_priv->mm.aliasing_ppgtt);
-       drm_mm_takedown(&dev_priv->gtt.base.mm);
 cleanup_irq:
        drm_irq_uninstall(dev);
 cleanup_gem_stolen:
@@ -1450,6 +1451,39 @@ static void i915_kick_out_firmware_fb(struct drm_i915_private *dev_priv)
 }
 #endif
 
+#if !defined(CONFIG_VGA_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return 0;
+}
+#elif !defined(CONFIG_DUMMY_CONSOLE)
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       return -ENODEV;
+}
+#else
+static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
+{
+       int ret = 0;
+
+       DRM_INFO("Replacing VGA console driver\n");
+
+       console_lock();
+       if (con_is_bound(&vga_con))
+               ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
+       if (ret == 0) {
+               ret = do_unregister_con_driver(&vga_con);
+
+               /* Ignore "already unregistered". */
+               if (ret == -ENODEV)
+                       ret = 0;
+       }
+       console_unlock();
+
+       return ret;
+}
+#endif
+
 static void i915_dump_device_info(struct drm_i915_private *dev_priv)
 {
        const struct intel_device_info *info = &dev_priv->info;
@@ -1623,8 +1657,15 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                goto out_regs;
 
-       if (drm_core_check_feature(dev, DRIVER_MODESET))
+       if (drm_core_check_feature(dev, DRIVER_MODESET)) {
+               ret = i915_kick_out_vgacon(dev_priv);
+               if (ret) {
+                       DRM_ERROR("failed to remove conflicting VGA console\n");
+                       goto out_gtt;
+               }
+
                i915_kick_out_firmware_fb(dev_priv);
+       }
 
        pci_set_master(dev->pdev);
 
@@ -1756,8 +1797,6 @@ out_mtrrfree:
        arch_phys_wc_del(dev_priv->gtt.mtrr);
        io_mapping_free(dev_priv->gtt.mappable);
 out_gtt:
-       list_del(&dev_priv->gtt.base.global_link);
-       drm_mm_takedown(&dev_priv->gtt.base.mm);
        dev_priv->gtt.base.cleanup(&dev_priv->gtt.base);
 out_regs:
        intel_uncore_fini(dev);
@@ -1846,7 +1885,6 @@ int i915_driver_unload(struct drm_device *dev)
                        i915_free_hws(dev);
        }
 
-       list_del(&dev_priv->gtt.base.global_link);
        WARN_ON(!list_empty(&dev_priv->vm_list));
 
        drm_vblank_cleanup(dev);
index 49414d3..374f964 100644 (file)
@@ -656,6 +656,7 @@ enum intel_sbi_destination {
 #define QUIRK_PIPEA_FORCE (1<<0)
 #define QUIRK_LVDS_SSC_DISABLE (1<<1)
 #define QUIRK_INVERT_BRIGHTNESS (1<<2)
+#define QUIRK_BACKLIGHT_PRESENT (1<<3)
 
 struct intel_fbdev;
 struct intel_fbc_work;
@@ -977,6 +978,8 @@ struct i915_power_well {
        bool always_on;
        /* power well enable/disable usage count */
        int count;
+       /* cached hw enabled state */
+       bool hw_enabled;
        unsigned long domains;
        unsigned long data;
        const struct i915_power_well_ops *ops;
index f361263..d893e4d 100644 (file)
@@ -1616,22 +1616,6 @@ out:
        return ret;
 }
 
-void i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
-{
-       struct i915_vma *vma;
-
-       /*
-        * Only the global gtt is relevant for gtt memory mappings, so restrict
-        * list traversal to objects bound into the global address space. Note
-        * that the active list should be empty, but better safe than sorry.
-        */
-       WARN_ON(!list_empty(&dev_priv->gtt.base.active_list));
-       list_for_each_entry(vma, &dev_priv->gtt.base.active_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-       list_for_each_entry(vma, &dev_priv->gtt.base.inactive_list, mm_list)
-               i915_gem_release_mmap(vma->obj);
-}
-
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1657,6 +1641,15 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        obj->fault_mappable = false;
 }
 
+void
+i915_gem_release_all_mmaps(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list)
+               i915_gem_release_mmap(obj);
+}
+
 uint32_t
 i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 {
index 3ffe308..a5ddf3b 100644 (file)
@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
        struct intel_context *from = ring->last_context;
        struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
        u32 hw_flags = 0;
+       bool uninitialized = false;
        int ret, i;
 
        if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
                i915_gem_context_unreference(from);
        }
 
+       uninitialized = !to->is_initialized && from == NULL;
+       to->is_initialized = true;
+
 done:
        i915_gem_context_reference(to);
        ring->last_context = to;
        to->last_ring = ring;
 
-       if (ring->id == RCS && !to->is_initialized && from == NULL) {
+       if (uninitialized) {
                ret = i915_gem_render_state_init(ring);
                if (ret)
                        DRM_ERROR("init render state: %d\n", ret);
        }
 
-       to->is_initialized = true;
-
        return 0;
 
 unpin_out:
index eec820a..8b3cde7 100644 (file)
@@ -1992,7 +1992,10 @@ static void gen6_gmch_remove(struct i915_address_space *vm)
 
        struct i915_gtt *gtt = container_of(vm, struct i915_gtt, base);
 
-       drm_mm_takedown(&vm->mm);
+       if (drm_mm_initialized(&vm->mm)) {
+               drm_mm_takedown(&vm->mm);
+               list_del(&vm->global_link);
+       }
        iounmap(gtt->gsm);
        teardown_scratch_page(vm->dev);
 }
@@ -2025,6 +2028,10 @@ static int i915_gmch_probe(struct drm_device *dev,
 
 static void i915_gmch_remove(struct i915_address_space *vm)
 {
+       if (drm_mm_initialized(&vm->mm)) {
+               drm_mm_takedown(&vm->mm);
+               list_del(&vm->global_link);
+       }
        intel_gmch_remove();
 }
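[Editor's note] Both gmch remove paths now guard the takedown with drm_mm_initialized(), so the teardown is safe to reach from error paths where the address space was never set up, and the list_del() of global_link moves next to the takedown it pairs with. Restated as a standalone sketch:

    /* Idempotent teardown: tolerate being reached before init completed. */
    static void vm_teardown(struct i915_address_space *vm)
    {
            if (!drm_mm_initialized(&vm->mm))
                    return;                 /* never initialized, or already torn down */
            drm_mm_takedown(&vm->mm);
            list_del(&vm->global_link);     /* must pair with the takedown */
    }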
 
index 3521f99..34894b5 100644 (file)
@@ -31,7 +31,7 @@
 struct i915_render_state {
        struct drm_i915_gem_object *obj;
        unsigned long ggtt_offset;
-       void *batch;
+       u32 *batch;
        u32 size;
        u32 len;
 };
@@ -80,7 +80,7 @@ free:
 
 static void render_state_free(struct i915_render_state *so)
 {
-       kunmap(so->batch);
+       kunmap(kmap_to_page(so->batch));
        i915_gem_object_ggtt_unpin(so->obj);
        drm_gem_object_unreference(&so->obj->base);
        kfree(so);
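[Editor's note] render_state_free() was passing a kernel virtual address to kunmap(), which expects the struct page that was originally kmap()ed; typing batch as u32 * and using kmap_to_page() restores the pairing. A minimal sketch, assuming a caller that owns the page:

    #include <linux/highmem.h>

    static u32 *map_batch(struct page *page)
    {
            return kmap(page);                   /* returns a kernel vaddr */
    }

    static void unmap_batch(u32 *batch)
    {
            kunmap(kmap_to_page(batch));         /* recover the page from the vaddr */
    }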
index 62ef55b..7465ab0 100644 (file)
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
        if (base == 0)
                return 0;
 
+       /* make sure we don't clobber the GTT if it's within stolen memory */
+       if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
+               struct {
+                       u32 start, end;
+               } stolen[2] = {
+                       { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+                       { .start = base, .end = base + dev_priv->gtt.stolen_size, },
+               };
+               u64 gtt_start, gtt_end;
+
+               gtt_start = I915_READ(PGTBL_CTL);
+               if (IS_GEN4(dev))
+                       gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
+                               (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
+               else
+                       gtt_start &= PGTBL_ADDRESS_LO_MASK;
+               gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
+
+               if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
+                       stolen[0].end = gtt_start;
+               if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
+                       stolen[1].start = gtt_end;
+
+               /* pick the larger of the two chunks */
+               if (stolen[0].end - stolen[0].start >
+                   stolen[1].end - stolen[1].start) {
+                       base = stolen[0].start;
+                       dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
+               } else {
+                       base = stolen[1].start;
+                       dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
+               }
+
+               if (stolen[0].start != stolen[1].start ||
+                   stolen[0].end != stolen[1].end) {
+                       DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
+                                     (unsigned long long) gtt_start,
+                                     (unsigned long long) gtt_end - 1);
+                       DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
+                                     base, base + (u32) dev_priv->gtt.stolen_size - 1);
+               }
+       }
+
        /* Verify that nothing else uses this physical address. Stolen
         * memory should be reserved by the BIOS and hidden from the
         * kernel. So if the region is already marked as busy, something
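[Editor's note] The new code reads the GTT's physical range out of PGTBL_CTL, clips that range out of the stolen region, and keeps the larger surviving piece. The clipping arithmetic, restated as a self-contained sketch (struct chunk and clip_stolen are illustrative names):

    struct chunk { u32 start, end; };

    static struct chunk clip_stolen(u32 base, u32 size, u64 gtt_start, u64 gtt_end)
    {
            struct chunk c[2] = {
                    { base, base + size },  /* candidate below the GTT */
                    { base, base + size },  /* candidate above the GTT */
            };

            if (gtt_start >= c[0].start && gtt_start < c[0].end)
                    c[0].end = gtt_start;   /* trim the top of the lower chunk */
            if (gtt_end > c[1].start && gtt_end <= c[1].end)
                    c[1].start = gtt_end;   /* trim the bottom of the upper chunk */

            return (c[0].end - c[0].start > c[1].end - c[1].start) ? c[0] : c[1];
    }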
index 87ec60e..66cf417 100644 (file)
@@ -888,6 +888,8 @@ static void i915_gem_record_rings(struct drm_device *dev,
        for (i = 0; i < I915_NUM_RINGS; i++) {
                struct intel_engine_cs *ring = &dev_priv->ring[i];
 
+               error->ring[i].pid = -1;
+
                if (ring->dev == NULL)
                        continue;
 
@@ -895,7 +897,6 @@ static void i915_gem_record_rings(struct drm_device *dev,
 
                i915_record_ring_state(dev, ring, &error->ring[i]);
 
-               error->ring[i].pid = -1;
                request = i915_gem_find_active_request(ring);
                if (request) {
                        /* We need to copy these to an anonymous buffer
index 6f8017a..c05c84f 100644 (file)
@@ -2845,20 +2845,27 @@ static int semaphore_passed(struct intel_engine_cs *ring)
 {
        struct drm_i915_private *dev_priv = ring->dev->dev_private;
        struct intel_engine_cs *signaller;
-       u32 seqno, ctl;
+       u32 seqno;
 
-       ring->hangcheck.deadlock = true;
+       ring->hangcheck.deadlock++;
 
        signaller = semaphore_waits_for(ring, &seqno);
-       if (signaller == NULL || signaller->hangcheck.deadlock)
+       if (signaller == NULL)
                return -1;
 
+       /* Prevent pathological recursion due to driver bugs */
+       if (signaller->hangcheck.deadlock >= I915_NUM_RINGS)
+               return -1;
+
+       if (i915_seqno_passed(signaller->get_seqno(signaller, false), seqno))
+               return 1;
+
        /* cursory check for an unkickable deadlock */
-       ctl = I915_READ_CTL(signaller);
-       if (ctl & RING_WAIT_SEMAPHORE && semaphore_passed(signaller) < 0)
+       if (I915_READ_CTL(signaller) & RING_WAIT_SEMAPHORE &&
+           semaphore_passed(signaller) < 0)
                return -1;
 
-       return i915_seqno_passed(signaller->get_seqno(signaller, false), seqno);
+       return 0;
 }
 
 static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
@@ -2867,7 +2874,7 @@ static void semaphore_clear_deadlocks(struct drm_i915_private *dev_priv)
        int i;
 
        for_each_ring(ring, dev_priv, i)
-               ring->hangcheck.deadlock = false;
+               ring->hangcheck.deadlock = 0;
 }
 
 static enum intel_ring_hangcheck_action
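[Editor's note] Turning hangcheck.deadlock from a bool into a counter bounds the recursion in semaphore_passed(): every visit increments a ring's counter, and a counter reaching I915_NUM_RINGS proves the waits-on chain is longer than any acyclic path, i.e. a cycle. The same idea in a generic sketch (node and deadlocked are hypothetical):

    #define N_NODES 5   /* analogous to I915_NUM_RINGS */

    struct node { int visits; struct node *waits_on; };

    static int deadlocked(struct node *n)
    {
            if (!n)
                    return 0;               /* chain ends: no cycle */
            if (++n->visits >= N_NODES)
                    return 1;               /* revisited a node: cycle */
            return deadlocked(n->waits_on);
    }

The counters must be cleared before each scan, which is what semaphore_clear_deadlocks() does above.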
index e691b30..a5bab61 100644 (file)
@@ -942,6 +942,9 @@ enum punit_power_well {
 /*
  * Instruction and interrupt control regs
  */
+#define PGTBL_CTL      0x02020
+#define   PGTBL_ADDRESS_LO_MASK        0xfffff000 /* bits [31:12] */
+#define   PGTBL_ADDRESS_HI_MASK        0x000000f0 /* bits [35:32] (gen4) */
 #define PGTBL_ER       0x02024
 #define RENDER_RING_BASE       0x02000
 #define BSD_RING_BASE          0x04000
index 1ee98f1..827498e 100644 (file)
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
        const struct bdb_lfp_backlight_data *backlight_data;
        const struct bdb_lfp_backlight_data_entry *entry;
 
-       /* Err to enabling backlight if no backlight block. */
-       dev_priv->vbt.backlight.present = true;
-
        backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
        if (!backlight_data)
                return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
 
        dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
 
+       /* Default to having backlight */
+       dev_priv->vbt.backlight.present = true;
+
        /* LFP panel data */
        dev_priv->vbt.lvds_dither = 1;
        dev_priv->vbt.lvds_vbt = 0;
index efd3cf5..f0be855 100644 (file)
@@ -2087,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
 static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
                                          enum plane plane, enum pipe pipe)
 {
+       struct drm_device *dev = dev_priv->dev;
        struct intel_crtc *intel_crtc =
                to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
        int reg;
@@ -2106,6 +2107,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
 
        I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
        intel_flush_primary_plane(dev_priv, plane);
+
+       /*
+        * BDW signals flip done immediately if the plane
+        * is disabled, even if the plane enable is already
+        * armed to occur at the next vblank :(
+        */
+       if (IS_BROADWELL(dev))
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
 }
 
 /**
@@ -4564,7 +4573,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
        if (intel_crtc->active)
                return;
 
-       vlv_prepare_pll(intel_crtc);
+       is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
+
+       if (!is_dsi && !IS_CHERRYVIEW(dev))
+               vlv_prepare_pll(intel_crtc);
 
        /* Set up the display plane register */
        dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4610,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
                if (encoder->pre_pll_enable)
                        encoder->pre_pll_enable(encoder);
 
-       is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
-
        if (!is_dsi) {
                if (IS_CHERRYVIEW(dev))
                        chv_enable_pll(intel_crtc);
@@ -11087,6 +11097,22 @@ const char *intel_output_name(int output)
        return names[output];
 }
 
+static bool intel_crt_present(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+
+       if (IS_ULT(dev))
+               return false;
+
+       if (IS_CHERRYVIEW(dev))
+               return false;
+
+       if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
+               return false;
+
+       return true;
+}
+
 static void intel_setup_outputs(struct drm_device *dev)
 {
        struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11095,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev)
 
        intel_lvds_init(dev);
 
-       if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support)
+       if (intel_crt_present(dev))
                intel_crt_init(dev);
 
        if (HAS_DDI(dev)) {
@@ -11565,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
        DRM_INFO("applying inverted panel brightness quirk\n");
 }
 
+/* Some VBTs incorrectly indicate that no backlight is present */
+static void quirk_backlight_present(struct drm_device *dev)
+{
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
+       DRM_INFO("applying backlight present quirk\n");
+}
+
 struct intel_quirk {
        int device;
        int subsystem_vendor;
@@ -11633,6 +11667,15 @@ static struct intel_quirk intel_quirks[] = {
 
        /* Acer Aspire 5336 */
        { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
+
+       /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
+       { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
+
+       /* Toshiba CB35 Chromebook (Celeron 2955U) */
+       { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
+
+       /* HP Chromebook 14 (Celeron 2955U) */
+       { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
 };
 
 static void intel_init_quirks(struct drm_device *dev)
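[Editor's note] The new entries match on PCI device id plus subsystem vendor/device, so QUIRK_BACKLIGHT_PRESENT applies only to the named Chromebook SKUs. A hedged sketch of how such a table is typically scanned (the loop is illustrative and omits any wildcard handling the real driver may do):

    static void apply_quirks(struct drm_device *dev, u16 device,
                             u16 sub_vendor, u16 sub_device)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(intel_quirks); i++) {
                    struct intel_quirk *q = &intel_quirks[i];

                    if (q->device == device &&
                        q->subsystem_vendor == sub_vendor &&
                        q->subsystem_device == sub_device)
                            q->hook(dev);   /* e.g. quirk_backlight_present() */
            }
    }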
@@ -11871,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
                 * ...  */
                plane = crtc->plane;
                crtc->plane = !plane;
+               crtc->primary_enabled = true;
                dev_priv->display.crtc_disable(&crtc->base);
                crtc->plane = plane;
 
@@ -12411,8 +12455,8 @@ intel_display_capture_error_state(struct drm_device *dev)
 
        for_each_pipe(i) {
                error->pipe[i].power_domain_on =
-                       intel_display_power_enabled_sw(dev_priv,
-                                                      POWER_DOMAIN_PIPE(i));
+                       intel_display_power_enabled_unlocked(dev_priv,
+                                                          POWER_DOMAIN_PIPE(i));
                if (!error->pipe[i].power_domain_on)
                        continue;
 
@@ -12447,7 +12491,7 @@ intel_display_capture_error_state(struct drm_device *dev)
                enum transcoder cpu_transcoder = transcoders[i];
 
                error->transcoder[i].power_domain_on =
-                       intel_display_power_enabled_sw(dev_priv,
+                       intel_display_power_enabled_unlocked(dev_priv,
                                POWER_DOMAIN_TRANSCODER(cpu_transcoder));
                if (!error->transcoder[i].power_domain_on)
                        continue;
index 52fda95..8a1a4fb 100644 (file)
@@ -28,6 +28,8 @@
 #include <linux/i2c.h>
 #include <linux/slab.h>
 #include <linux/export.h>
+#include <linux/notifier.h>
+#include <linux/reboot.h>
 #include <drm/drmP.h>
 #include <drm/drm_crtc.h>
 #include <drm/drm_crtc_helper.h>
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
                return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
 }
 
+/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
+   This function is only applicable when panel PM state is not tracked. */
+static int edp_notify_handler(struct notifier_block *this, unsigned long code,
+                             void *unused)
+{
+       struct intel_dp *intel_dp = container_of(this, typeof(*intel_dp),
+                                                edp_notifier);
+       struct drm_device *dev = intel_dp_to_dev(intel_dp);
+       struct drm_i915_private *dev_priv = dev->dev_private;
+       u32 pp_div;
+       u32 pp_ctrl_reg, pp_div_reg;
+       enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
+
+       if (!is_edp(intel_dp) || code != SYS_RESTART)
+               return 0;
+
+       if (IS_VALLEYVIEW(dev)) {
+               pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
+               pp_div_reg  = VLV_PIPE_PP_DIVISOR(pipe);
+               pp_div = I915_READ(pp_div_reg);
+               pp_div &= PP_REFERENCE_DIVIDER_MASK;
+
+               /* 0x1F write to PP_DIV_REG sets max cycle delay */
+               I915_WRITE(pp_div_reg, pp_div | 0x1F);
+               I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
+               msleep(intel_dp->panel_power_cycle_delay);
+       }
+
+       return 0;
+}
+
 static bool edp_have_panel_power(struct intel_dp *intel_dp)
 {
        struct drm_device *dev = intel_dp_to_dev(intel_dp);
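[Editor's note] edp_notify_handler() is registered as a reboot notifier (see the init hunk below) so a VLV panel gets forced off with the maximum power-cycle delay before SYS_RESTART. The registration pattern in isolation, with hypothetical names:

    #include <linux/notifier.h>
    #include <linux/reboot.h>

    static int my_reboot_cb(struct notifier_block *nb, unsigned long code,
                            void *unused)
    {
            if (code != SYS_RESTART)
                    return NOTIFY_DONE;
            /* quiesce the hardware here */
            return NOTIFY_DONE;
    }

    static struct notifier_block my_nb = { .notifier_call = my_reboot_cb };

    /* register_reboot_notifier(&my_nb) at init time;
     * unregister_reboot_notifier(&my_nb) at teardown, as the
     * intel_dp_encoder_destroy() hunk below does. */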
@@ -873,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
                mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
                                                   bpp);
 
-               for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
-                       for (clock = min_clock; clock <= max_clock; clock++) {
+               for (clock = min_clock; clock <= max_clock; clock++) {
+                       for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
                                link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
                                link_avail = intel_dp_max_data_rate(link_clock,
                                                                    lane_count);
@@ -3707,6 +3740,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
                drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
                edp_panel_vdd_off_sync(intel_dp);
                drm_modeset_unlock(&dev->mode_config.connection_mutex);
+               if (intel_dp->edp_notifier.notifier_call) {
+                       unregister_reboot_notifier(&intel_dp->edp_notifier);
+                       intel_dp->edp_notifier.notifier_call = NULL;
+               }
        }
        kfree(intel_dig_port);
 }
@@ -4184,6 +4221,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
        }
        mutex_unlock(&dev->mode_config.mutex);
 
+       if (IS_VALLEYVIEW(dev)) {
+               intel_dp->edp_notifier.notifier_call = edp_notify_handler;
+               register_reboot_notifier(&intel_dp->edp_notifier);
+       }
+
        intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_panel_setup_backlight(connector);
 
index bda0ae3..f67340e 100644 (file)
@@ -538,6 +538,8 @@ struct intel_dp {
        unsigned long last_power_on;
        unsigned long last_backlight_off;
        bool psr_setup_done;
+       struct notifier_block edp_notifier;
+
        bool use_tps3;
        struct intel_connector *attached_connector;
 
@@ -950,8 +952,8 @@ int intel_power_domains_init(struct drm_i915_private *);
 void intel_power_domains_remove(struct drm_i915_private *);
 bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain);
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain);
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                         enum intel_display_power_domain domain);
 void intel_display_power_get(struct drm_i915_private *dev_priv,
                             enum intel_display_power_domain domain);
 void intel_display_power_put(struct drm_i915_private *dev_priv,
index 02f99d7..3fd0829 100644 (file)
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
        /* a bandgap reset is needed every time we power gate */
        band_gap_reset(dev_priv);
 
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       usleep_range(2500, 3000);
+
        val = I915_READ(MIPI_PORT_CTRL(pipe));
        I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
        usleep_range(1000, 1500);
-       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
-       usleep_range(2000, 2500);
-       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
-       usleep_range(2000, 2500);
-       I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
-       usleep_range(2000, 2500);
+
+       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+       usleep_range(2500, 3000);
+
        I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
-       usleep_range(2000, 2500);
+       usleep_range(2500, 3000);
 }
 
 static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
 
        DRM_DEBUG_KMS("\n");
 
-       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
        usleep_range(2000, 2500);
 
-       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
        usleep_range(2000, 2500);
 
-       I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
+       I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
        usleep_range(2000, 2500);
 
-       val = I915_READ(MIPI_PORT_CTRL(pipe));
-       I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
-       usleep_range(1000, 1500);
-
        if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
                                        == 0x00000), 30))
                DRM_ERROR("DSI LP not going Low\n");
 
+       val = I915_READ(MIPI_PORT_CTRL(pipe));
+       I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
+       usleep_range(1000, 1500);
+
        I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
        usleep_range(2000, 2500);
 
index 3eeb21b..933c863 100644 (file)
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
        else
                cmd |= DPI_LP_MODE;
 
-       /* DPI virtual channel?! */
-
-       mask = DPI_FIFO_EMPTY;
-       if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
-               DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
-
        /* clear bit */
        I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
 
index 2312602..5e5a72f 100644 (file)
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
 
        pipe_config->adjusted_mode.flags |= flags;
 
+       /* gen2/3 store dither state in pfit control, needs to match */
+       if (INTEL_INFO(dev)->gen < 4) {
+               tmp = I915_READ(PFIT_CONTROL);
+
+               pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
+       }
+
        dotclock = pipe_config->port_clock;
 
        if (HAS_PCH_SPLIT(dev_priv->dev))
index 2e2c71f..4f6b539 100644 (file)
@@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
 
        DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
 
+       /*
+        * If the acpi_video interface is not supposed to be used, don't
+        * bother processing backlight level change requests from firmware.
+        */
+       if (!acpi_video_verify_backlight_support()) {
+               DRM_DEBUG_KMS("opregion backlight request ignored\n");
+               return 0;
+       }
+
        if (!(bclp & ASLE_BCLP_VALID))
                return ASLC_BACKLIGHT_FAILED;
 
index 5e6c888..12b02fe 100644 (file)
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
                pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
                                 PFIT_FILTER_FUZZY);
 
-       /* Make sure pre-965 set dither correctly for 18bpp panels. */
-       if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
-               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
-
 out:
        if ((pfit_control & PFIT_ENABLE) == 0) {
                pfit_control = 0;
                pfit_pgm_ratios = 0;
        }
 
+       /* Make sure pre-965 set dither correctly for 18bpp panels. */
+       if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
+               pfit_control |= PANEL_8TO6_DITHER_ENABLE;
+
        pipe_config->gmch_pfit.control = pfit_control;
        pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
        pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -798,9 +798,6 @@ static void i965_enable_backlight(struct intel_connector *connector)
        ctl = freq << 16;
        I915_WRITE(BLC_PWM_CTL, ctl);
 
-       /* XXX: combine this into above write? */
-       intel_panel_actually_set_backlight(connector, panel->backlight.level);
-
        ctl2 = BLM_PIPE(pipe);
        if (panel->backlight.combination_mode)
                ctl2 |= BLM_COMBINATION_MODE;
@@ -809,6 +806,8 @@ static void i965_enable_backlight(struct intel_connector *connector)
        I915_WRITE(BLC_PWM_CTL2, ctl2);
        POSTING_READ(BLC_PWM_CTL2);
        I915_WRITE(BLC_PWM_CTL2, ctl2 | BLM_PWM_ENABLE);
+
+       intel_panel_actually_set_backlight(connector, panel->backlight.level);
 }
 
 static void vlv_enable_backlight(struct intel_connector *connector)
@@ -1119,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
        int ret;
 
        if (!dev_priv->vbt.backlight.present) {
-               DRM_DEBUG_KMS("native backlight control not available per VBT\n");
-               return 0;
+               if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
+                       DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
+               } else {
+                       DRM_DEBUG_KMS("no backlight present per VBT\n");
+                       return 0;
+               }
        }
 
        /* set level and max in panel struct */
index d1e53ab..ee72807 100644 (file)
@@ -511,8 +511,7 @@ void intel_update_fbc(struct drm_device *dev)
        obj = intel_fb->obj;
        adjusted_mode = &intel_crtc->config.adjusted_mode;
 
-       if (i915.enable_fbc < 0 &&
-           INTEL_INFO(dev)->gen <= 7 && !IS_HASWELL(dev)) {
+       if (i915.enable_fbc < 0) {
                if (set_no_fbc_reason(dev_priv, FBC_CHIP_DEFAULT))
                        DRM_DEBUG_KMS("disabled per chip default\n");
                goto out_disable;
@@ -3210,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
 */
 static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
 {
+       struct drm_device *dev = dev_priv->dev;
+
+       /* Latest VLV doesn't need to force the gfx clock */
+       if (dev->pdev->revision >= 0xd) {
+               valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
+               return;
+       }
+
        /*
         * When we are idle.  Drop to min voltage state.
         */
@@ -3506,15 +3513,11 @@ static void gen8_enable_rps(struct drm_device *dev)
 
        I915_WRITE(GEN6_RP_IDLE_HYSTERSIS, 10);
 
-       /* WaDisablePwrmtrEvent:chv (pre-production hw) */
-       I915_WRITE(0xA80C, I915_READ(0xA80C) & 0x00ffffff);
-       I915_WRITE(0xA810, I915_READ(0xA810) & 0xffffff00);
-
        /* 5: Enable RPS */
        I915_WRITE(GEN6_RP_CONTROL,
                   GEN6_RP_MEDIA_TURBO |
                   GEN6_RP_MEDIA_HW_NORMAL_MODE |
-                  GEN6_RP_MEDIA_IS_GFX | /* WaSetMaskForGfxBusyness:chv (pre-production hw ?) */
+                  GEN6_RP_MEDIA_IS_GFX |
                   GEN6_RP_ENABLE |
                   GEN6_RP_UP_BUSY_AVG |
                   GEN6_RP_DOWN_IDLE_AVG);
@@ -5608,8 +5611,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
                     (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
 }
 
-bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
-                                   enum intel_display_power_domain domain)
+bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
+                                         enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
        struct i915_power_well *power_well;
@@ -5620,16 +5623,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
                return false;
 
        power_domains = &dev_priv->power_domains;
+
        is_enabled = true;
+
        for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
                if (power_well->always_on)
                        continue;
 
-               if (!power_well->count) {
+               if (!power_well->hw_enabled) {
                        is_enabled = false;
                        break;
                }
        }
+
        return is_enabled;
 }
 
@@ -5637,30 +5643,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
                                 enum intel_display_power_domain domain)
 {
        struct i915_power_domains *power_domains;
-       struct i915_power_well *power_well;
-       bool is_enabled;
-       int i;
-
-       if (dev_priv->pm.suspended)
-               return false;
+       bool ret;
 
        power_domains = &dev_priv->power_domains;
 
-       is_enabled = true;
-
        mutex_lock(&power_domains->lock);
-       for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
-               if (power_well->always_on)
-                       continue;
-
-               if (!power_well->ops->is_enabled(dev_priv, power_well)) {
-                       is_enabled = false;
-                       break;
-               }
-       }
+       ret = intel_display_power_enabled_unlocked(dev_priv, domain);
        mutex_unlock(&power_domains->lock);
 
-       return is_enabled;
+       return ret;
 }
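[Editor's note] After this refactor, intel_display_power_enabled() is a thin locked wrapper around the _unlocked variant, so both callers share a single walk over the power wells. The wrapper pattern in a generic sketch (dev_state is an invented type):

    struct dev_state { struct mutex lock; bool hw_enabled; };

    static bool state_enabled_unlocked(struct dev_state *s)
    {
            return s->hw_enabled;           /* caller must hold s->lock */
    }

    static bool state_enabled(struct dev_state *s)
    {
            bool ret;

            mutex_lock(&s->lock);
            ret = state_enabled_unlocked(s);
            mutex_unlock(&s->lock);
            return ret;
    }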
 
 /*
@@ -5981,6 +5972,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
                if (!power_well->count++) {
                        DRM_DEBUG_KMS("enabling %s\n", power_well->name);
                        power_well->ops->enable(dev_priv, power_well);
+                       power_well->hw_enabled = true;
                }
 
                check_power_well_state(dev_priv, power_well);
@@ -6010,6 +6002,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 
                if (!--power_well->count && i915.disable_power_well) {
                        DRM_DEBUG_KMS("disabling %s\n", power_well->name);
+                       power_well->hw_enabled = false;
                        power_well->ops->disable(dev_priv, power_well);
                }
 
@@ -6024,33 +6017,56 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
 static struct i915_power_domains *hsw_pwr;
 
 /* Display audio driver power well request */
-void i915_request_power_well(void)
+int i915_request_power_well(void)
 {
        struct drm_i915_private *dev_priv;
 
-       if (WARN_ON(!hsw_pwr))
-               return;
+       if (!hsw_pwr)
+               return -ENODEV;
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
        intel_display_power_get(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(i915_request_power_well);
 
 /* Display audio driver power well release */
-void i915_release_power_well(void)
+int i915_release_power_well(void)
 {
        struct drm_i915_private *dev_priv;
 
-       if (WARN_ON(!hsw_pwr))
-               return;
+       if (!hsw_pwr)
+               return -ENODEV;
 
        dev_priv = container_of(hsw_pwr, struct drm_i915_private,
                                power_domains);
        intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+       return 0;
 }
 EXPORT_SYMBOL_GPL(i915_release_power_well);
 
+/*
+ * Private interface for the audio driver to get CDCLK in kHz.
+ *
+ * Caller must request power well using i915_request_power_well() prior to
+ * making the call.
+ */
+int i915_get_cdclk_freq(void)
+{
+       struct drm_i915_private *dev_priv;
+
+       if (!hsw_pwr)
+               return -ENODEV;
+
+       dev_priv = container_of(hsw_pwr, struct drm_i915_private,
+                               power_domains);
+
+       return intel_ddi_get_cdclk_freq(dev_priv);
+}
+EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
+
+
 #define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
 
 #define HSW_ALWAYS_ON_POWER_DOMAINS (                  \
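[Editor's note] The audio-side interface now returns -ENODEV instead of WARNing when i915 is absent, and i915_get_cdclk_freq() is added for the HDA driver. A hedged sketch of the consumer side (enable_display_audio() is invented; it is not code from sound/):

    int enable_display_audio(void)
    {
            int ret = i915_request_power_well();

            if (ret)
                    return ret;             /* -ENODEV: i915 not loaded yet */

            ret = i915_get_cdclk_freq();    /* kHz; valid while the well is held */
            if (ret < 0) {
                    i915_release_power_well();
                    return ret;
            }
            /* ... program audio dividers against the CDCLK value ... */
            return 0;
    }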
@@ -6270,8 +6286,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
        int i;
 
        mutex_lock(&power_domains->lock);
-       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains)
+       for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
                power_well->ops->sync_hw(dev_priv, power_well);
+               power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
+                                                                    power_well);
+       }
        mutex_unlock(&power_domains->lock);
 }
 
index 910c83c..e72017b 100644 (file)
@@ -55,7 +55,7 @@ struct intel_ring_hangcheck {
        u32 seqno;
        int score;
        enum intel_ring_hangcheck_action action;
-       bool deadlock;
+       int deadlock;
 };
 
 struct intel_ringbuffer {
index 6a4d5bc..20375cc 100644 (file)
@@ -1385,7 +1385,9 @@ static void intel_sdvo_get_config(struct intel_encoder *encoder,
                         >> SDVO_PORT_MULTIPLY_SHIFT) + 1;
        }
 
-       dotclock = pipe_config->port_clock / pipe_config->pixel_multiplier;
+       dotclock = pipe_config->port_clock;
+       if (pipe_config->pixel_multiplier)
+               dotclock /= pipe_config->pixel_multiplier;
 
        if (HAS_PCH_SPLIT(dev))
                ironlake_check_encoder_dotclock(pipe_config, dotclock);
index 1b66ddc..9a17b4e 100644 (file)
@@ -690,6 +690,14 @@ intel_post_enable_primary(struct drm_crtc *crtc)
        struct drm_device *dev = crtc->dev;
        struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
 
+       /*
+        * BDW signals flip done immediately if the plane
+        * is disabled, even if the plane enable is already
+        * armed to occur at the next vblank :(
+        */
+       if (IS_BROADWELL(dev))
+               intel_wait_for_vblank(dev, intel_crtc->pipe);
+
        /*
         * FIXME IPS should be fine as long as one plane is
         * enabled, but in practice it seems to have problems
index 79cba59..4f6fef7 100644 (file)
@@ -320,7 +320,8 @@ static void intel_uncore_forcewake_reset(struct drm_device *dev, bool restore)
        struct drm_i915_private *dev_priv = dev->dev_private;
        unsigned long irqflags;
 
-       del_timer_sync(&dev_priv->uncore.force_wake_timer);
+       if (del_timer_sync(&dev_priv->uncore.force_wake_timer))
+               gen6_force_wake_timer((unsigned long)dev_priv);
 
        /* Hold uncore.lock across reset to prevent any register access
         * with forcewake not set correctly
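[Editor's note] del_timer_sync() returns nonzero when it deactivated a pending timer, so the hunk above runs gen6_force_wake_timer() by hand rather than dropping a queued forcewake release on the floor. The flush-a-pending-timer pattern as a standalone sketch (3.16-era timer callbacks take an unsigned long argument):

    #include <linux/timer.h>

    static void flush_timer(struct timer_list *t,
                            void (*fn)(unsigned long), unsigned long data)
    {
            if (del_timer_sync(t))  /* nonzero: timer was pending, handler not run */
                    fn(data);       /* perform the deferred work synchronously */
    }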
index ae750f6..7f7aade 100644 (file)
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
        static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
        static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
+       static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
        static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
 
        config.phy_init      = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
        config.pwr_reg_names = pwr_reg_names;
        config.pwr_reg_cnt   = ARRAY_SIZE(pwr_reg_names);
        config.hpd_clk_names = hpd_clk_names;
+       config.hpd_freq      = hpd_clk_freq;
        config.hpd_clk_cnt   = ARRAY_SIZE(hpd_clk_names);
        config.pwr_clk_names = pwr_clk_names;
        config.pwr_clk_cnt   = ARRAY_SIZE(pwr_clk_names);
index 9fafee6..9d7723c 100644 (file)
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
 
        /* clks that need to be on for hpd: */
        const char **hpd_clk_names;
+       const unsigned long *hpd_freq;
        int hpd_clk_cnt;
 
        /* clks that need to be on for screen pwr (ie pixel clk): */
index e56a619..28f7e3e 100644 (file)
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
        }
 
        for (i = 0; i < config->hpd_clk_cnt; i++) {
+               if (config->hpd_freq && config->hpd_freq[i]) {
+                       ret = clk_set_rate(hdmi->hpd_clks[i],
+                                       config->hpd_freq[i]);
+                       if (ret)
+                               dev_warn(dev->dev, "failed to set clk %s (%d)\n",
+                                               config->hpd_clk_names[i], ret);
+               }
+
                ret = clk_prepare_enable(hdmi->hpd_clks[i]);
                if (ret) {
                        dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
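[Editor's note] The hpd clock loop now programs a rate before enabling each clock; a zero in hpd_clk_freq means "leave the provider default". A minimal sketch of the set-rate-then-enable step (stricter than the hunk, which only warns when clk_set_rate() fails):

    #include <linux/clk.h>

    static int enable_clk_at(struct clk *clk, unsigned long freq)
    {
            int ret;

            if (freq) {                       /* 0: keep the default rate */
                    ret = clk_set_rate(clk, freq);
                    if (ret)
                            return ret;
            }
            return clk_prepare_enable(clk);
    }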
index 42caf7f..71510ee 100644 (file)
 #include "msm_mmu.h"
 #include "mdp5_kms.h"
 
+static const char *iommu_ports[] = {
+               "mdp_0",
+};
+
 static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
 
 static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
 static void mdp5_destroy(struct msm_kms *kms)
 {
        struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
+       struct msm_mmu *mmu = mdp5_kms->mmu;
+
+       if (mmu) {
+               mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
+               mmu->funcs->destroy(mmu);
+       }
        kfree(mdp5_kms);
 }
 
@@ -216,10 +226,6 @@ fail:
        return ret;
 }
 
-static const char *iommu_ports[] = {
-               "mdp_0",
-};
-
 static int get_clk(struct platform_device *pdev, struct clk **clkp,
                const char *name)
 {
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
                mmu = msm_iommu_new(dev, config->iommu);
                if (IS_ERR(mmu)) {
                        ret = PTR_ERR(mmu);
+                       dev_err(dev->dev, "failed to init iommu: %d\n", ret);
                        goto fail;
                }
+
                ret = mmu->funcs->attach(mmu, iommu_ports,
                                ARRAY_SIZE(iommu_ports));
-               if (ret)
+               if (ret) {
+                       dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
+                       mmu->funcs->destroy(mmu);
                        goto fail;
+               }
        } else {
                dev_info(dev->dev, "no iommu, fallback to phys "
                                "contig buffers for scanout\n");
                mmu = NULL;
        }
+       mdp5_kms->mmu = mmu;
 
        mdp5_kms->id = msm_register_mmu(dev, mmu);
        if (mdp5_kms->id < 0) {
index c8b1a25..6e981b6 100644 (file)
@@ -33,6 +33,7 @@ struct mdp5_kms {
 
        /* mapper-id used to request GEM buffer mapped for scanout: */
        int id;
+       struct msm_mmu *mmu;
 
        /* for tracking smp allocation amongst pipes: */
        mdp5_smp_state_t smp_state;
index 0d2562f..9a5d87d 100644 (file)
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
 static int get_mdp_ver(struct platform_device *pdev)
 {
 #ifdef CONFIG_OF
-       const static struct of_device_id match_types[] = { {
+       static const struct of_device_id match_types[] = { {
                .compatible = "qcom,mdss_mdp",
                .data   = (void *)5,
        }, {
index a752ab8..5107fc4 100644 (file)
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
        struct drm_framebuffer *fb = NULL;
        struct fb_info *fbi = NULL;
        struct drm_mode_fb_cmd2 mode_cmd = {0};
-       dma_addr_t paddr;
+       uint32_t paddr;
        int ret, size;
 
        sizes->surface_bpp = 32;
index bb8026d..690d7e7 100644 (file)
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                uint32_t *iova)
 {
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
+       struct drm_device *dev = obj->dev;
        int ret = 0;
 
        if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
                struct msm_mmu *mmu = priv->mmus[id];
                struct page **pages = get_pages(obj);
 
+               if (!mmu) {
+                       dev_err(dev->dev, "null MMU pointer\n");
+                       return -EINVAL;
+               }
+
                if (IS_ERR(pages))
                        return PTR_ERR(pages);
 
index 92b7459..4b2ad91 100644 (file)
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
                unsigned long iova, int flags, void *arg)
 {
        DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
-       return 0;
+       return -ENOSYS;
 }
 
 static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
        for (i = 0; i < cnt; i++) {
                struct device *msm_iommu_get_ctx(const char *ctx_name);
                struct device *ctx = msm_iommu_get_ctx(names[i]);
-               if (IS_ERR_OR_NULL(ctx))
+               if (IS_ERR_OR_NULL(ctx)) {
+                       dev_warn(dev->dev, "couldn't get %s context", names[i]);
                        continue;
+               }
                ret = iommu_attach_device(iommu->domain, ctx);
                if (ret) {
                        dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
        return 0;
 }
 
+static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+       struct msm_iommu *iommu = to_msm_iommu(mmu);
+       int i;
+
+       for (i = 0; i < cnt; i++) {
+               struct device *msm_iommu_get_ctx(const char *ctx_name);
+               struct device *ctx = msm_iommu_get_ctx(names[i]);
+               if (IS_ERR_OR_NULL(ctx))
+                       continue;
+               iommu_detach_device(iommu->domain, ctx);
+       }
+}
+
 static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                struct sg_table *sgt, unsigned len, int prot)
 {
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
 
                VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
 
-               BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE));
+               BUG_ON(!PAGE_ALIGNED(bytes));
 
                da += bytes;
        }
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
 
 static const struct msm_mmu_funcs funcs = {
                .attach = msm_iommu_attach,
+               .detach = msm_iommu_detach,
                .map = msm_iommu_map,
                .unmap = msm_iommu_unmap,
                .destroy = msm_iommu_destroy,
index 0303244..21da6d1 100644 (file)
@@ -22,6 +22,7 @@
 
 struct msm_mmu_funcs {
        int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
+       void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
        int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
                        unsigned len, int prot);
        int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
index 2b6156d..8b307e1 100644 (file)
@@ -140,6 +140,7 @@ nouveau-y += core/subdev/i2c/nv4e.o
 nouveau-y += core/subdev/i2c/nv50.o
 nouveau-y += core/subdev/i2c/nv94.o
 nouveau-y += core/subdev/i2c/nvd0.o
+nouveau-y += core/subdev/i2c/gf117.o
 nouveau-y += core/subdev/i2c/nve0.o
 nouveau-y += core/subdev/ibus/nvc0.o
 nouveau-y += core/subdev/ibus/nve0.o
index f199957..8d55ed6 100644 (file)
@@ -314,7 +314,7 @@ nvc0_identify(struct nouveau_device *device)
                device->cname = "GF117";
                device->oclass[NVDEV_SUBDEV_VBIOS  ] = &nouveau_bios_oclass;
                device->oclass[NVDEV_SUBDEV_GPIO   ] =  nvd0_gpio_oclass;
-               device->oclass[NVDEV_SUBDEV_I2C    ] =  nvd0_i2c_oclass;
+               device->oclass[NVDEV_SUBDEV_I2C    ] =  gf117_i2c_oclass;
                device->oclass[NVDEV_SUBDEV_CLOCK  ] = &nvc0_clock_oclass;
                device->oclass[NVDEV_SUBDEV_THERM  ] = &nvd0_therm_oclass;
                device->oclass[NVDEV_SUBDEV_MXM    ] = &nv50_mxm_oclass;
index c41f656..9c38c5e 100644 (file)
@@ -99,8 +99,10 @@ _nouveau_disp_dtor(struct nouveau_object *object)
 
        nouveau_event_destroy(&disp->vblank);
 
-       list_for_each_entry_safe(outp, outt, &disp->outp, head) {
-               nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+       if (disp->outp.next) {
+               list_for_each_entry_safe(outp, outt, &disp->outp, head) {
+                       nouveau_object_ref(NULL, (struct nouveau_object **)&outp);
+               }
        }
 
        nouveau_engine_destroy(&disp->base);
index 39562d4..5a5b59b 100644 (file)
@@ -241,7 +241,9 @@ dp_link_train_eq(struct dp_state *dp)
                dp_set_training_pattern(dp, 2);
 
        do {
-               if (dp_link_train_update(dp, dp->pc2, 400))
+               if ((tries &&
+                   dp_link_train_commit(dp, dp->pc2)) ||
+                   dp_link_train_update(dp, dp->pc2, 400))
                        break;
 
                eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE);
@@ -253,9 +255,6 @@ dp_link_train_eq(struct dp_state *dp)
                            !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED))
                                eq_done = false;
                }
-
-               if (dp_link_train_commit(dp, dp->pc2))
-                       break;
        } while (!eq_done && cr_done && ++tries <= 5);
 
        return eq_done ? 0 : -1;
index 1e85f36..2283c44 100644 (file)
@@ -1270,7 +1270,7 @@ exec_clkcmp(struct nv50_disp_priv *priv, int head, int id, u32 pclk, u32 *conf)
        i--;
 
        outp = exec_lookup(priv, head, i, ctrl, &data, &ver, &hdr, &cnt, &len, &info1);
-       if (!data)
+       if (!outp)
                return NULL;
 
        if (outp->info.location == 0) {
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
                }
 
                switch ((ctrl & 0x000f0000) >> 16) {
-               case 6: datarate = pclk * 30 / 8; break;
-               case 5: datarate = pclk * 24 / 8; break;
+               case 6: datarate = pclk * 30; break;
+               case 5: datarate = pclk * 24; break;
                case 2:
                default:
-                       datarate = pclk * 18 / 8;
+                       datarate = pclk * 18;
                        break;
                }
 
index 48aa38a..fa30d81 100644 (file)
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
        if (outp->info.type == DCB_OUTPUT_DP) {
                u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
                switch ((sync & 0x000003c0) >> 6) {
-               case 6: pclk = pclk * 30 / 8; break;
-               case 5: pclk = pclk * 24 / 8; break;
+               case 6: pclk = pclk * 30; break;
+               case 5: pclk = pclk * 24; break;
                case 2:
                default:
-                       pclk = pclk * 18 / 8;
+                       pclk = pclk * 18;
                        break;
                }
 
index 52c299c..eb2d778 100644 (file)
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
        struct nvkm_output_dp *outp = (void *)base;
        bool retrain = true;
        u8 link[2], stat[3];
-       u32 rate;
+       u32 linkrate;
        int ret, i;
 
        /* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
                goto done;
        }
 
-       rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
-       if (rate < ((datarate / 8) * 10)) {
+       linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
+       linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
+       datarate = (datarate + 9) / 10; /* -> decakilobits */
+       if (linkrate < datarate) {
                DBG("link not trained at sufficient rate\n");
                goto done;
        }
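[Editor's note] The retrain check now compares like with like: the trained link rate is derated by the 8b/10b coding overhead (x8/10), and the requested payload datarate is rounded up into the same 10 kbit/s units. A worked example with assumed numbers (1080p60 at 24 bpp over two HBR lanes):

    u32 link_bw = 0x0a, lanes = 2;            /* DPCD: 2.7 GHz symbol rate      */
    u32 linkrate = link_bw * 27000 * lanes;   /* 540000, in 10 kbit/s units     */
    linkrate = (linkrate * 8) / 10;           /* 432000 usable payload rate     */

    u32 pclk = 148500;                        /* pixel clock, kHz               */
    u32 datarate = pclk * 24;                 /* 3564000 kbit/s                 */
    datarate = (datarate + 9) / 10;           /* 356400, same 10 kbit/s units   */
    /* linkrate >= datarate here, so the existing training is sufficient. */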
index e183277..7a1ebdf 100644 (file)
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
                        struct nvkm_output_dp *outpdp = (void *)outp;
                        switch (data) {
                        case NV94_DISP_SOR_DP_PWR_STATE_OFF:
+                               nouveau_event_put(outpdp->irq);
                                ((struct nvkm_output_dp_impl *)nv_oclass(outp))
                                        ->lnk_pwr(outpdp, 0);
                                atomic_set(&outpdp->lt.done, 0);
index 2f7345f..7445f12 100644 (file)
@@ -54,7 +54,7 @@ mmio_list_base:
 #ifdef INCLUDE_CODE
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
 //
 error:
        push $r14
index c8ddb8d..b4ad18b 100644 (file)
@@ -49,7 +49,7 @@ hub_mmio_list_next:
 #ifdef INCLUDE_CODE
 // reports an exception to the host
 //
-// In: $r15 error code (see nvc0.fuc)
+// In: $r15 error code (see os.h)
 //
 error:
        nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(5), 0, $r15)
@@ -343,13 +343,25 @@ ih:
        ih_no_ctxsw:
        and $r11 $r10 NV_PGRAPH_FECS_INTR_FWMTHD
        bra e #ih_no_fwmthd
-               // none we handle, ack, and fall-through to unhandled
+               // none we handle; report to host and ack
+               nv_rd32($r15, NV_PGRAPH_TRAPPED_DATA_LO)
+               nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(4), 0, $r15)
+               nv_rd32($r15, NV_PGRAPH_TRAPPED_ADDR)
+               nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(3), 0, $r15)
+               extr $r14 $r15 16:18
+               shl b32 $r14 $r14 2
+               imm32($r15, NV_PGRAPH_FE_OBJECT_TABLE(0))
+               add b32 $r14 $r15
+               call(nv_rd32)
+               nv_iowr(NV_PGRAPH_FECS_CC_SCRATCH_VAL(2), 0, $r15)
+               mov $r15 E_BAD_FWMTHD
+               call(error)
                mov $r11 0x100
                nv_wr32(0x400144, $r11)
 
        // anything we didn't handle, bring it to the host's attention
        ih_no_fwmthd:
-       mov $r11 0x104 // FIFO | CHSW
+       mov $r11 0x504 // FIFO | CHSW | FWMTHD
        not b32 $r11
        and $r11 $r10 $r11
        bra e #ih_no_other
index 214dd16..5f953c5 100644 (file)
@@ -478,10 +478,10 @@ uint32_t gm107_grhub_code[] = {
        0x01040080,
        0xbd0001f6,
        0x01004104,
-       0x627e020f,
-       0x717e0006,
+       0xa87e020f,
+       0xb77e0006,
        0x100f0006,
-       0x0006b37e,
+       0x0006f97e,
        0x98000e98,
        0x207e010f,
        0x14950001,
@@ -523,8 +523,8 @@ uint32_t gm107_grhub_code[] = {
        0x800040b7,
        0xf40132b6,
        0x000fb41b,
-       0x0006b37e,
-       0x627e000f,
+       0x0006f97e,
+       0xa87e000f,
        0x00800006,
        0x01f60201,
        0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t gm107_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0231f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -563,7 +563,7 @@ uint32_t gm107_grhub_code[] = {
        0x37008006,
        0x0009f602,
        0x31f404bd,
-       0x08367e01,
+       0x087c7e01,
        0xf094bd00,
        0x00800699,
        0x09f60217,
@@ -572,7 +572,7 @@ uint32_t gm107_grhub_code[] = {
        0x20f92f0e,
        0x32f412b2,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x008020fc,
        0x02f602c0,
        0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t gm107_grhub_code[] = {
        0x23c8130e,
        0x0d0bf41f,
        0xf40131f4,
-       0x367e0232,
+       0x7c7e0232,
 /* 0x054e: chsw_done */
        0x01020008,
        0x02c30080,
@@ -593,7 +593,7 @@ uint32_t gm107_grhub_code[] = {
        0xb0ff2a0e,
        0x1bf401e4,
        0x7ef2b20c,
-       0xf40007d6,
+       0xf400081c,
 /* 0x057a: main_not_ctx_chan */
        0xe4b0400e,
        0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t gm107_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -642,238 +642,238 @@ uint32_t gm107_grhub_code[] = {
 /* 0x061a: ih_no_ctxsw */
        0xabe40000,
        0x0bf40400,
-       0x01004b10,
-       0x448ebfb2,
-       0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
-       0x044b0000,
-       0xffb0bd01,
-       0x0bf4b4ab,
-       0x0700800c,
-       0x000bf603,
-/* 0x0642: ih_no_other */
-       0x004004bd,
-       0x000af601,
-       0xf0fc04bd,
-       0xd0fce0fc,
-       0xa0fcb0fc,
index 64dfd75..e49b5a8 100644
@@ -478,10 +478,10 @@ uint32_t nv108_grhub_code[] = {
        0x01040080,
        0xbd0001f6,
        0x01004104,
-       0x627e020f,
-       0x717e0006,
+       0xa87e020f,
+       0xb77e0006,
        0x100f0006,
-       0x0006b37e,
+       0x0006f97e,
        0x98000e98,
        0x207e010f,
        0x14950001,
@@ -523,8 +523,8 @@ uint32_t nv108_grhub_code[] = {
        0x800040b7,
        0xf40132b6,
        0x000fb41b,
-       0x0006b37e,
-       0x627e000f,
+       0x0006f97e,
+       0xa87e000f,
        0x00800006,
        0x01f60201,
        0xbd04bd00,
@@ -554,7 +554,7 @@ uint32_t nv108_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0231f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -563,7 +563,7 @@ uint32_t nv108_grhub_code[] = {
        0x37008006,
        0x0009f602,
        0x31f404bd,
-       0x08367e01,
+       0x087c7e01,
        0xf094bd00,
        0x00800699,
        0x09f60217,
@@ -572,7 +572,7 @@ uint32_t nv108_grhub_code[] = {
        0x20f92f0e,
        0x32f412b2,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x008020fc,
        0x02f602c0,
        0xf404bd00,
@@ -580,7 +580,7 @@ uint32_t nv108_grhub_code[] = {
        0x23c8130e,
        0x0d0bf41f,
        0xf40131f4,
-       0x367e0232,
+       0x7c7e0232,
 /* 0x054e: chsw_done */
        0x01020008,
        0x02c30080,
@@ -593,7 +593,7 @@ uint32_t nv108_grhub_code[] = {
        0xb0ff2a0e,
        0x1bf401e4,
        0x7ef2b20c,
-       0xf40007d6,
+       0xf400081c,
 /* 0x057a: main_not_ctx_chan */
        0xe4b0400e,
        0x2c1bf402,
@@ -602,7 +602,7 @@ uint32_t nv108_grhub_code[] = {
        0x0009f602,
        0x32f404bd,
        0x0232f401,
-       0x0008367e,
+       0x00087c7e,
        0x99f094bd,
        0x17008007,
        0x0009f602,
@@ -642,238 +642,238 @@ uint32_t nv108_grhub_code[] = {
 /* 0x061a: ih_no_ctxsw */
        0xabe40000,
        0x0bf40400,
-       0x01004b10,
-       0x448ebfb2,
-       0x8f7e4001,
-/* 0x062e: ih_no_fwmthd */
-       0x044b0000,
-       0xffb0bd01,
-       0x0bf4b4ab,
-       0x0700800c,
-       0x000bf603,
-/* 0x0642: ih_no_other */
-       0x004004bd,
-       0x000af601,
-       0xf0fc04bd,
-       0xd0fce0fc,
-       0xa0fcb0fc,
-       0x80fc90fc,
-       0xfc0088fe,
-       0x0032f480,
-/* 0x0662: ctx_4170s */
-       0xf5f001f8,
-       0x8effb210,
-       0x7e404170,
-       0xf800008f,
-/* 0x0671: ctx_4170w */
-       0x41708e00,
+       0x07088e56,
        0x00657e40,
-       0xf0ffb200,
-       0x1bf410f4,
-/* 0x0683: ctx_redswitch */
-       0x4e00f8f3,
-       0xe5f00200,
-       0x20e5f040,
-       0x8010e5f0,
-       0xf6018500,
-       0x04bd000e,
-/* 0x069a: ctx_redswitch_delay */
-       0xf2b6080f,
-       0xfd1bf401,
-       0x0400e5f1,
-       0x0100e5f1,
-       0x01850080,
-       0xbd000ef6,
-/* 0x06b3: ctx_86c */
-       0x8000f804,
-       0xf6022300,
+       0x80ffb200,
+       0xf6020400,
        0x04bd000f,
-       0x148effb2,
-       0x8f7e408a,
-       0xffb20000,
-       0x41a88c8e,
+       0x4007048e,
+       0x0000657e,
+       0x0080ffb2,
+       0x0ff60203,
+       0xc704bd00,
+       0xee9450fe,
+       0x07008f02,
+       0x00efbb40,
+       0x0000657e,
+       0x02020080,
+       0xbd000ff6,
+       0x7e030f04,
+       0x4b0002f8,
+       0xbfb20100,
+       0x4001448e,
        0x00008f7e,
-/* 0x06d2: ctx_mem */
-       0x008000f8,
-       0x0ff60284,
-/* 0x06db: ctx_mem_wait */
-       0x8f04bd00,
-       0xcf028400,
-       0xfffd00ff,
-       0xf61bf405,
-/* 0x06ea: ctx_load */
-       0x94bd00f8,
-       0x800599f0,
-       0xf6023700,
-       0x04bd0009,
-       0xb87e0c0a,
-       0xf4bd0000,
-       0x02890080,
+/* 0x0674: ih_no_fwmthd */
+       0xbd05044b,
+       0xb4abffb0,
+       0x800c0bf4,
+       0xf6030700,
+       0x04bd000b,
+/* 0x0688: ih_no_other */
+       0xf6010040,
+       0x04bd000a,
+       0xe0fcf0fc,
+       0xb0fcd0fc,
+       0x90fca0fc,
+       0x88fe80fc,
+       0xf480fc00,
+       0x01f80032,
+/* 0x06a8: ctx_4170s */
+       0xb210f5f0,
+       0x41708eff,
+       0x008f7e40,
+/* 0x06b7: ctx_4170w */
+       0x8e00f800,
+       0x7e404170,
+       0xb2000065,
+       0x10f4f0ff,
+       0xf8f31bf4,
+/* 0x06c9: ctx_redswitch */
+       0x02004e00,
+       0xf040e5f0,
+       0xe5f020e5,
+       0x85008010,
+       0x000ef601,
+       0x080f04bd,
+/* 0x06e0: ctx_redswitch_delay */
+       0xf401f2b6,
+       0xe5f1fd1b,
+       0xe5f10400,
+       0x00800100,
+       0x0ef60185,
+       0xf804bd00,
+/* 0x06f9: ctx_86c */
+       0x23008000,
+       0x000ff602,
+       0xffb204bd,
+       0x408a148e,
+       0x00008f7e,
+       0x8c8effb2,
+       0x8f7e41a8,
+       0x00f80000,
+/* 0x0718: ctx_mem */
+       0x02840080,
        0xbd000ff6,
-       0xc1008004,
-       0x0002f602,
-       0x008004bd,
-       0x02f60283,
-       0x0f04bd00,
-       0x06d27e07,
-       0xc0008000,
-       0x0002f602,
-       0x0bfe04bd,
-       0x1f2af000,
-       0xb60424b6,
-       0x94bd0220,
-       0x800899f0,
-       0xf6023700,
-       0x04bd0009,
-       0x02810080,
-       0xbd0002f6,
-       0x0000d204,
-       0x25f08000,
-       0x88008002,
-       0x0002f602,
-       0x100104bd,
-       0xf0020042,
-       0x12fa0223,
-       0xbd03f805,
-       0x0899f094,
-       0x02170080,
-       0xbd0009f6,
-       0x81019804,
-       0x981814b6,
-       0x25b68002,
-       0x0512fd08,
-       0xbd1601b5,
-       0x0999f094,
-       0x02370080,
-       0xbd0009f6,
-       0x81008004,
-       0x0001f602,
-       0x010204bd,
-       0x02880080,
+/* 0x0721: ctx_mem_wait */
+       0x84008f04,
+       0x00ffcf02,
+       0xf405fffd,
+       0x00f8f61b,
+/* 0x0730: ctx_load */
+       0x99f094bd,
+       0x37008005,
+       0x0009f602,
+       0x0c0a04bd,
+       0x0000b87e,
+       0x0080f4bd,
+       0x0ff60289,
+       0x8004bd00,
+       0xf602c100,
+       0x04bd0002,
+       0x02830080,
        0xbd0002f6,
-       0x01004104,
-       0xfa0613f0,
-       0x03f80501,
+       0x7e070f04,
+       0x80000718,
+       0xf602c000,
+       0x04bd0002,
+       0xf0000bfe,
+       0x24b61f2a,
+       0x0220b604,
        0x99f094bd,
-       0x17008009,
+       0x37008008,
        0x0009f602,
-       0x94bd04bd,
-       0x800599f0,
+       0x008004bd,
+       0x02f60281,
+       0xd204bd00,
+       0x80000000,
+       0x800225f0,
+       0xf6028800,
+       0x04bd0002,
+       0x00421001,
+       0x0223f002,
+       0xf80512fa,
+       0xf094bd03,
+       0x00800899,
+       0x09f60217,
+       0x9804bd00,
+       0x14b68101,
+       0x80029818,
+       0xfd0825b6,
+       0x01b50512,
+       0xf094bd16,
+       0x00800999,
+       0x09f60237,
+       0x8004bd00,
+       0xf6028100,
+       0x04bd0001,
+       0x00800102,
+       0x02f60288,
+       0x4104bd00,
+       0x13f00100,
+       0x0501fa06,
+       0x94bd03f8,
+       0x800999f0,
        0xf6021700,
        0x04bd0009,
-/* 0x07d6: ctx_chan */
-       0xea7e00f8,
-       0x0c0a0006,
-       0x0000b87e,
-       0xd27e050f,
-       0x00f80006,
-/* 0x07e8: ctx_mmio_exec */
-       0x80410398,
+       0x99f094bd,
+       0x17008005,
+       0x0009f602,
+       0x00f804bd,
+/* 0x081c: ctx_chan */
+       0x0007307e,
+       0xb87e0c0a,
+       0x050f0000,
+       0x0007187e,
+/* 0x082e: ctx_mmio_exec */
+       0x039800f8,
+       0x81008041,
+       0x0003f602,
+       0x34bd04bd,
+/* 0x083c: ctx_mmio_loop */
+       0xf4ff34c4,
+       0x00450e1b,
+       0x0653f002,
+       0xf80535fa,
+/* 0x084d: ctx_mmio_pull */
+       0x804e9803,
+       0x7e814f98,
+       0xb600008f,
+       0x12b60830,
+       0xdf1bf401,
+/* 0x0860: ctx_mmio_done */
+       0x80160398,
        0xf6028100,
        0x04bd0003,
-/* 0x07f6: ctx_mmio_loop */
-       0x34c434bd,
-       0x0e1bf4ff,
-       0xf0020045,
-       0x35fa0653,
-/* 0x0807: ctx_mmio_pull */
-       0x9803f805,
-       0x4f98804e,
-       0x008f7e81,
-       0x0830b600,
-       0xf40112b6,
-/* 0x081a: ctx_mmio_done */
-       0x0398df1b,
-       0x81008016,
-       0x0003f602,
-       0x00b504bd,
-       0x01004140,
-       0xfa0613f0,
-       0x03f80601,
-/* 0x0836: ctx_xfer */
-       0x040e00f8,
-       0x03020080,
-       0xbd000ef6,
-/* 0x0841: ctx_xfer_idle */
-       0x00008e04,
-       0x00eecf03,
-       0x2000e4f1,
-       0xf4f51bf4,
-       0x02f40611,
-/* 0x0855: ctx_xfer_pre */
-       0x7e100f0c,
-       0xf40006b3,
-/* 0x085e: ctx_xfer_pre_load */
-       0x020f1b11,
-       0x0006627e,
-       0x0006717e,
-       0x0006837e,
-       0x627ef4bd,
-       0xea7e0006,
-/* 0x0876: ctx_xfer_exec */
-       0x01980006,
-       0x8024bd16,
-       0xf6010500,
-       0x04bd0002,
-       0x008e1fb2,
-       0x8f7e41a5,
-       0xfcf00000,
-       0x022cf001,
-       0xfd0124b6,
-       0xffb205f2,
-       0x41a5048e,
+       0x414000b5,
+       0x13f00100,
+       0x0601fa06,
+       0x00f803f8,
+/* 0x087c: ctx_xfer */
+       0x0080040e,
+       0x0ef60302,
+/* 0x0887: ctx_xfer_idle */
+       0x8e04bd00,
+       0xcf030000,
+       0xe4f100ee,
+       0x1bf42000,
+       0x0611f4f5,
+/* 0x089b: ctx_xfer_pre */
+       0x0f0c02f4,
+       0x06f97e10,
+       0x1b11f400,
+/* 0x08a4: ctx_xfer_pre_load */
+       0xa87e020f,
+       0xb77e0006,
+       0xc97e0006,
+       0xf4bd0006,
+       0x0006a87e,
+       0x0007307e,
+/* 0x08bc: ctx_xfer_exec */
+       0xbd160198,
+       0x05008024,
+       0x0002f601,
+       0x1fb204bd,
+       0x41a5008e,
        0x00008f7e,
-       0x0002167e,
-       0xfc8024bd,
-       0x02f60247,
-       0xf004bd00,
-       0x20b6012c,
-       0x4afc8003,
-       0x0002f602,
-       0xacf004bd,
-       0x06a5f001,
-       0x0c98000b,
-       0x010d9800,
-       0x3d7e000e,
-       0x080a0001,
-       0x0000ec7e,
-       0x00020a7e,
-       0x0a1201f4,
-       0x00b87e0c,
-       0x7e050f00,
-       0xf40006d2,
-/* 0x08f2: ctx_xfer_post */
-       0x020f2d02,
-       0x0006627e,
-       0xb37ef4bd,
-       0x277e0006,
-       0x717e0002,
+       0xf001fcf0,
+       0x24b6022c,
+       0x05f2fd01,
+       0x048effb2,
+       0x8f7e41a5,
+       0x167e0000,
+       0x24bd0002,
+       0x0247fc80,
+       0xbd0002f6,
+       0x012cf004,
+       0x800320b6,
+       0xf6024afc,
+       0x04bd0002,
+       0xf001acf0,
+       0x000b06a5,
+       0x98000c98,
+       0x000e010d,
+       0x00013d7e,
+       0xec7e080a,
+       0x0a7e0000,
+       0x01f40002,
+       0x7e0c0a12,
+       0x0f0000b8,
+       0x07187e05,
+       0x2d02f400,
+/* 0x0938: ctx_xfer_post */
+       0xa87e020f,
        0xf4bd0006,
-       0x0006627e,
-       0x981011f4,
-       0x11fd4001,
-       0x070bf405,
-       0x0007e87e,
-/* 0x091c: ctx_xfer_no_post_mmio */
-/* 0x091c: ctx_xfer_done */
-       0x000000f8,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
-       0x00000000,
+       0x0006f97e,
+       0x0002277e,
+       0x0006b77e,
+       0xa87ef4bd,
+       0x11f40006,
+       0x40019810,
+       0xf40511fd,
+       0x2e7e070b,
+/* 0x0962: ctx_xfer_no_post_mmio */
+/* 0x0962: ctx_xfer_done */
+       0x00f80008,
        0x00000000,
        0x00000000,
        0x00000000,
index f8f7b27..92dfe6a 100644
@@ -528,10 +528,10 @@ uint32_t nvc0_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0xb521f502,
-       0xc721f507,
-       0x10f7f007,
-       0x081421f5,
+       0x0d21f502,
+       0x1f21f508,
+       0x10f7f008,
+       0x086c21f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvc0_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x081421f5,
+       0x086c21f5,
        0xf500f7f0,
-       0xf107b521,
+       0xf1080d21,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvc0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvc0_grhub_code[] = {
        0x0203f00f,
        0xbd0009d0,
        0x0131f404,
-       0x09e821f5,
+       0x0a4021f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvc0_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09e821,
+       0xfc0a4021,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvc0_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09e821f5,
+       0x0a4021f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvc0_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x7821f502,
+       0xd021f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvc0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvc0_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvc0_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
        0xf001f800,
        0xffb901f7,
        0x60e7f102,
        0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
        0xf19d21f4,
        0xf04160e7,
        0x21f440e3,
        0x02ffb968,
        0xf404ffc8,
        0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
        0xffb9f4bd,
        0x60e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
        0x10f5f000,
        0xf102ffb9,
        0xf04170e7,
        0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
        0xf100f89d,
        0xf04170e7,
        0x21f440e3,
        0x02ffb968,
        0xf410f4f0,
        0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
        0x0200e7f1,
        0xf040e5f0,
        0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvc0_grhub_code[] = {
        0x0103f085,
        0xbd000ed0,
        0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
        0xf401f2b6,
        0xe5f1fd1b,
        0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvc0_grhub_code[] = {
        0x03f08500,
        0x000ed001,
        0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
        0x1b0007f1,
        0xd00203f0,
        0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvc0_grhub_code[] = {
        0xa86ce7f1,
        0xf441e3f0,
        0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
        0x840007f1,
        0xd00203f0,
        0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
        0x8400f7f1,
        0xcf02f3f0,
        0xfffd00ff,
        0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
        0x94bd00f8,
        0xf10599f0,
        0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvc0_grhub_code[] = {
        0x02d00203,
        0xf004bd00,
        0x21f507f7,
-       0x07f1083c,
+       0x07f10894,
        0x03f0c000,
        0x0002d002,
        0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvc0_grhub_code[] = {
        0x03f01700,
        0x0009d002,
        0x00f804bd,
-/* 0x0978: ctx_chan */
-       0x077f21f5,
-       0x085a21f5,
+/* 0x09d0: ctx_chan */
+       0x07d721f5,
+       0x08b221f5,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
-       0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+       0x9421f505,
+       0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
        0x9800f807,
        0x07f14103,
        0x03f08100,
        0x0003d002,
        0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
        0xf4ff34c4,
        0x57f10f1b,
        0x53f00200,
        0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
        0x4e9803f8,
        0x814f9880,
        0xb69d21f4,
        0x12b60830,
        0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
        0xf1160398,
        0xf0810007,
        0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvc0_grhub_code[] = {
        0x13f00100,
        0x0601fa06,
        0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
        0xf104e7f0,
        0xf0020007,
        0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
        0xf104bd00,
        0xf00000e7,
        0xeecf03e3,
        0x00e4f100,
        0xf21bf420,
        0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
        0xf7f01102,
-       0x1421f510,
-       0x7f21f508,
+       0x6c21f510,
+       0xd721f508,
        0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
        0xf502f7f0,
-       0xf507b521,
-       0xf507c721,
-       0xbd07dc21,
-       0xb521f5f4,
-       0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+       0xf5080d21,
+       0xf5081f21,
+       0xbd083421,
+       0x0d21f5f4,
+       0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
        0x16019808,
        0x07f124bd,
        0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvc0_grhub_code[] = {
        0x1301f402,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
+       0x9421f505,
        0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
        0xf502f7f0,
-       0xbd07b521,
-       0x1421f5f4,
+       0xbd080d21,
+       0x6c21f5f4,
        0x7f21f508,
-       0xc721f502,
-       0xf5f4bd07,
-       0xf407b521,
+       0x1f21f502,
+       0xf5f4bd08,
+       0xf4080d21,
        0x01981011,
        0x0511fd40,
        0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
-       0xf5099321,
-/* 0x0af3: ctx_xfer_done */
-       0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+       0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+       0xf807fc21,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index 624215a..62b0c76 100644
@@ -528,10 +528,10 @@ uint32_t nvd7_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0xb521f502,
-       0xc721f507,
-       0x10f7f007,
-       0x081421f5,
+       0x0d21f502,
+       0x1f21f508,
+       0x10f7f008,
+       0x086c21f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvd7_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x081421f5,
+       0x086c21f5,
        0xf500f7f0,
-       0xf107b521,
+       0xf1080d21,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvd7_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvd7_grhub_code[] = {
        0x0203f00f,
        0xbd0009d0,
        0x0131f404,
-       0x09e821f5,
+       0x0a4021f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvd7_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09e821,
+       0xfc0a4021,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvd7_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09e821f5,
+       0x0a4021f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvd7_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x7821f502,
+       0xd021f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvd7_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xe821f502,
-       0xf094bd09,
+       0x4021f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvd7_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,36 +753,36 @@ uint32_t nvd7_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4160s */
+/* 0x07d7: ctx_4160s */
        0xf001f800,
        0xffb901f7,
        0x60e7f102,
        0x40e3f041,
-/* 0x078f: ctx_4160s_wait */
+/* 0x07e7: ctx_4160s_wait */
        0xf19d21f4,
        0xf04160e7,
        0x21f440e3,
        0x02ffb968,
        0xf404ffc8,
        0x00f8f00b,
-/* 0x07a4: ctx_4160c */
+/* 0x07fc: ctx_4160c */
        0xffb9f4bd,
        0x60e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x07b5: ctx_4170s */
+/* 0x080d: ctx_4170s */
        0x10f5f000,
        0xf102ffb9,
        0xf04170e7,
        0x21f440e3,
-/* 0x07c7: ctx_4170w */
+/* 0x081f: ctx_4170w */
        0xf100f89d,
        0xf04170e7,
        0x21f440e3,
        0x02ffb968,
        0xf410f4f0,
        0x00f8f01b,
-/* 0x07dc: ctx_redswitch */
+/* 0x0834: ctx_redswitch */
        0x0200e7f1,
        0xf040e5f0,
        0xe5f020e5,
@@ -768,7 +790,7 @@ uint32_t nvd7_grhub_code[] = {
        0x0103f085,
        0xbd000ed0,
        0x08f7f004,
-/* 0x07f8: ctx_redswitch_delay */
+/* 0x0850: ctx_redswitch_delay */
        0xf401f2b6,
        0xe5f1fd1b,
        0xe5f10400,
@@ -776,7 +798,7 @@ uint32_t nvd7_grhub_code[] = {
        0x03f08500,
        0x000ed001,
        0x00f804bd,
-/* 0x0814: ctx_86c */
+/* 0x086c: ctx_86c */
        0x1b0007f1,
        0xd00203f0,
        0x04bd000f,
@@ -787,16 +809,16 @@ uint32_t nvd7_grhub_code[] = {
        0xa86ce7f1,
        0xf441e3f0,
        0x00f89d21,
-/* 0x083c: ctx_mem */
+/* 0x0894: ctx_mem */
        0x840007f1,
        0xd00203f0,
        0x04bd000f,
-/* 0x0848: ctx_mem_wait */
+/* 0x08a0: ctx_mem_wait */
        0x8400f7f1,
        0xcf02f3f0,
        0xfffd00ff,
        0xf31bf405,
-/* 0x085a: ctx_load */
+/* 0x08b2: ctx_load */
        0x94bd00f8,
        0xf10599f0,
        0xf00f0007,
@@ -814,7 +836,7 @@ uint32_t nvd7_grhub_code[] = {
        0x02d00203,
        0xf004bd00,
        0x21f507f7,
-       0x07f1083c,
+       0x07f10894,
        0x03f0c000,
        0x0002d002,
        0x0bfe04bd,
@@ -869,31 +891,31 @@ uint32_t nvd7_grhub_code[] = {
        0x03f01700,
        0x0009d002,
        0x00f804bd,
-/* 0x0978: ctx_chan */
-       0x077f21f5,
-       0x085a21f5,
+/* 0x09d0: ctx_chan */
+       0x07d721f5,
+       0x08b221f5,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
-       0xa421f508,
-/* 0x0993: ctx_mmio_exec */
+       0x9421f505,
+       0xfc21f508,
+/* 0x09eb: ctx_mmio_exec */
        0x9800f807,
        0x07f14103,
        0x03f08100,
        0x0003d002,
        0x34bd04bd,
-/* 0x09a4: ctx_mmio_loop */
+/* 0x09fc: ctx_mmio_loop */
        0xf4ff34c4,
        0x57f10f1b,
        0x53f00200,
        0x0535fa06,
-/* 0x09b6: ctx_mmio_pull */
+/* 0x0a0e: ctx_mmio_pull */
        0x4e9803f8,
        0x814f9880,
        0xb69d21f4,
        0x12b60830,
        0xdf1bf401,
-/* 0x09c8: ctx_mmio_done */
+/* 0x0a20: ctx_mmio_done */
        0xf1160398,
        0xf0810007,
        0x03d00203,
@@ -902,30 +924,30 @@ uint32_t nvd7_grhub_code[] = {
        0x13f00100,
        0x0601fa06,
        0x00f803f8,
-/* 0x09e8: ctx_xfer */
+/* 0x0a40: ctx_xfer */
        0xf104e7f0,
        0xf0020007,
        0x0ed00303,
-/* 0x09f7: ctx_xfer_idle */
+/* 0x0a4f: ctx_xfer_idle */
        0xf104bd00,
        0xf00000e7,
        0xeecf03e3,
        0x00e4f100,
        0xf21bf420,
        0xf40611f4,
-/* 0x0a0e: ctx_xfer_pre */
+/* 0x0a66: ctx_xfer_pre */
        0xf7f01102,
-       0x1421f510,
-       0x7f21f508,
+       0x6c21f510,
+       0xd721f508,
        0x1c11f407,
-/* 0x0a1c: ctx_xfer_pre_load */
+/* 0x0a74: ctx_xfer_pre_load */
        0xf502f7f0,
-       0xf507b521,
-       0xf507c721,
-       0xbd07dc21,
-       0xb521f5f4,
-       0x5a21f507,
-/* 0x0a35: ctx_xfer_exec */
+       0xf5080d21,
+       0xf5081f21,
+       0xbd083421,
+       0x0d21f5f4,
+       0xb221f508,
+/* 0x0a8d: ctx_xfer_exec */
        0x16019808,
        0x07f124bd,
        0x03f00500,
@@ -960,23 +982,65 @@ uint32_t nvd7_grhub_code[] = {
        0x1301f402,
        0xf40ca7f0,
        0xf7f0d021,
-       0x3c21f505,
+       0x9421f505,
        0x3202f408,
-/* 0x0ac4: ctx_xfer_post */
+/* 0x0b1c: ctx_xfer_post */
        0xf502f7f0,
-       0xbd07b521,
-       0x1421f5f4,
+       0xbd080d21,
+       0x6c21f5f4,
        0x7f21f508,
-       0xc721f502,
-       0xf5f4bd07,
-       0xf407b521,
+       0x1f21f502,
+       0xf5f4bd08,
+       0xf4080d21,
        0x01981011,
        0x0511fd40,
        0xf5070bf4,
-/* 0x0aef: ctx_xfer_no_post_mmio */
-       0xf5099321,
-/* 0x0af3: ctx_xfer_done */
-       0xf807a421,
+/* 0x0b47: ctx_xfer_no_post_mmio */
+       0xf509eb21,
+/* 0x0b4b: ctx_xfer_done */
+       0xf807fc21,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
        0x00000000,
        0x00000000,
        0x00000000,
index 6547b3d..51c3797 100644
@@ -528,10 +528,10 @@ uint32_t nve0_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0x7f21f502,
-       0x9121f507,
+       0xd721f502,
+       0xe921f507,
        0x10f7f007,
-       0x07de21f5,
+       0x083621f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nve0_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x07de21f5,
+       0x083621f5,
        0xf500f7f0,
-       0xf1077f21,
+       0xf107d721,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nve0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nve0_grhub_code[] = {
        0x0203f00f,
        0xbd0009d0,
        0x0131f404,
-       0x09aa21f5,
+       0x0a0221f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nve0_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09aa21,
+       0xfc0a0221,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nve0_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09aa21f5,
+       0x0a0221f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nve0_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x4221f502,
+       0x9a21f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nve0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nve0_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nve0_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
        0xf001f800,
        0xffb910f5,
        0x70e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
        0x70e7f100,
        0x40e3f041,
        0xb96821f4,
        0xf4f002ff,
        0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
        0xe7f100f8,
        0xe5f00200,
        0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nve0_grhub_code[] = {
        0xf0850007,
        0x0ed00103,
        0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
        0xf2b608f7,
        0xfd1bf401,
        0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nve0_grhub_code[] = {
        0x850007f1,
        0xd00103f0,
        0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
        0x07f100f8,
        0x03f01b00,
        0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nve0_grhub_code[] = {
        0xe7f102ff,
        0xe3f0a86c,
        0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
        0x07f100f8,
        0x03f08400,
        0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
        0xf7f104bd,
        0xf3f08400,
        0x00ffcf02,
        0xf405fffd,
        0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
        0x99f094bd,
        0x0007f105,
        0x0203f00f,
@@ -797,7 +819,7 @@ uint32_t nve0_grhub_code[] = {
        0x0203f083,
        0xbd0002d0,
        0x07f7f004,
-       0x080621f5,
+       0x085e21f5,
        0xc00007f1,
        0xd00203f0,
        0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nve0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
        0x21f500f8,
-       0xa7f00824,
+       0xa7f0087c,
        0xd021f40c,
        0xf505f7f0,
-       0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+       0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
        0x41039800,
        0x810007f1,
        0xd00203f0,
        0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
        0x34c434bd,
        0x0f1bf4ff,
        0x020057f1,
        0xfa0653f0,
        0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
        0x98804e98,
        0x21f4814f,
        0x0830b69d,
        0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
        0x0398df1b,
        0x0007f116,
        0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nve0_grhub_code[] = {
        0x010017f1,
        0xfa0613f0,
        0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
        0xe7f000f8,
        0x0007f104,
        0x0303f002,
        0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
        0x00e7f104,
        0x03e3f000,
        0xf100eecf,
        0xf42000e4,
        0x11f4f21b,
        0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
        0xf510f7f0,
-       0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+       0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
        0xf7f01c11,
-       0x7f21f502,
-       0x9121f507,
-       0xa621f507,
+       0xd721f502,
+       0xe921f507,
+       0xfe21f507,
        0xf5f4bd07,
-       0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
-       0x98082421,
+       0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+       0x98087c21,
        0x24bd1601,
        0x050007f1,
        0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nve0_grhub_code[] = {
        0xa7f01301,
        0xd021f40c,
        0xf505f7f0,
-       0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+       0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
        0xf7f02e02,
-       0x7f21f502,
+       0xd721f502,
        0xf5f4bd07,
-       0xf507de21,
+       0xf5083621,
        0xf5027f21,
-       0xbd079121,
-       0x7f21f5f4,
+       0xbd07e921,
+       0xd721f5f4,
        0x1011f407,
        0xfd400198,
        0x0bf40511,
-       0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+       0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
        0x0000f809,
        0x00000000,
        0x00000000,
@@ -977,4 +999,46 @@ uint32_t nve0_grhub_code[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
 };
index a5aee5a..a0af4b7 100644
@@ -528,10 +528,10 @@ uint32_t nvf0_grhub_code[] = {
        0x0001d001,
        0x17f104bd,
        0xf7f00100,
-       0x7f21f502,
-       0x9121f507,
+       0xd721f502,
+       0xe921f507,
        0x10f7f007,
-       0x07de21f5,
+       0x083621f5,
        0x98000e98,
        0x21f5010f,
        0x14950150,
@@ -574,9 +574,9 @@ uint32_t nvf0_grhub_code[] = {
        0xb6800040,
        0x1bf40132,
        0x00f7f0be,
-       0x07de21f5,
+       0x083621f5,
        0xf500f7f0,
-       0xf1077f21,
+       0xf107d721,
        0xf0010007,
        0x01d00203,
        0xbd04bd00,
@@ -610,8 +610,8 @@ uint32_t nvf0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x31f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -621,7 +621,7 @@ uint32_t nvf0_grhub_code[] = {
        0x0203f037,
        0xbd0009d0,
        0x0131f404,
-       0x09aa21f5,
+       0x0a0221f5,
        0x99f094bd,
        0x0007f106,
        0x0203f017,
@@ -631,7 +631,7 @@ uint32_t nvf0_grhub_code[] = {
        0x12b920f9,
        0x0132f402,
        0xf50232f4,
-       0xfc09aa21,
+       0xfc0a0221,
        0x0007f120,
        0x0203f0c0,
        0xbd0002d0,
@@ -640,7 +640,7 @@ uint32_t nvf0_grhub_code[] = {
        0xf41f23c8,
        0x31f40d0b,
        0x0232f401,
-       0x09aa21f5,
+       0x0a0221f5,
 /* 0x063c: chsw_done */
        0xf10127f0,
        0xf0c30007,
@@ -654,7 +654,7 @@ uint32_t nvf0_grhub_code[] = {
 /* 0x0660: main_not_ctx_switch */
        0xf401e4b0,
        0xf2b90d1b,
-       0x4221f502,
+       0x9a21f502,
        0x460ef409,
 /* 0x0670: main_not_ctx_chan */
        0xf402e4b0,
@@ -664,8 +664,8 @@ uint32_t nvf0_grhub_code[] = {
        0x09d00203,
        0xf404bd00,
        0x32f40132,
-       0xaa21f502,
-       0xf094bd09,
+       0x0221f502,
+       0xf094bd0a,
        0x07f10799,
        0x03f01700,
        0x0009d002,
@@ -710,18 +710,40 @@ uint32_t nvf0_grhub_code[] = {
 /* 0x072b: ih_no_ctxsw */
        0xe40421f4,
        0xf40400ab,
-       0xb7f1140b,
+       0xe7f16c0b,
+       0xe3f00708,
+       0x6821f440,
+       0xf102ffb9,
+       0xf0040007,
+       0x0fd00203,
+       0xf104bd00,
+       0xf00704e7,
+       0x21f440e3,
+       0x02ffb968,
+       0x030007f1,
+       0xd00203f0,
+       0x04bd000f,
+       0x9450fec7,
+       0xf7f102ee,
+       0xf3f00700,
+       0x00efbb40,
+       0xf16821f4,
+       0xf0020007,
+       0x0fd00203,
+       0xf004bd00,
+       0x21f503f7,
+       0xb7f1037e,
        0xbfb90100,
        0x44e7f102,
        0x40e3f001,
-/* 0x0743: ih_no_fwmthd */
+/* 0x079b: ih_no_fwmthd */
        0xf19d21f4,
-       0xbd0104b7,
+       0xbd0504b7,
        0xb4abffb0,
        0xf10f0bf4,
        0xf0070007,
        0x0bd00303,
-/* 0x075b: ih_no_other */
+/* 0x07b3: ih_no_other */
        0xf104bd00,
        0xf0010007,
        0x0ad00003,
@@ -731,19 +753,19 @@ uint32_t nvf0_grhub_code[] = {
        0xfc90fca0,
        0x0088fe80,
        0x32f480fc,
-/* 0x077f: ctx_4170s */
+/* 0x07d7: ctx_4170s */
        0xf001f800,
        0xffb910f5,
        0x70e7f102,
        0x40e3f041,
        0xf89d21f4,
-/* 0x0791: ctx_4170w */
+/* 0x07e9: ctx_4170w */
        0x70e7f100,
        0x40e3f041,
        0xb96821f4,
        0xf4f002ff,
        0xf01bf410,
-/* 0x07a6: ctx_redswitch */
+/* 0x07fe: ctx_redswitch */
        0xe7f100f8,
        0xe5f00200,
        0x20e5f040,
@@ -751,7 +773,7 @@ uint32_t nvf0_grhub_code[] = {
        0xf0850007,
        0x0ed00103,
        0xf004bd00,
-/* 0x07c2: ctx_redswitch_delay */
+/* 0x081a: ctx_redswitch_delay */
        0xf2b608f7,
        0xfd1bf401,
        0x0400e5f1,
@@ -759,7 +781,7 @@ uint32_t nvf0_grhub_code[] = {
        0x850007f1,
        0xd00103f0,
        0x04bd000e,
-/* 0x07de: ctx_86c */
+/* 0x0836: ctx_86c */
        0x07f100f8,
        0x03f02300,
        0x000fd002,
@@ -770,17 +792,17 @@ uint32_t nvf0_grhub_code[] = {
        0xe7f102ff,
        0xe3f0a88c,
        0x9d21f441,
-/* 0x0806: ctx_mem */
+/* 0x085e: ctx_mem */
        0x07f100f8,
        0x03f08400,
        0x000fd002,
-/* 0x0812: ctx_mem_wait */
+/* 0x086a: ctx_mem_wait */
        0xf7f104bd,
        0xf3f08400,
        0x00ffcf02,
        0xf405fffd,
        0x00f8f31b,
-/* 0x0824: ctx_load */
+/* 0x087c: ctx_load */
        0x99f094bd,
        0x0007f105,
        0x0203f037,
@@ -797,7 +819,7 @@ uint32_t nvf0_grhub_code[] = {
        0x0203f083,
        0xbd0002d0,
        0x07f7f004,
-       0x080621f5,
+       0x085e21f5,
        0xc00007f1,
        0xd00203f0,
        0x04bd0002,
@@ -852,29 +874,29 @@ uint32_t nvf0_grhub_code[] = {
        0x170007f1,
        0xd00203f0,
        0x04bd0009,
-/* 0x0942: ctx_chan */
+/* 0x099a: ctx_chan */
        0x21f500f8,
-       0xa7f00824,
+       0xa7f0087c,
        0xd021f40c,
        0xf505f7f0,
-       0xf8080621,
-/* 0x0955: ctx_mmio_exec */
+       0xf8085e21,
+/* 0x09ad: ctx_mmio_exec */
        0x41039800,
        0x810007f1,
        0xd00203f0,
        0x04bd0003,
-/* 0x0966: ctx_mmio_loop */
+/* 0x09be: ctx_mmio_loop */
        0x34c434bd,
        0x0f1bf4ff,
        0x020057f1,
        0xfa0653f0,
        0x03f80535,
-/* 0x0978: ctx_mmio_pull */
+/* 0x09d0: ctx_mmio_pull */
        0x98804e98,
        0x21f4814f,
        0x0830b69d,
        0xf40112b6,
-/* 0x098a: ctx_mmio_done */
+/* 0x09e2: ctx_mmio_done */
        0x0398df1b,
        0x0007f116,
        0x0203f081,
@@ -883,30 +905,30 @@ uint32_t nvf0_grhub_code[] = {
        0x010017f1,
        0xfa0613f0,
        0x03f80601,
-/* 0x09aa: ctx_xfer */
+/* 0x0a02: ctx_xfer */
        0xe7f000f8,
        0x0007f104,
        0x0303f002,
        0xbd000ed0,
-/* 0x09b9: ctx_xfer_idle */
+/* 0x0a11: ctx_xfer_idle */
        0x00e7f104,
        0x03e3f000,
        0xf100eecf,
        0xf42000e4,
        0x11f4f21b,
        0x0d02f406,
-/* 0x09d0: ctx_xfer_pre */
+/* 0x0a28: ctx_xfer_pre */
        0xf510f7f0,
-       0xf407de21,
-/* 0x09da: ctx_xfer_pre_load */
+       0xf4083621,
+/* 0x0a32: ctx_xfer_pre_load */
        0xf7f01c11,
-       0x7f21f502,
-       0x9121f507,
-       0xa621f507,
+       0xd721f502,
+       0xe921f507,
+       0xfe21f507,
        0xf5f4bd07,
-       0xf5077f21,
-/* 0x09f3: ctx_xfer_exec */
-       0x98082421,
+       0xf507d721,
+/* 0x0a4b: ctx_xfer_exec */
+       0x98087c21,
        0x24bd1601,
        0x050007f1,
        0xd00103f0,
@@ -941,21 +963,21 @@ uint32_t nvf0_grhub_code[] = {
        0xa7f01301,
        0xd021f40c,
        0xf505f7f0,
-       0xf4080621,
-/* 0x0a82: ctx_xfer_post */
+       0xf4085e21,
+/* 0x0ada: ctx_xfer_post */
        0xf7f02e02,
-       0x7f21f502,
+       0xd721f502,
        0xf5f4bd07,
-       0xf507de21,
+       0xf5083621,
        0xf5027f21,
-       0xbd079121,
-       0x7f21f5f4,
+       0xbd07e921,
+       0xd721f5f4,
        0x1011f407,
        0xfd400198,
        0x0bf40511,
-       0x5521f507,
-/* 0x0aad: ctx_xfer_no_post_mmio */
-/* 0x0aad: ctx_xfer_done */
+       0xad21f507,
+/* 0x0b05: ctx_xfer_no_post_mmio */
+/* 0x0b05: ctx_xfer_done */
        0x0000f809,
        0x00000000,
        0x00000000,
@@ -977,4 +999,46 @@ uint32_t nvf0_grhub_code[] = {
        0x00000000,
        0x00000000,
        0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
+       0x00000000,
 };
index a47d49d..2a0b0f8 100644
 #define GK110 0xf0
 #define GK208 0x108
 
+#define NV_PGRAPH_TRAPPED_ADDR                                         0x400704
+#define NV_PGRAPH_TRAPPED_DATA_LO                                      0x400708
+#define NV_PGRAPH_TRAPPED_DATA_HI                                      0x40070c
+
+#define NV_PGRAPH_FE_OBJECT_TABLE(n)                        ((n) * 4 + 0x400700)
+
 #define NV_PGRAPH_FECS_INTR_ACK                                        0x409004
 #define NV_PGRAPH_FECS_INTR                                            0x409008
 #define NV_PGRAPH_FECS_INTR_FWMTHD                                   0x00000400
index fd1d380..1718ae4 100644
@@ -3,5 +3,6 @@
 
 #define E_BAD_COMMAND  0x00000001
 #define E_CMD_OVERFLOW 0x00000002
+#define E_BAD_FWMTHD   0x00000003
 
 #endif
index 1a2d564..20665c2 100644
@@ -976,7 +976,6 @@ nv50_graph_init(struct nouveau_object *object)
                break;
        case 0xa0:
        default:
-               nv_wr32(priv, 0x402cc0, 0x00000000);
                if (nv_device(priv)->chipset == 0xa0 ||
                    nv_device(priv)->chipset == 0xaa ||
                    nv_device(priv)->chipset == 0xac) {
@@ -991,10 +990,10 @@ nv50_graph_init(struct nouveau_object *object)
 
        /* zero out zcull regions */
        for (i = 0; i < 8; i++) {
-               nv_wr32(priv, 0x402c20 + (i * 8), 0x00000000);
-               nv_wr32(priv, 0x402c24 + (i * 8), 0x00000000);
-               nv_wr32(priv, 0x402c28 + (i * 8), 0x00000000);
-               nv_wr32(priv, 0x402c2c + (i * 8), 0x00000000);
+               nv_wr32(priv, 0x402c20 + (i * 0x10), 0x00000000);
+               nv_wr32(priv, 0x402c24 + (i * 0x10), 0x00000000);
+               nv_wr32(priv, 0x402c28 + (i * 0x10), 0x00000000);
+               nv_wr32(priv, 0x402c2c + (i * 0x10), 0x00000000);
        }
        return 0;
 }
index bf7bdb1..aa08389 100644
@@ -789,17 +789,40 @@ nvc0_graph_ctxctl_debug(struct nvc0_graph_priv *priv)
 static void
 nvc0_graph_ctxctl_isr(struct nvc0_graph_priv *priv)
 {
-       u32 ustat = nv_rd32(priv, 0x409c18);
+       u32 stat = nv_rd32(priv, 0x409c18);
 
-       if (ustat & 0x00000001)
-               nv_error(priv, "CTXCTL ucode error\n");
-       if (ustat & 0x00080000)
-               nv_error(priv, "CTXCTL watchdog timeout\n");
-       if (ustat & ~0x00080001)
-               nv_error(priv, "CTXCTL 0x%08x\n", ustat);
+       if (stat & 0x00000001) {
+               u32 code = nv_rd32(priv, 0x409814);
+               if (code == E_BAD_FWMTHD) {
+                       u32 class = nv_rd32(priv, 0x409808);
+                       u32  addr = nv_rd32(priv, 0x40980c);
+                       u32  subc = (addr & 0x00070000) >> 16;
+                       u32  mthd = (addr & 0x00003ffc);
+                       u32  data = nv_rd32(priv, 0x409810);
+
+                       nv_error(priv, "FECS MTHD subc %d class 0x%04x "
+                                      "mthd 0x%04x data 0x%08x\n",
+                                subc, class, mthd, data);
 
-       nvc0_graph_ctxctl_debug(priv);
-       nv_wr32(priv, 0x409c20, ustat);
+                       nv_wr32(priv, 0x409c20, 0x00000001);
+                       stat &= ~0x00000001;
+               } else {
+                       nv_error(priv, "FECS ucode error %d\n", code);
+               }
+       }
+
+       if (stat & 0x00080000) {
+               nv_error(priv, "FECS watchdog timeout\n");
+               nvc0_graph_ctxctl_debug(priv);
+               nv_wr32(priv, 0x409c20, 0x00080000);
+               stat &= ~0x00080000;
+       }
+
+       if (stat) {
+               nv_error(priv, "FECS 0x%08x\n", stat);
+               nvc0_graph_ctxctl_debug(priv);
+               nv_wr32(priv, 0x409c20, stat);
+       }
 }
 
 static void
index 75203a9..ffc2891 100644
@@ -38,6 +38,8 @@
 #include <engine/fifo.h>
 #include <engine/graph.h>
 
+#include "fuc/os.h"
+
 #define GPC_MAX 32
 #define TPC_MAX (GPC_MAX * 8)
 
index db1b39d..825f7bb 100644
@@ -84,6 +84,7 @@ extern struct nouveau_oclass *nv4e_i2c_oclass;
 extern struct nouveau_oclass *nv50_i2c_oclass;
 extern struct nouveau_oclass *nv94_i2c_oclass;
 extern struct nouveau_oclass *nvd0_i2c_oclass;
+extern struct nouveau_oclass *gf117_i2c_oclass;
 extern struct nouveau_oclass *nve0_i2c_oclass;
 
 static inline int
index 4ac1aa3..0e62a32 100644
@@ -307,7 +307,6 @@ calc_clk(struct nve0_clock_priv *priv,
                info->dsrc = src0;
                if (div0) {
                        info->ddiv |= 0x80000000;
-                       info->ddiv |= div0 << 8;
                        info->ddiv |= div0;
                }
                if (div1D) {
@@ -352,7 +351,7 @@ nve0_clock_prog_0(struct nve0_clock_priv *priv, int clk)
 {
        struct nve0_clock_info *info = &priv->eng[clk];
        if (!info->ssel) {
-               nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x80003f3f, info->ddiv);
+               nv_mask(priv, 0x1371d0 + (clk * 0x04), 0x8000003f, info->ddiv);
                nv_wr32(priv, 0x137160 + (clk * 0x04), info->dsrc);
        }
 }
@@ -389,7 +388,10 @@ static void
 nve0_clock_prog_3(struct nve0_clock_priv *priv, int clk)
 {
        struct nve0_clock_info *info = &priv->eng[clk];
-       nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f3f, info->mdiv);
+       if (info->ssel)
+               nv_mask(priv, 0x137250 + (clk * 0x04), 0x00003f00, info->mdiv);
+       else
+               nv_mask(priv, 0x137250 + (clk * 0x04), 0x0000003f, info->mdiv);
 }
 
 static void
index 0f57fcf..2af9cfd 100644
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
        };
 }
 
-static inline struct ramfuc_reg
+static noinline struct ramfuc_reg
 ramfuc_reg(u32 addr)
 {
        return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
 
 #define ram_init(s,p)       ramfuc_init(&(s)->base, (p))
 #define ram_exec(s,e)       ramfuc_exec(&(s)->base, (e))
-#define ram_have(s,r)       ((s)->r_##r.addr != 0x000000)
+#define ram_have(s,r)       ((s)->r_##r.addr[0] != 0x000000)
 #define ram_rd32(s,r)       ramfuc_rd32(&(s)->base, &(s)->r_##r)
 #define ram_wr32(s,r,d)     ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
 #define ram_nuke(s,r)       ramfuc_nuke(&(s)->base, &(s)->r_##r)
index 84c7efb..c5b46e3 100644 (file)
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
        /* (re)program mempll, if required */
        if (ram->mode == 2) {
                ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
+               ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
                ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
                ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
                ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
@@ -262,8 +263,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        struct nve0_ram *ram = (void *)pfb->ram;
        struct nve0_ramfuc *fuc = &ram->fuc;
        struct nouveau_ram_data *next = ram->base.next;
-       int vc = !(next->bios.ramcfg_11_02_08);
-       int mv = !(next->bios.ramcfg_11_02_04);
+       int vc = !next->bios.ramcfg_11_02_08;
+       int mv = !next->bios.ramcfg_11_02_04;
        u32 mask, data;
 
        ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -370,8 +371,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                }
        }
 
-       if ( (next->bios.ramcfg_11_02_40) ||
-            (next->bios.ramcfg_11_07_10)) {
+       if (next->bios.ramcfg_11_02_40 ||
+           next->bios.ramcfg_11_07_10) {
                ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
                ram_nsec(fuc, 20000);
        }
@@ -417,7 +418,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_mask(fuc, 0x10f694, 0xff00ff00, data);
        }
 
-       if (ram->mode == 2 && (next->bios.ramcfg_11_08_10))
+       if (ram->mode == 2 && next->bios.ramcfg_11_08_10)
                data = 0x00000080;
        else
                data = 0x00000000;
@@ -425,13 +426,13 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x00070000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_02_80))
+       if (!next->bios.ramcfg_11_02_80)
                data |= 0x03000000;
-       if (!(next->bios.ramcfg_11_02_40))
+       if (!next->bios.ramcfg_11_02_40)
                data |= 0x00002000;
-       if (!(next->bios.ramcfg_11_07_10))
+       if (!next->bios.ramcfg_11_07_10)
                data |= 0x00004000;
-       if (!(next->bios.ramcfg_11_07_08))
+       if (!next->bios.ramcfg_11_07_08)
                data |= 0x00000003;
        else
                data |= 0x74000000;
@@ -486,7 +487,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        data = mask = 0x00000000;
        if (NOTE00(ramcfg_02_03 != 0)) {
-               data |= (next->bios.ramcfg_11_02_03) << 8;
+               data |= next->bios.ramcfg_11_02_03 << 8;
                mask |= 0x00000300;
        }
        if (NOTE00(ramcfg_01_10)) {
@@ -498,7 +499,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        data = mask = 0x00000000;
        if (NOTE00(timing_30_07 != 0)) {
-               data |= (next->bios.timing_20_30_07) << 28;
+               data |= next->bios.timing_20_30_07 << 28;
                mask |= 0x70000000;
        }
        if (NOTE00(ramcfg_01_01)) {
@@ -510,7 +511,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        data = mask = 0x00000000;
        if (NOTE00(timing_30_07 != 0)) {
-               data |= (next->bios.timing_20_30_07) << 28;
+               data |= next->bios.timing_20_30_07 << 28;
                mask |= 0x70000000;
        }
        if (NOTE00(ramcfg_01_02)) {
@@ -522,16 +523,16 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x33f00000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_01_04))
+       if (!next->bios.ramcfg_11_01_04)
                data |= 0x20200000;
-       if (!(next->bios.ramcfg_11_07_80))
+       if (!next->bios.ramcfg_11_07_80)
                data |= 0x12800000;
        /*XXX: see note above about there probably being some condition
         *     for the 10f824 stuff that uses ramcfg 3...
         */
-       if ( (next->bios.ramcfg_11_03_f0)) {
+       if (next->bios.ramcfg_11_03_f0) {
                if (next->bios.rammap_11_08_0c) {
-                       if (!(next->bios.ramcfg_11_07_80))
+                       if (!next->bios.ramcfg_11_07_80)
                                mask |= 0x00000020;
                        else
                                data |= 0x00000020;
@@ -563,7 +564,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_wait(fuc, 0x100710, 0x80000000, 0x80000000, 200000);
        }
 
-       data = (next->bios.timing_20_30_07) << 8;
+       data = next->bios.timing_20_30_07 << 8;
        if (next->bios.ramcfg_11_01_01)
                data |= 0x80000000;
        ram_mask(fuc, 0x100778, 0x00000700, data);
@@ -588,7 +589,7 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        ram_wr32(fuc, 0x10f310, 0x00000001); /* REFRESH */
        ram_wr32(fuc, 0x10f210, 0x80000000); /* REFRESH_AUTO = 1 */
 
-       if ((next->bios.ramcfg_11_08_10) && (ram->mode == 2) /*XXX*/) {
+       if (next->bios.ramcfg_11_08_10 && (ram->mode == 2) /*XXX*/) {
                u32 temp = ram_mask(fuc, 0x10f294, 0xff000000, 0x24000000);
                nve0_ram_train(fuc, 0xbc0e0000, 0xa4010000); /*XXX*/
                ram_nsec(fuc, 1000);
@@ -621,8 +622,8 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
        data  = ram_rd32(fuc, 0x10f978);
        data &= ~0x00046144;
        data |=  0x0000000b;
-       if (!(next->bios.ramcfg_11_07_08)) {
-               if (!(next->bios.ramcfg_11_07_04))
+       if (!next->bios.ramcfg_11_07_08) {
+               if (!next->bios.ramcfg_11_07_04)
                        data |= 0x0000200c;
                else
                        data |= 0x00000000;
@@ -636,11 +637,11 @@ nve0_ram_calc_gddr5(struct nouveau_fb *pfb, u32 freq)
                ram_wr32(fuc, 0x10f830, data);
        }
 
-       if (!(next->bios.ramcfg_11_07_08)) {
+       if (!next->bios.ramcfg_11_07_08) {
                data = 0x88020000;
-               if ( (next->bios.ramcfg_11_07_04))
+               if ( next->bios.ramcfg_11_07_04)
                        data |= 0x10000000;
-               if (!(next->bios.rammap_11_08_10))
+               if (!next->bios.rammap_11_08_10)
                        data |= 0x00080000;
        } else {
                data = 0xa40e0000;
@@ -689,8 +690,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
        const u32 runk0 = ram->fN1 << 16;
        const u32 runk1 = ram->fN1;
        struct nouveau_ram_data *next = ram->base.next;
-       int vc = !(next->bios.ramcfg_11_02_08);
-       int mv = !(next->bios.ramcfg_11_02_04);
+       int vc = !next->bios.ramcfg_11_02_08;
+       int mv = !next->bios.ramcfg_11_02_04;
        u32 mask, data;
 
        ram_mask(fuc, 0x10f808, 0x40000000, 0x40000000);
@@ -705,7 +706,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
        }
 
        ram_mask(fuc, 0x10f200, 0x00000800, 0x00000000);
-       if ((next->bios.ramcfg_11_03_f0))
+       if (next->bios.ramcfg_11_03_f0)
                ram_mask(fuc, 0x10f808, 0x04000000, 0x04000000);
 
        ram_wr32(fuc, 0x10f314, 0x00000001); /* PRECHARGE */
@@ -761,7 +762,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        ram_mask(fuc, 0x1373f4, 0x00000000, 0x00010010);
        data  = ram_rd32(fuc, 0x1373ec) & ~0x00030000;
-       data |= (next->bios.ramcfg_11_03_30) << 12;
+       data |= next->bios.ramcfg_11_03_30 << 16;
        ram_wr32(fuc, 0x1373ec, data);
        ram_mask(fuc, 0x1373f4, 0x00000003, 0x00000000);
        ram_mask(fuc, 0x1373f4, 0x00000010, 0x00000000);
@@ -793,8 +794,8 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
                }
        }
 
-       if ( (next->bios.ramcfg_11_02_40) ||
-            (next->bios.ramcfg_11_07_10)) {
+       if (next->bios.ramcfg_11_02_40 ||
+           next->bios.ramcfg_11_07_10) {
                ram_mask(fuc, 0x132040, 0x00010000, 0x00010000);
                ram_nsec(fuc, 20000);
        }
@@ -810,13 +811,13 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x00010000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_02_80))
+       if (!next->bios.ramcfg_11_02_80)
                data |= 0x03000000;
-       if (!(next->bios.ramcfg_11_02_40))
+       if (!next->bios.ramcfg_11_02_40)
                data |= 0x00002000;
-       if (!(next->bios.ramcfg_11_07_10))
+       if (!next->bios.ramcfg_11_07_10)
                data |= 0x00004000;
-       if (!(next->bios.ramcfg_11_07_08))
+       if (!next->bios.ramcfg_11_07_08)
                data |= 0x00000003;
        else
                data |= 0x14000000;
@@ -844,16 +845,16 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
 
        mask = 0x33f00000;
        data = 0x00000000;
-       if (!(next->bios.ramcfg_11_01_04))
+       if (!next->bios.ramcfg_11_01_04)
                data |= 0x20200000;
-       if (!(next->bios.ramcfg_11_07_80))
+       if (!next->bios.ramcfg_11_07_80)
                data |= 0x12800000;
        /*XXX: see note above about there probably being some condition
         *     for the 10f824 stuff that uses ramcfg 3...
         */
-       if ( (next->bios.ramcfg_11_03_f0)) {
+       if (next->bios.ramcfg_11_03_f0) {
                if (next->bios.rammap_11_08_0c) {
-                       if (!(next->bios.ramcfg_11_07_80))
+                       if (!next->bios.ramcfg_11_07_80)
                                mask |= 0x00000020;
                        else
                                data |= 0x00000020;
@@ -876,7 +877,7 @@ nve0_ram_calc_sddr3(struct nouveau_fb *pfb, u32 freq)
                data = next->bios.timing_20_2c_1fc0;
        ram_mask(fuc, 0x10f24c, 0x7f000000, data << 24);
 
-       ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8);
+       ram_mask(fuc, 0x10f224, 0x001f0000, next->bios.timing_20_30_f8 << 16);
 
        ram_wr32(fuc, 0x10f090, 0x4000007f);
        ram_nsec(fuc, 1000);
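
Two of the sddr3 hunks above (the 0x1373ec write and the 0x10f224 mask) fix the same class of bug: a field shifted outside the bit window the code just cleared. The 0x1373ec case in miniature, with illustrative values:

#include <stdint.h>
#include <stdio.h>

/* Bits 17:16 are cleared, so the 2-bit config field must be shifted
 * by 16, not 12, to land in that window.
 */
int main(void)
{
	uint32_t reg = 0x00030000, field = 0x2;
	uint32_t data = reg & ~0x00030000;

	printf("broken: 0x%08x\n", data | field << 12);	/* bits 13:12 */
	printf("fixed:  0x%08x\n", data | field << 16);	/* bits 17:16 */
	return 0;
}
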
diff --git a/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c b/drivers/gpu/drm/nouveau/core/subdev/i2c/gf117.c
new file mode 100644 (file)
index 0000000..fa891c3
--- /dev/null
@@ -0,0 +1,39 @@
+/*
+ * Copyright 2012 Red Hat Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Ben Skeggs
+ */
+
+#include "nv50.h"
+
+struct nouveau_oclass *
+gf117_i2c_oclass = &(struct nouveau_i2c_impl) {
+       .base.handle = NV_SUBDEV(I2C, 0xd7),
+       .base.ofuncs = &(struct nouveau_ofuncs) {
+               .ctor = _nouveau_i2c_ctor,
+               .dtor = _nouveau_i2c_dtor,
+               .init = _nouveau_i2c_init,
+               .fini = _nouveau_i2c_fini,
+       },
+       .sclass = nvd0_i2c_sclass,
+       .pad_x = &nv04_i2c_pad_oclass,
+       .pad_s = &nv04_i2c_pad_oclass,
+}.base;
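
The new gf117 file defines its class with a C99 idiom used throughout nouveau: take the address of a file-scope compound literal and narrow it to the embedded base member. A standalone miniature with made-up types:

#include <stdio.h>

/* A file-scope compound literal has static storage duration, so the
 * address of its .base member is a valid constant initializer --
 * exactly the shape of gf117_i2c_oclass above.
 */
struct base { int handle; };
struct impl { struct base base; int extra; };

static struct base *oclass = &(struct impl) {
	.base = { .handle = 0xd7 },
	.extra = 1,
}.base;

int main(void)
{
	printf("handle 0x%02x\n", oclass->handle);
	return 0;
}
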
index 7120124..ebef970 100644 (file)
@@ -94,6 +94,23 @@ nve0_ibus_intr(struct nouveau_subdev *subdev)
        }
 }
 
+static int
+nve0_ibus_init(struct nouveau_object *object)
+{
+       struct nve0_ibus_priv *priv = (void *)object;
+       int ret = nouveau_ibus_init(&priv->base);
+       if (ret == 0) {
+               nv_mask(priv, 0x122318, 0x0003ffff, 0x00001000);
+               nv_mask(priv, 0x12231c, 0x0003ffff, 0x00000200);
+               nv_mask(priv, 0x122310, 0x0003ffff, 0x00000800);
+               nv_mask(priv, 0x122348, 0x0003ffff, 0x00000100);
+               nv_mask(priv, 0x1223b0, 0x0003ffff, 0x00000fff);
+               nv_mask(priv, 0x122348, 0x0003ffff, 0x00000200);
+               nv_mask(priv, 0x122358, 0x0003ffff, 0x00002880);
+       }
+       return ret;
+}
+
 static int
 nve0_ibus_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
               struct nouveau_oclass *oclass, void *data, u32 size,
@@ -117,7 +134,7 @@ nve0_ibus_oclass = {
        .ofuncs = &(struct nouveau_ofuncs) {
                .ctor = nve0_ibus_ctor,
                .dtor = _nouveau_ibus_dtor,
-               .init = _nouveau_ibus_init,
+               .init = nve0_ibus_init,
                .fini = _nouveau_ibus_fini,
        },
 };
index 2284ecb..c2bb616 100644 (file)
@@ -83,7 +83,7 @@ host_send:
                // increment GET
                add b32 $r1 0x1
                and $r14 $r1 #fifo_qmaskf
-               nv_iowr(NV_PPWR_FIFO_GET(0), $r1)
+               nv_iowr(NV_PPWR_FIFO_GET(0), $r14)
                bra #host_send
        host_send_done:
        ret
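
The one-line fuc fix above writes back the wrapped queue index in $r14 instead of the raw increment in $r1; the four nvXX_pwr_code[] hunks that follow are just this change recompiled into the firmware blobs. The same bug in standalone C, with an illustrative queue size of 8:

#include <stdint.h>
#include <stdio.h>

#define FIFO_QMASK 0x7	/* illustrative: 8-entry queue */

int main(void)
{
	uint32_t get = 7;

	uint32_t raw     = get + 1;		/* $r1  */
	uint32_t wrapped = raw & FIFO_QMASK;	/* $r14 */

	printf("buggy GET: %u\n", raw);		/* 8: off the end    */
	printf("fixed GET: %u\n", wrapped);	/* 0: wraps to start */
	return 0;
}
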
index 4bd43a9..39a5dc1 100644 (file)
@@ -1018,7 +1018,7 @@ uint32_t nv108_pwr_code[] = {
        0xb600023f,
        0x1ec40110,
        0x04b0400f,
-       0xbd0001f6,
+       0xbd000ef6,
        0xc70ef404,
 /* 0x0328: host_send_done */
 /* 0x032a: host_recv */
index 5a73fa6..254205c 100644 (file)
@@ -1124,7 +1124,7 @@ uint32_t nva3_pwr_code[] = {
        0x0f1ec401,
        0x04b007f1,
        0xd00604b6,
-       0x04bd0001,
+       0x04bd000e,
 /* 0x03cb: host_send_done */
        0xf8ba0ef4,
 /* 0x03cd: host_recv */
index 4dba00d..7ac8740 100644 (file)
@@ -1124,7 +1124,7 @@ uint32_t nvc0_pwr_code[] = {
        0x0f1ec401,
        0x04b007f1,
        0xd00604b6,
-       0x04bd0001,
+       0x04bd000e,
 /* 0x03cb: host_send_done */
        0xf8ba0ef4,
 /* 0x03cd: host_recv */
index 5e24c6b..cd9ff1a 100644 (file)
@@ -1033,7 +1033,7 @@ uint32_t nvd0_pwr_code[] = {
        0xb6026b21,
        0x1ec40110,
        0xb007f10f,
-       0x0001d004,
+       0x000ed004,
        0x0ef404bd,
 /* 0x0365: host_send_done */
 /* 0x0367: host_recv */
index cfde9eb..6212537 100644 (file)
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
        nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
                                             NOUVEAU_THERM_THRS_SHUTDOWN);
 
+       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+
        /* schedule the next poll in one second */
        if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
-               ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm);
-
-       spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
+               ptimer->alarm(ptimer, 1000000000ULL, alarm);
 }
 
 void
index 26b5647..47ad742 100644 (file)
@@ -736,6 +736,9 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
                  fb->bits_per_pixel, fb->pitches[0], crtc->x, crtc->y,
                  new_bo->bo.offset };
 
+       /* Keep vblanks on during flip, for the target crtc of this flip */
+       drm_vblank_get(dev, nouveau_crtc(crtc)->index);
+
        /* Emit a page flip */
        if (nv_device(drm->device)->card_type >= NV_50) {
                ret = nv50_display_flip_next(crtc, fb, chan, swap_interval);
@@ -779,6 +782,7 @@ nouveau_crtc_page_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
        return 0;
 
 fail_unreserve:
+       drm_vblank_put(dev, nouveau_crtc(crtc)->index);
        ttm_bo_unreserve(&old_bo->bo);
 fail_unpin:
        mutex_unlock(&chan->cli->mutex);
@@ -817,6 +821,9 @@ nouveau_finish_page_flip(struct nouveau_channel *chan,
                drm_send_vblank_event(dev, crtcid, s->event);
        }
 
+       /* Give up ownership of vblank for page-flipped crtc */
+       drm_vblank_put(dev, s->crtc);
+
        list_del(&s->head);
        if (ps)
                *ps = *s;
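
The nouveau hunks above pair every drm_vblank_get() with a drm_vblank_put() on both the failure path and the completion path. A toy refcount model (not the DRM API) showing the invariant being enforced:

#include <assert.h>
#include <stdio.h>

/* Get before emitting the flip; put either in fail_unreserve or when
 * the flip completes in the finish handler.
 */
static int vblank_refs;

static void vblank_get(void) { vblank_refs++; }
static void vblank_put(void) { vblank_refs--; }

static int page_flip(int emit_fails)
{
	vblank_get();
	if (emit_fails) {
		vblank_put();	/* fail_unreserve path */
		return -1;
	}
	return 0;		/* put deferred to flip completion */
}

int main(void)
{
	page_flip(1);
	assert(vblank_refs == 0);

	page_flip(0);
	vblank_put();		/* nouveau_finish_page_flip() side */
	assert(vblank_refs == 0);

	printf("refs balanced\n");
	return 0;
}
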
index ddd8375..5425ffe 100644 (file)
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
        ret = nouveau_do_resume(drm_dev);
        if (ret)
                return ret;
-       if (drm_dev->mode_config.num_crtc)
-               nouveau_fbcon_set_suspend(drm_dev, 0);
 
-       nouveau_fbcon_zfill_all(drm_dev);
-       if (drm_dev->mode_config.num_crtc)
+       if (drm_dev->mode_config.num_crtc) {
                nouveau_display_resume(drm_dev);
+               nouveau_fbcon_set_suspend(drm_dev, 0);
+       }
+
        return 0;
 }
 
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
        ret = nouveau_do_resume(drm_dev);
        if (ret)
                return ret;
-       if (drm_dev->mode_config.num_crtc)
-               nouveau_fbcon_set_suspend(drm_dev, 0);
-       nouveau_fbcon_zfill_all(drm_dev);
-       if (drm_dev->mode_config.num_crtc)
+
+       if (drm_dev->mode_config.num_crtc) {
                nouveau_display_resume(drm_dev);
+               nouveau_fbcon_set_suspend(drm_dev, 0);
+       }
+
        return 0;
 }
 
index 64a42cf..191665e 100644 (file)
@@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
                if (state == 1)
                        nouveau_fbcon_save_disable_accel(dev);
                fb_set_suspend(drm->fbcon->helper.fbdev, state);
-               if (state == 0)
+               if (state == 0) {
                        nouveau_fbcon_restore_accel(dev);
+                       nouveau_fbcon_zfill(dev, drm->fbcon);
+               }
                console_unlock();
        }
 }
-
-void
-nouveau_fbcon_zfill_all(struct drm_device *dev)
-{
-       struct nouveau_drm *drm = nouveau_drm(dev);
-       if (drm->fbcon) {
-               nouveau_fbcon_zfill(dev, drm->fbcon);
-       }
-}
index fdfc0c9..fcff797 100644 (file)
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
 int nouveau_fbcon_init(struct drm_device *dev);
 void nouveau_fbcon_fini(struct drm_device *dev);
 void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
-void nouveau_fbcon_zfill_all(struct drm_device *dev);
 void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
 void nouveau_fbcon_restore_accel(struct drm_device *dev);
 
index afdf607..4c534b7 100644 (file)
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
                }
        }
 
-       mthd  = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
+       mthd  = (ffs(nv_encoder->dcb->heads) - 1) << 3;
+       mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
        mthd |= nv_encoder->or;
 
        if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
index 34d6a85..0bf1e20 100644 (file)
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
 
        pending = xchg(&qdev->ram_header->int_pending, 0);
 
+       if (!pending)
+               return IRQ_NONE;
+
        atomic_inc(&qdev->irq_received);
 
        if (pending & QXL_INTERRUPT_DISPLAY) {
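
The qxl change follows the shared-interrupt contract: report IRQ_NONE when the device raised nothing, so the kernel can detect spurious or stuck lines. A hedged kernel-style sketch, not standalone code -- my_dev and the two helpers are hypothetical stand-ins for the qxl logic:

#include <linux/interrupt.h>

static irqreturn_t my_irq_handler(int irq, void *arg)
{
	struct my_dev *dev = arg;
	u32 pending = read_and_clear_pending(dev);	/* hypothetical */

	if (!pending)
		return IRQ_NONE;	/* not ours; keep the line healthy */

	handle_events(dev, pending);	/* hypothetical */
	return IRQ_HANDLED;
}
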
index 26c12a3..30d242b 100644 (file)
@@ -1052,7 +1052,7 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
        int encoder_mode = atombios_get_encoder_mode(radeon_crtc->encoder);
 
        /* pass the actual clock to atombios_crtc_program_pll for DCE5,6 for HDMI */
-       if (ASIC_IS_DCE5(rdev) && !ASIC_IS_DCE8(rdev) &&
+       if (ASIC_IS_DCE5(rdev) &&
            (encoder_mode == ATOM_ENCODER_MODE_HDMI) &&
            (radeon_crtc->bpc > 8))
                clock = radeon_crtc->adjusted_clock;
@@ -1136,6 +1136,7 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        u32 fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_NONE);
        u32 tmp, viewport_w, viewport_h;
        int r;
+       bool bypass_lut = false;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1174,33 +1175,73 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(rbo);
 
-       switch (target_fb->bits_per_pixel) {
-       case 8:
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_8BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_INDEXED));
                break;
-       case 15:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB4444));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
+       case DRM_FORMAT_ARGB1555:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB1555));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
+               break;
+       case DRM_FORMAT_BGRX5551:
+       case DRM_FORMAT_BGRA5551:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA5551));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
+#endif
                break;
-       case 16:
+       case DRM_FORMAT_RGB565:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_16BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB565));
 #ifdef __BIG_ENDIAN
                fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN16);
 #endif
                break;
-       case 24:
-       case 32:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
                fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
                             EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB8888));
 #ifdef __BIG_ENDIAN
                fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
 #endif
                break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_ARGB2101010));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
+       case DRM_FORMAT_BGRX1010102:
+       case DRM_FORMAT_BGRA1010102:
+               fb_format = (EVERGREEN_GRPH_DEPTH(EVERGREEN_GRPH_DEPTH_32BPP) |
+                            EVERGREEN_GRPH_FORMAT(EVERGREEN_GRPH_FORMAT_BGRA1010102));
+#ifdef __BIG_ENDIAN
+               fb_swap = EVERGREEN_GRPH_ENDIAN_SWAP(EVERGREEN_GRPH_ENDIAN_8IN32);
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
        default:
-               DRM_ERROR("Unsupported screen depth %d\n",
-                         target_fb->bits_per_pixel);
+               DRM_ERROR("Unsupported screen format %s\n",
+                         drm_get_format_name(target_fb->pixel_format));
                return -EINVAL;
        }
 
@@ -1329,6 +1370,18 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        WREG32(EVERGREEN_GRPH_CONTROL + radeon_crtc->crtc_offset, fb_format);
        WREG32(EVERGREEN_GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
+       /*
+        * The LUT only has 256 slots for indexing by a 8 bpc fb. Bypass the LUT
+        * for > 8 bpc scanout to avoid truncation of fb indices to 8 msb's, to
+        * retain the full precision throughout the pipeline.
+        */
+       WREG32_P(EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL + radeon_crtc->crtc_offset,
+                (bypass_lut ? EVERGREEN_LUT_10BIT_BYPASS_EN : 0),
+                ~EVERGREEN_LUT_10BIT_BYPASS_EN);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
        WREG32(EVERGREEN_GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1361,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
        tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
        WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-       /* set pageflip to happen anywhere in vblank interval */
-       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
@@ -1396,6 +1449,7 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        u32 fb_swap = R600_D1GRPH_SWAP_ENDIAN_NONE;
        u32 tmp, viewport_w, viewport_h;
        int r;
+       bool bypass_lut = false;
 
        /* no fb bound */
        if (!atomic && !crtc->primary->fb) {
@@ -1433,18 +1487,30 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
        radeon_bo_unreserve(rbo);
 
-       switch (target_fb->bits_per_pixel) {
-       case 8:
+       switch (target_fb->pixel_format) {
+       case DRM_FORMAT_C8:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_8BPP |
                    AVIVO_D1GRPH_CONTROL_8BPP_INDEXED;
                break;
-       case 15:
+       case DRM_FORMAT_XRGB4444:
+       case DRM_FORMAT_ARGB4444:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
+                   AVIVO_D1GRPH_CONTROL_16BPP_ARGB4444;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
+               break;
+       case DRM_FORMAT_XRGB1555:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
                    AVIVO_D1GRPH_CONTROL_16BPP_ARGB1555;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
+#endif
                break;
-       case 16:
+       case DRM_FORMAT_RGB565:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_16BPP |
                    AVIVO_D1GRPH_CONTROL_16BPP_RGB565;
@@ -1452,8 +1518,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
                fb_swap = R600_D1GRPH_SWAP_ENDIAN_16BIT;
 #endif
                break;
-       case 24:
-       case 32:
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
                fb_format =
                    AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
                    AVIVO_D1GRPH_CONTROL_32BPP_ARGB8888;
@@ -1461,9 +1527,20 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
                fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
 #endif
                break;
+       case DRM_FORMAT_XRGB2101010:
+       case DRM_FORMAT_ARGB2101010:
+               fb_format =
+                   AVIVO_D1GRPH_CONTROL_DEPTH_32BPP |
+                   AVIVO_D1GRPH_CONTROL_32BPP_ARGB2101010;
+#ifdef __BIG_ENDIAN
+               fb_swap = R600_D1GRPH_SWAP_ENDIAN_32BIT;
+#endif
+               /* Greater 8 bpc fb needs to bypass hw-lut to retain precision */
+               bypass_lut = true;
+               break;
        default:
-               DRM_ERROR("Unsupported screen depth %d\n",
-                         target_fb->bits_per_pixel);
+               DRM_ERROR("Unsupported screen format %s\n",
+                         drm_get_format_name(target_fb->pixel_format));
                return -EINVAL;
        }
 
@@ -1502,6 +1579,13 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        if (rdev->family >= CHIP_R600)
                WREG32(R600_D1GRPH_SWAP_CONTROL + radeon_crtc->crtc_offset, fb_swap);
 
+       /* LUT only has 256 slots for 8 bpc fb. Bypass for > 8 bpc scanout for precision */
+       WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset,
+                (bypass_lut ? AVIVO_LUT_10BIT_BYPASS_EN : 0), ~AVIVO_LUT_10BIT_BYPASS_EN);
+
+       if (bypass_lut)
+               DRM_DEBUG_KMS("Bypassing hardware LUT due to 10 bit fb scanout.\n");
+
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_X + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_SURFACE_OFFSET_Y + radeon_crtc->crtc_offset, 0);
        WREG32(AVIVO_D1GRPH_X_START + radeon_crtc->crtc_offset, 0);
@@ -1530,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
        tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
        WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
 
-       /* set pageflip to happen anywhere in vblank interval */
-       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0);
+       /* set pageflip to happen only at start of vblank interval (front porch) */
+       WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
 
        if (!atomic && fb && fb != crtc->primary->fb) {
                radeon_fb = to_radeon_framebuffer(fb);
index c5b1f2d..b1e11f8 100644 (file)
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
        /* flags not zero */
        if (args.v1.ucReplyStatus == 2) {
                DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
-               r = -EBUSY;
+               r = -EIO;
                goto done;
        }
 
@@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
        u8 msg[DP_DPCD_SIZE];
-       int ret, i;
+       int ret;
+
+       char dpcd_hex_dump[DP_DPCD_SIZE * 3];
 
        ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
                               DP_DPCD_SIZE);
        if (ret > 0) {
                memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
-               DRM_DEBUG_KMS("DPCD: ");
-               for (i = 0; i < DP_DPCD_SIZE; i++)
-                       DRM_DEBUG_KMS("%02x ", msg[i]);
-               DRM_DEBUG_KMS("\n");
+
+               hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
+                                  32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
+               DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
 
                radeon_dp_probe_oui(radeon_connector);
 
index 2b29084..7d68203 100644 (file)
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
        struct backlight_properties props;
        struct radeon_backlight_privdata *pdata;
        struct radeon_encoder_atom_dig *dig;
-       u8 backlight_level;
        char bl_name[16];
 
        /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
 
        pdata->encoder = radeon_encoder;
 
-       backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
-
        dig = radeon_encoder->enc_priv;
        dig->bl_dev = bd;
 
        bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
+       /* Set a reasonable default here if the level is 0 otherwise
+        * fbdev will attempt to turn the backlight on after console
+        * unblanking and it will try and restore 0 which turns the backlight
+        * off again.
+        */
+       if (bd->props.brightness == 0)
+               bd->props.brightness = RADEON_MAX_BL_LEVEL;
        bd->props.power = FB_BLANK_UNBLANK;
        backlight_update_status(bd);
 
index 10dae41..584090a 100644 (file)
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
        tmp &= ~GLOBAL_PWRMGT_EN;
        WREG32_SMC(GENERAL_PWRMGT, tmp);
 
-       tmp = RREG32(SCLK_PWRMGT_CNTL);
+       tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
        tmp &= ~DYNAMIC_PM_EN;
        WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
 
index dcd4518..c0ea661 100644 (file)
@@ -2291,6 +2291,7 @@ static void cik_tiling_mode_table_init(struct radeon_device *rdev)
                                gb_tile_moden = 0;
                                break;
                        }
+                       rdev->config.cik.macrotile_mode_array[reg_offset] = gb_tile_moden;
                        WREG32(GB_MACROTILE_MODE0 + (reg_offset * 4), gb_tile_moden);
                }
        } else if (num_pipe_configs == 8) {
@@ -7376,6 +7377,7 @@ static inline u32 cik_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -7676,14 +7678,16 @@ restart_ih:
                        addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
                        status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
                        mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       if (addr == 0x0 && status == 0x0)
+                               break;
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                                addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                                status);
                        cik_vm_decode_fault(rdev, status, addr, mc_client);
-                       /* reset addr and status */
-                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
                case 167: /* VCE */
                        DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
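
The wptr hunks in cik.c (and the matching ones in evergreen.c and r600.c further down) strip the overflow flag the hardware folds into the write-pointer register before the value is used as a ring offset. In miniature, with illustrative bit positions:

#include <stdint.h>
#include <stdio.h>

#define RB_OVERFLOW	(1u << 0)	/* flag folded into wptr */
#define PTR_MASK	0x3fffu		/* illustrative ring mask */

int main(void)
{
	uint32_t wptr = 0x2001;		/* offset 0x2000 + overflow */

	/* ...acknowledge the overflow via IH_RB_CNTL, then: */
	wptr &= ~RB_OVERFLOW;		/* the added line */

	printf("ring offset 0x%04x\n", wptr & PTR_MASK);	/* 0x2000 */
	return 0;
}
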
index ae88660..0c6e1b5 100644 (file)
 #define                EOP_TC_WB_ACTION_EN                     (1 << 15) /* L2 */
 #define                EOP_TCL1_ACTION_EN                      (1 << 16)
 #define                EOP_TC_ACTION_EN                        (1 << 17) /* L2 */
+#define                EOP_TCL2_VOLATILE                       (1 << 24)
 #define                EOP_CACHE_POLICY(x)                     ((x) << 25)
                 /* 0 - LRU
                 * 1 - Stream
                 * 2 - Bypass
                 */
-#define                EOP_TCL2_VOLATILE                       (1 << 27)
 #define                DATA_SEL(x)                             ((x) << 29)
                 /* 0 - discard
                 * 1 - send low 32bit data
index 5a9a5f4..47d31e9 100644 (file)
@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
 
                table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
                table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
-                       cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+                       cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
        }
 
        return 0;
index e2f6052..15e4f28 100644 (file)
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
        0x8c1c, 0xffffffff, 0x00001010,
        0x28350, 0xffffffff, 0x00000000,
        0xa008, 0xffffffff, 0x00010000,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x9508, 0xffffffff, 0x00000002,
        0x913c, 0x0000000f, 0x0000000a
 };
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
        0x8c1c, 0xffffffff, 0x00001010,
        0x28350, 0xffffffff, 0x00000000,
        0xa008, 0xffffffff, 0x00010000,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x9508, 0xffffffff, 0x00000002
 };
 
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
 static const u32 supersumo_golden_registers[] =
 {
        0x5eb4, 0xffffffff, 0x00000002,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x7030, 0xffffffff, 0x00000011,
        0x7c30, 0xffffffff, 0x00000011,
        0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
 static const u32 wrestler_golden_registers[] =
 {
        0x5eb4, 0xffffffff, 0x00000002,
-       0x5cc, 0xffffffff, 0x00000001,
+       0x5c4, 0xffffffff, 0x00000001,
        0x7030, 0xffffffff, 0x00000011,
        0x7c30, 0xffffffff, 0x00000011,
        0x6104, 0x01000300, 0x00000000,
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x3) != 0) {
-                               tmp &= ~0x3;
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
                                WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -4755,6 +4756,7 @@ static u32 evergreen_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -5066,14 +5068,16 @@ restart_ih:
                case 147:
                        addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
                        status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
+                       if (addr == 0x0 && status == 0x0)
+                               break;
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                                addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                                status);
                        cayman_vm_decode_fault(rdev, status, addr);
-                       /* reset addr and status */
-                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
                case 176: /* CP_INT in ring buffer */
                case 177: /* CP_INT in IB1 */
index a0f63ff..23bff59 100644 (file)
 #       define EVERGREEN_GRPH_ARRAY_LINEAR_ALIGNED      1
 #       define EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1      2
 #       define EVERGREEN_GRPH_ARRAY_2D_TILED_THIN1      4
+#define EVERGREEN_GRPH_LUT_10BIT_BYPASS_CONTROL         0x6808
+#       define EVERGREEN_LUT_10BIT_BYPASS_EN            (1 << 8)
 #define EVERGREEN_GRPH_SWAP_CONTROL                     0x680c
 #       define EVERGREEN_GRPH_ENDIAN_SWAP(x)            (((x) & 0x3) << 0)
 #       define EVERGREEN_GRPH_ENDIAN_NONE               0
 #       define EVERGREEN_CRTC_V_BLANK                   (1 << 0)
 #define EVERGREEN_CRTC_STATUS_POSITION                  0x6e90
 #define EVERGREEN_CRTC_STATUS_HV_COUNT                  0x6ea0
-#define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
 #define EVERGREEN_CRTC_UPDATE_LOCK                      0x6ed4
 #define EVERGREEN_MASTER_UPDATE_LOCK                    0x6ef4
 #define EVERGREEN_MASTER_UPDATE_MODE                    0x6ef8
index 3f6e817..9ef8c38 100644 (file)
@@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev)
        pi->caps_sclk_ds = true;
        pi->enable_auto_thermal_throttling = true;
        pi->disable_nb_ps3_in_battery = false;
-       pi->bapm_enable = false;
+       pi->bapm_enable = true;
        pi->voltage_drop_t = 0;
        pi->caps_sclk_throttle_low_notification = false;
        pi->caps_fps = false; /* true? */
index 004c931..01fc488 100644 (file)
@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
 
                table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
                table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
-                       cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
+                       cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
        }
 }
 
index 1dd0d32..136b7bc 100644 (file)
  * block and vice versa.  This applies to GRPH, CUR, etc.
  */
 #define AVIVO_D1GRPH_LUT_SEL                                    0x6108
+#       define AVIVO_LUT_10BIT_BYPASS_EN                        (1 << 8)
 #define AVIVO_D1GRPH_PRIMARY_SURFACE_ADDRESS                    0x6110
 #define R700_D1GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6914
 #define R700_D2GRPH_PRIMARY_SURFACE_ADDRESS_HIGH                0x6114
index c66952d..3c69f58 100644 (file)
@@ -3795,6 +3795,7 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
index 4b0bbf8..60c47f8 100644 (file)
@@ -102,6 +102,7 @@ extern int radeon_runtime_pm;
 extern int radeon_hard_reset;
 extern int radeon_vm_size;
 extern int radeon_vm_block_size;
+extern int radeon_deep_color;
 
 /*
  * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -448,6 +449,7 @@ struct radeon_bo_va {
 
        /* protected by vm mutex */
        struct list_head                vm_list;
+       struct list_head                vm_status;
 
        /* constant after initialization */
        struct radeon_vm                *vm;
@@ -683,10 +685,9 @@ struct radeon_flip_work {
        struct work_struct              unpin_work;
        struct radeon_device            *rdev;
        int                             crtc_id;
-       struct drm_framebuffer          *fb;
+       uint64_t                        base;
        struct drm_pending_vblank_event *event;
        struct radeon_bo                *old_rbo;
-       struct radeon_bo                *new_rbo;
        struct radeon_fence             *fence;
 };
 
@@ -749,10 +750,6 @@ union radeon_irq_stat_regs {
        struct cik_irq_stat_regs cik;
 };
 
-#define RADEON_MAX_HPD_PINS 7
-#define RADEON_MAX_CRTCS 6
-#define RADEON_MAX_AFMT_BLOCKS 7
-
 struct radeon_irq {
        bool                            installed;
        spinlock_t                      lock;
@@ -871,6 +868,9 @@ struct radeon_vm {
        struct list_head                va;
        unsigned                        id;
 
+       /* BOs freed, but not yet updated in the PT */
+       struct list_head                freed;
+
        /* contains the page directory */
        struct radeon_bo                *page_directory;
        uint64_t                        pd_gpu_addr;
@@ -879,6 +879,8 @@ struct radeon_vm {
        /* array of page tables, one for each page directory entry */
        struct radeon_vm_pt             *page_tables;
 
+       struct radeon_bo_va             *ib_bo_va;
+
        struct mutex                    mutex;
        /* last fence for cs using this vm */
        struct radeon_fence             *fence;
@@ -2836,9 +2838,10 @@ void radeon_vm_fence(struct radeon_device *rdev,
 uint64_t radeon_vm_map_gart(struct radeon_device *rdev, uint64_t addr);
 int radeon_vm_update_page_directory(struct radeon_device *rdev,
                                    struct radeon_vm *vm);
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm);
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem);
 void radeon_vm_bo_invalidate(struct radeon_device *rdev,
                             struct radeon_bo *bo);
@@ -2851,8 +2854,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                          struct radeon_bo_va *bo_va,
                          uint64_t offset,
                          uint32_t flags);
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va);
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va);
 
 /* audio */
 void r600_audio_update_hdmi(struct work_struct *work);
index 3084481..173f378 100644 (file)
@@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
                        rdev->clock.default_dispclk =
                                le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
                        if (rdev->clock.default_dispclk == 0) {
-                               if (ASIC_IS_DCE5(rdev))
+                               if (ASIC_IS_DCE6(rdev))
+                                       rdev->clock.default_dispclk = 60000; /* 600 Mhz */
+                               else if (ASIC_IS_DCE5(rdev))
                                        rdev->clock.default_dispclk = 54000; /* 540 Mhz */
                                else
                                        rdev->clock.default_dispclk = 60000; /* 600 Mhz */
                        }
+                       /* set a reasonable default for DP */
+                       if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
+                               DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
+                                        rdev->clock.default_dispclk / 100);
+                               rdev->clock.default_dispclk = 60000;
+                       }
                        rdev->clock.dp_extclk =
                                le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
                        rdev->clock.current_dispclk = rdev->clock.default_dispclk;
index 933c5c3..4483119 100644 (file)
@@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
                }
        }
 
+       if ((radeon_deep_color == 0) && (bpc > 8))
+               bpc = 8;
+
        DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
                          connector->name, connector->display_info.bpc, bpc);
 
@@ -1288,17 +1291,15 @@ static int radeon_dvi_mode_valid(struct drm_connector *connector,
                    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D) ||
                    (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_B))
                        return MODE_OK;
-               else if (radeon_connector->connector_object_id == CONNECTOR_OBJECT_ID_HDMI_TYPE_A) {
-                       if (ASIC_IS_DCE6(rdev)) {
-                               /* HDMI 1.3+ supports max clock of 340 Mhz */
-                               if (mode->clock > 340000)
-                                       return MODE_CLOCK_HIGH;
-                               else
-                                       return MODE_OK;
-                       } else
+               else if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                       /* HDMI 1.3+ supports max clock of 340 Mhz */
+                       if (mode->clock > 340000)
                                return MODE_CLOCK_HIGH;
-               } else
+                       else
+                               return MODE_OK;
+               } else {
                        return MODE_CLOCK_HIGH;
+               }
        }
 
        /* check against the max pixel clock */
@@ -1549,6 +1550,8 @@ out:
 static int radeon_dp_mode_valid(struct drm_connector *connector,
                                  struct drm_display_mode *mode)
 {
+       struct drm_device *dev = connector->dev;
+       struct radeon_device *rdev = dev->dev_private;
        struct radeon_connector *radeon_connector = to_radeon_connector(connector);
        struct radeon_connector_atom_dig *radeon_dig_connector = radeon_connector->con_priv;
 
@@ -1579,14 +1582,23 @@ static int radeon_dp_mode_valid(struct drm_connector *connector,
                                        return MODE_PANEL;
                        }
                }
-               return MODE_OK;
        } else {
                if ((radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) ||
-                   (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP))
+                   (radeon_dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_eDP)) {
                        return radeon_dp_mode_valid_helper(connector, mode);
-               else
-                       return MODE_OK;
+               } else {
+                       if (ASIC_IS_DCE6(rdev) && drm_detect_hdmi_monitor(radeon_connector->edid)) {
+                               /* HDMI 1.3+ supports max clock of 340 Mhz */
+                               if (mode->clock > 340000)
+                                       return MODE_CLOCK_HIGH;
+                       } else {
+                               if (mode->clock > 165000)
+                                       return MODE_CLOCK_HIGH;
+                       }
+               }
        }
+
+       return MODE_OK;
 }
 
 static const struct drm_connector_helper_funcs radeon_dp_connector_helper_funcs = {
index 71a1434..ae763f6 100644 (file)
@@ -461,13 +461,23 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                                   struct radeon_vm *vm)
 {
        struct radeon_device *rdev = p->rdev;
+       struct radeon_bo_va *bo_va;
        int i, r;
 
        r = radeon_vm_update_page_directory(rdev, vm);
        if (r)
                return r;
 
-       r = radeon_vm_bo_update(rdev, vm, rdev->ring_tmp_bo.bo,
+       r = radeon_vm_clear_freed(rdev, vm);
+       if (r)
+               return r;
+
+       if (vm->ib_bo_va == NULL) {
+               DRM_ERROR("Tmp BO not in VM!\n");
+               return -EINVAL;
+       }
+
+       r = radeon_vm_bo_update(rdev, vm->ib_bo_va,
                                &rdev->ring_tmp_bo.bo->tbo.mem);
        if (r)
                return r;
@@ -480,7 +490,13 @@ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *p,
                        continue;
 
                bo = p->relocs[i].robj;
-               r = radeon_vm_bo_update(rdev, vm, bo, &bo->tbo.mem);
+               bo_va = radeon_vm_bo_find(vm, bo);
+               if (bo_va == NULL) {
+                       dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
+                       return -EINVAL;
+               }
+
+               r = radeon_vm_bo_update(rdev, bo_va, &bo->tbo.mem);
                if (r)
                        return r;
        }
index 03686fa..697add2 100644 (file)
@@ -1056,36 +1056,36 @@ static void radeon_check_arguments(struct radeon_device *rdev)
        if (!radeon_check_pot_argument(radeon_vm_size)) {
                dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
-       if (radeon_vm_size < 4) {
-               dev_warn(rdev->dev, "VM size (%d) to small, min is 4MB\n",
+       if (radeon_vm_size < 1) {
+               dev_warn(rdev->dev, "VM size (%d) to small, min is 1GB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /*
         * Max GPUVM size for Cayman, SI and CI are 40 bits.
         */
-       if (radeon_vm_size > 1024*1024) {
-               dev_warn(rdev->dev, "VM size (%d) to large, max is 1TB\n",
+       if (radeon_vm_size > 1024) {
+               dev_warn(rdev->dev, "VM size (%d) too large, max is 1TB\n",
                         radeon_vm_size);
-               radeon_vm_size = 4096;
+               radeon_vm_size = 4;
        }
 
        /* defines number of bits in page table versus page directory,
         * a page is 4KB so we have 12 bits offset, minimum 9 bits in the
         * page table and the remaining bits are in the page directory */
        if (radeon_vm_block_size < 9) {
-               dev_warn(rdev->dev, "VM page table size (%d) to small\n",
+               dev_warn(rdev->dev, "VM page table size (%d) too small\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
 
        if (radeon_vm_block_size > 24 ||
-           radeon_vm_size < (1ull << radeon_vm_block_size)) {
-               dev_warn(rdev->dev, "VM page table size (%d) to large\n",
+           (radeon_vm_size * 1024) < (1ull << radeon_vm_block_size)) {
+               dev_warn(rdev->dev, "VM page table size (%d) too large\n",
                         radeon_vm_block_size);
                radeon_vm_block_size = 9;
        }
@@ -1238,7 +1238,7 @@ int radeon_device_init(struct radeon_device *rdev,
        /* Adjust VM size here.
         * Max GPUVM size for cayman+ is 40 bits.
         */
-       rdev->vm_manager.max_pfn = radeon_vm_size << 8;
+       rdev->vm_manager.max_pfn = radeon_vm_size << 18;
 
        /* Set asic functions */
        r = radeon_asic_init(rdev);
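
radeon_vm_size switches from megabytes to gigabytes in this hunk, which is why the max_pfn shift grows from 8 to 18: max_pfn counts 4 KB pages, and 2^30 / 2^12 = 2^18 pages per gigabyte (the old MB-based code used 2^20 / 2^12 = 2^8). The arithmetic, standalone:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vm_size_gb = 4;		/* the new default */
	uint64_t max_pfn = vm_size_gb << 18;	/* 4 KB pages per GB */

	printf("%llu GB -> %llu pages (%llu bytes)\n",
	       (unsigned long long)vm_size_gb,
	       (unsigned long long)max_pfn,
	       (unsigned long long)(max_pfn << 12));
	return 0;
}
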
index 5ed6170..bf25061 100644 (file)
@@ -66,7 +66,8 @@ static void avivo_crtc_load_lut(struct drm_crtc *crtc)
                             (radeon_crtc->lut_b[i] << 0));
        }
 
-       WREG32(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id);
+       /* Only change bit 0 of LUT_SEL, other bits are set elsewhere */
+       WREG32_P(AVIVO_D1GRPH_LUT_SEL + radeon_crtc->crtc_offset, radeon_crtc->crtc_id, ~1);
 }
 
 static void dce4_crtc_load_lut(struct drm_crtc *crtc)
@@ -284,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work)
 void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
 {
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
-       struct radeon_flip_work *work;
        unsigned long flags;
        u32 update_pending;
        int vpos, hpos;
@@ -294,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
                return;
 
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
-       work = radeon_crtc->flip_work;
-       if (work == NULL) {
+       if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+                                "RADEON_FLIP_SUBMITTED(%d)\n",
+                                radeon_crtc->flip_status,
+                                RADEON_FLIP_SUBMITTED);
                spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
                return;
        }
@@ -343,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
        spin_lock_irqsave(&rdev->ddev->event_lock, flags);
        work = radeon_crtc->flip_work;
-       if (work == NULL) {
+       if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
+               DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
+                                "RADEON_FLIP_SUBMITTED(%d)\n",
+                                radeon_crtc->flip_status,
+                                RADEON_FLIP_SUBMITTED);
                spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
                return;
        }
 
        /* Pageflip completed. Clean up. */
+       radeon_crtc->flip_status = RADEON_FLIP_NONE;
        radeon_crtc->flip_work = NULL;
 
        /* wakeup userspace */
@@ -357,8 +365,8 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
 
        spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
 
-       radeon_fence_unref(&work->fence);
-       radeon_irq_kms_pflip_irq_get(rdev, work->crtc_id);
+       drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
+       radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
        queue_work(radeon_crtc->flip_queue, &work->unpin_work);
 }
 
@@ -377,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
        struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
 
        struct drm_crtc *crtc = &radeon_crtc->base;
-       struct drm_framebuffer *fb = work->fb;
-
-       uint32_t tiling_flags, pitch_pixels;
-       uint64_t base;
-
        unsigned long flags;
        int r;
 
         down_read(&rdev->exclusive_lock);
-       while (work->fence) {
+       if (work->fence) {
                r = radeon_fence_wait(work->fence, false);
                if (r == -EDEADLK) {
                        up_read(&rdev->exclusive_lock);
                        r = radeon_gpu_reset(rdev);
                        down_read(&rdev->exclusive_lock);
                }
+               if (r)
+                       DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
 
-               if (r) {
-                       DRM_ERROR("failed to wait on page flip fence (%d)!\n",
-                                 r);
-                       goto cleanup;
-               } else
-                       radeon_fence_unref(&work->fence);
+               /* We continue with the page flip even if we failed to wait on
+                * the fence, otherwise the DRM core and userspace will be
+                * confused about which BO the CRTC is scanning out
+                */
+
+               radeon_fence_unref(&work->fence);
        }
 
+       /* We borrow the event spin lock for protecting flip_status */
+       spin_lock_irqsave(&crtc->dev->event_lock, flags);
+
+       /* set the proper interrupt */
+       radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
+
+       /* do the flip (mmio) */
+       radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
+
+       radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
+       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+       up_read(&rdev->exclusive_lock);
+}
+
+static int radeon_crtc_page_flip(struct drm_crtc *crtc,
+                                struct drm_framebuffer *fb,
+                                struct drm_pending_vblank_event *event,
+                                uint32_t page_flip_flags)
+{
+       struct drm_device *dev = crtc->dev;
+       struct radeon_device *rdev = dev->dev_private;
+       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+       struct radeon_framebuffer *old_radeon_fb;
+       struct radeon_framebuffer *new_radeon_fb;
+       struct drm_gem_object *obj;
+       struct radeon_flip_work *work;
+       struct radeon_bo *new_rbo;
+       uint32_t tiling_flags, pitch_pixels;
+       uint64_t base;
+       unsigned long flags;
+       int r;
+
+       work = kzalloc(sizeof *work, GFP_KERNEL);
+       if (work == NULL)
+               return -ENOMEM;
+
+       INIT_WORK(&work->flip_work, radeon_flip_work_func);
+       INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
+
+       work->rdev = rdev;
+       work->crtc_id = radeon_crtc->crtc_id;
+       work->event = event;
+
+       /* schedule unpin of the old buffer */
+       old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
+       obj = old_radeon_fb->obj;
+
+       /* take a reference to the old object */
+       drm_gem_object_reference(obj);
+       work->old_rbo = gem_to_radeon_bo(obj);
+
+       new_radeon_fb = to_radeon_framebuffer(fb);
+       obj = new_radeon_fb->obj;
+       new_rbo = gem_to_radeon_bo(obj);
+
+       spin_lock(&new_rbo->tbo.bdev->fence_lock);
+       if (new_rbo->tbo.sync_obj)
+               work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
+       spin_unlock(&new_rbo->tbo.bdev->fence_lock);
+
        /* pin the new buffer */
-       DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n",
-                        work->old_rbo, work->new_rbo);
+       DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
+                        work->old_rbo, new_rbo);
 
-       r = radeon_bo_reserve(work->new_rbo, false);
+       r = radeon_bo_reserve(new_rbo, false);
        if (unlikely(r != 0)) {
                DRM_ERROR("failed to reserve new rbo buffer before flip\n");
                goto cleanup;
        }
        /* Only 27 bit offset for legacy CRTC */
-       r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM,
+       r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
                                     ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
        if (unlikely(r != 0)) {
-               radeon_bo_unreserve(work->new_rbo);
+               radeon_bo_unreserve(new_rbo);
                r = -EINVAL;
                DRM_ERROR("failed to pin new rbo buffer before flip\n");
                goto cleanup;
        }
-       radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL);
-       radeon_bo_unreserve(work->new_rbo);
+       radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
+       radeon_bo_unreserve(new_rbo);
 
        if (!ASIC_IS_AVIVO(rdev)) {
                /* crtc offset is from display base addr not FB location */
@@ -458,82 +523,24 @@ static void radeon_flip_work_func(struct work_struct *__work)
                }
                base &= ~7;
        }
+       work->base = base;
 
-       /* We borrow the event spin lock for protecting flip_work */
-       spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-       /* set the proper interrupt */
-       radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
-
-       /* do the flip (mmio) */
-       radeon_page_flip(rdev, radeon_crtc->crtc_id, base);
-
-       spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-       up_read(&rdev->exclusive_lock);
-
-       return;
-
-cleanup:
-       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-       radeon_fence_unref(&work->fence);
-       kfree(work);
-       up_read(&rdev->exclusive_lock);
-}
-
-static int radeon_crtc_page_flip(struct drm_crtc *crtc,
-                                struct drm_framebuffer *fb,
-                                struct drm_pending_vblank_event *event,
-                                uint32_t page_flip_flags)
-{
-       struct drm_device *dev = crtc->dev;
-       struct radeon_device *rdev = dev->dev_private;
-       struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
-       struct radeon_framebuffer *old_radeon_fb;
-       struct radeon_framebuffer *new_radeon_fb;
-       struct drm_gem_object *obj;
-       struct radeon_flip_work *work;
-       unsigned long flags;
-
-       work = kzalloc(sizeof *work, GFP_KERNEL);
-       if (work == NULL)
-               return -ENOMEM;
-
-       INIT_WORK(&work->flip_work, radeon_flip_work_func);
-       INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
-
-       work->rdev = rdev;
-       work->crtc_id = radeon_crtc->crtc_id;
-       work->fb = fb;
-       work->event = event;
-
-       /* schedule unpin of the old buffer */
-       old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
-       obj = old_radeon_fb->obj;
-
-       /* take a reference to the old object */
-       drm_gem_object_reference(obj);
-       work->old_rbo = gem_to_radeon_bo(obj);
-
-       new_radeon_fb = to_radeon_framebuffer(fb);
-       obj = new_radeon_fb->obj;
-       work->new_rbo = gem_to_radeon_bo(obj);
-
-       spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
-       if (work->new_rbo->tbo.sync_obj)
-               work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
-       spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
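+       /* hold a vblank reference until the flip completes or fails */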
+       r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
+       if (r) {
+               DRM_ERROR("failed to get vblank before flip\n");
+               goto pflip_cleanup;
+       }
 
        /* We borrow the event spin lock for protecting flip_work */
        spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
-       if (radeon_crtc->flip_work) {
+       if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
                DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
                spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-               drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
-               radeon_fence_unref(&work->fence);
-               kfree(work);
-               return -EBUSY;
+               r = -EBUSY;
+               goto vblank_cleanup;
        }
+       radeon_crtc->flip_status = RADEON_FLIP_PENDING;
        radeon_crtc->flip_work = work;
 
        /* update crtc fb */
@@ -542,8 +549,27 @@ static int radeon_crtc_page_flip(struct drm_crtc *crtc,
        spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 
        queue_work(radeon_crtc->flip_queue, &work->flip_work);
-
        return 0;
+
+vblank_cleanup:
+       drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
+
+pflip_cleanup:
+       if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
+               DRM_ERROR("failed to reserve new rbo in error path\n");
+               goto cleanup;
+       }
+       if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
+               DRM_ERROR("failed to unpin new rbo in error path\n");
+       }
+       radeon_bo_unreserve(new_rbo);
+
+cleanup:
+       drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
+       radeon_fence_unref(&work->fence);
+       kfree(work);
+
+       return r;
 }
 
 static int
@@ -803,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
        struct radeon_device *rdev = dev->dev_private;
        int ret = 0;
 
+       /* don't leak the edid if we already fetched it in detect() */
+       if (radeon_connector->edid)
+               goto got_edid;
+
        /* on hw with routers, select right port */
        if (radeon_connector->router.ddc_valid)
                radeon_router_select_ddc_port(radeon_connector);
@@ -841,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
                        radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
        }
        if (radeon_connector->edid) {
+got_edid:
                drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
                ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
                drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
index 6e30174..e9e3610 100644 (file)
@@ -173,8 +173,9 @@ int radeon_dpm = -1;
 int radeon_aspm = -1;
 int radeon_runtime_pm = -1;
 int radeon_hard_reset = 0;
-int radeon_vm_size = 4096;
+int radeon_vm_size = 4;
 int radeon_vm_block_size = 9;
+int radeon_deep_color = 0;
 
 MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
 module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -242,12 +243,15 @@ module_param_named(runpm, radeon_runtime_pm, int, 0444);
 MODULE_PARM_DESC(hard_reset, "PCI config reset (1 = force enable, 0 = disable (default))");
 module_param_named(hard_reset, radeon_hard_reset, int, 0444);
 
-MODULE_PARM_DESC(vm_size, "VM address space size in megabytes (default 4GB)");
+MODULE_PARM_DESC(vm_size, "VM address space size in gigabytes (default 4GB)");
 module_param_named(vm_size, radeon_vm_size, int, 0444);
 
 MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
 module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
 
+MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
+module_param_named(deep_color, radeon_deep_color, int, 0444);
+
 static struct pci_device_id pciidlist[] = {
        radeon_PCI_IDS
 };
index 35d9318..d25ae6a 100644 (file)
@@ -579,7 +579,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN) {
                struct radeon_fpriv *fpriv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm;
                int r;
 
                fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
@@ -587,7 +587,8 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                        return -ENOMEM;
                }
 
-               r = radeon_vm_init(rdev, &fpriv->vm);
+               vm = &fpriv->vm;
+               r = radeon_vm_init(rdev, vm);
                if (r) {
                        kfree(fpriv);
                        return r;
@@ -596,22 +597,23 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv)
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
 
                        /* map the ib pool buffer read only into
                         * virtual address space */
-                       bo_va = radeon_vm_bo_add(rdev, &fpriv->vm,
-                                                rdev->ring_tmp_bo.bo);
-                       r = radeon_vm_bo_set_addr(rdev, bo_va, RADEON_VA_IB_OFFSET,
+                       vm->ib_bo_va = radeon_vm_bo_add(rdev, vm,
+                                                       rdev->ring_tmp_bo.bo);
+                       r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va,
+                                                 RADEON_VA_IB_OFFSET,
                                                  RADEON_VM_PAGE_READABLE |
                                                  RADEON_VM_PAGE_SNOOPED);
 
                        radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        if (r) {
-                               radeon_vm_fini(rdev, &fpriv->vm);
+                               radeon_vm_fini(rdev, vm);
                                kfree(fpriv);
                                return r;
                        }
@@ -640,21 +642,19 @@ void radeon_driver_postclose_kms(struct drm_device *dev,
        /* new gpu have virtual address space support */
        if (rdev->family >= CHIP_CAYMAN && file_priv->driver_priv) {
                struct radeon_fpriv *fpriv = file_priv->driver_priv;
-               struct radeon_bo_va *bo_va;
+               struct radeon_vm *vm = &fpriv->vm;
                int r;
 
                if (rdev->accel_working) {
                        r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false);
                        if (!r) {
-                               bo_va = radeon_vm_bo_find(&fpriv->vm,
-                                                         rdev->ring_tmp_bo.bo);
-                               if (bo_va)
-                                       radeon_vm_bo_rmv(rdev, bo_va);
+                               if (vm->ib_bo_va)
+                                       radeon_vm_bo_rmv(rdev, vm->ib_bo_va);
                                radeon_bo_unreserve(rdev->ring_tmp_bo.bo);
                        }
                }
 
-               radeon_vm_fini(rdev, &fpriv->vm);
+               radeon_vm_fini(rdev, vm);
                kfree(fpriv);
                file_priv->driver_priv = NULL;
        }
index ad0e4b8..0592ddb 100644 (file)
@@ -46,6 +46,10 @@ struct radeon_device;
 #define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
 #define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
 
+#define RADEON_MAX_HPD_PINS 7
+#define RADEON_MAX_CRTCS 6
+#define RADEON_MAX_AFMT_BLOCKS 7
+
 enum radeon_rmx_type {
        RMX_OFF,
        RMX_FULL,
@@ -233,8 +237,8 @@ struct radeon_mode_info {
        struct card_info *atom_card_info;
        enum radeon_connector_table connector_table;
        bool mode_config_initialized;
-       struct radeon_crtc *crtcs[6];
-       struct radeon_afmt *afmt[7];
+       struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
+       struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
        /* DVI-I properties */
        struct drm_property *coherent_mode_property;
        /* DAC enable load detect */
@@ -302,6 +306,12 @@ struct radeon_atom_ss {
        uint16_t amount;
 };
 
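+/* page flip progress: NONE = idle, PENDING = queued to the flip worker,
+ * SUBMITTED = programmed to the hardware, waiting for the next vblank */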
+enum radeon_flip_status {
+       RADEON_FLIP_NONE,
+       RADEON_FLIP_PENDING,
+       RADEON_FLIP_SUBMITTED
+};
+
 struct radeon_crtc {
        struct drm_crtc base;
        int crtc_id;
@@ -327,6 +337,7 @@ struct radeon_crtc {
        /* page flipping */
        struct workqueue_struct *flip_queue;
        struct radeon_flip_work *flip_work;
+       enum radeon_flip_status flip_status;
        /* pll sharing */
        struct radeon_atom_ss ss;
        bool ss_enabled;
index 12c663e..e447e39 100644 (file)
@@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
                        rdev->pm.dpm.ac_power = true;
                else
                        rdev->pm.dpm.ac_power = false;
-               if (rdev->asic->dpm.enable_bapm)
-                       radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+               if (rdev->family == CHIP_ARUBA) {
+                       if (rdev->asic->dpm.enable_bapm)
+                               radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
+               }
                mutex_unlock(&rdev->pm.mutex);
         } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
                if (rdev->pm.profile == PM_PROFILE_AUTO) {
index 899d912..725d366 100644 (file)
@@ -332,6 +332,7 @@ struct radeon_bo_va *radeon_vm_bo_add(struct radeon_device *rdev,
        bo_va->ref_count = 1;
        INIT_LIST_HEAD(&bo_va->bo_list);
        INIT_LIST_HEAD(&bo_va->vm_list);
+       INIT_LIST_HEAD(&bo_va->vm_status);
 
        mutex_lock(&vm->mutex);
        list_add(&bo_va->vm_list, &vm->va);
@@ -468,6 +469,19 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                head = &tmp->vm_list;
        }
 
+       if (bo_va->soffset) {
+               /* add a clone of the bo_va to clear the old address */
+               tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
+               if (!tmp) {
+                       mutex_unlock(&vm->mutex);
+                       return -ENOMEM;
+               }
+               tmp->soffset = bo_va->soffset;
+               tmp->eoffset = bo_va->eoffset;
+               tmp->vm = vm;
+               list_add(&tmp->vm_status, &vm->freed);
+       }
+
        bo_va->soffset = soffset;
        bo_va->eoffset = eoffset;
        bo_va->flags = flags;
@@ -495,7 +509,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                mutex_unlock(&vm->mutex);
 
                r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
-                                    RADEON_GPU_PAGE_SIZE, false, 
+                                    RADEON_GPU_PAGE_SIZE, true,
                                     RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
                if (r)
                        return r;
@@ -823,25 +837,19 @@ static void radeon_vm_update_ptes(struct radeon_device *rdev,
  * Object has to be reserved and the mutex must be locked!
  */
 int radeon_vm_bo_update(struct radeon_device *rdev,
-                       struct radeon_vm *vm,
-                       struct radeon_bo *bo,
+                       struct radeon_bo_va *bo_va,
                        struct ttm_mem_reg *mem)
 {
+       struct radeon_vm *vm = bo_va->vm;
        struct radeon_ib ib;
-       struct radeon_bo_va *bo_va;
        unsigned nptes, ndw;
        uint64_t addr;
        int r;
 
-       bo_va = radeon_vm_bo_find(vm, bo);
-       if (bo_va == NULL) {
-               dev_err(rdev->dev, "bo %p not in vm %p\n", bo, vm);
-               return -EINVAL;
-       }
        if (!bo_va->soffset) {
                dev_err(rdev->dev, "bo %p don't has a mapping in vm %p\n",
-                       bo, vm);
+                       bo_va->bo, vm);
                return -EINVAL;
        }
 
@@ -868,7 +876,7 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
 
        trace_radeon_vm_bo_update(bo_va);
 
-       nptes = radeon_bo_ngpu_pages(bo);
+       nptes = (bo_va->eoffset - bo_va->soffset) / RADEON_GPU_PAGE_SIZE;
 
        /* padding, etc. */
        ndw = 64;
@@ -910,6 +918,34 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
        return 0;
 }
 
+/**
+ * radeon_vm_clear_freed - clear freed BOs in the PT
+ *
+ * @rdev: radeon_device pointer
+ * @vm: requested vm
+ *
+ * Make sure all freed BOs are cleared in the PT.
+ * Returns 0 for success.
+ *
+ * PTs have to be reserved and mutex must be locked!
+ */
+int radeon_vm_clear_freed(struct radeon_device *rdev,
+                         struct radeon_vm *vm)
+{
+       struct radeon_bo_va *bo_va, *tmp;
+       int r;
+
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status) {
+               list_del(&bo_va->vm_status);
+               r = radeon_vm_bo_update(rdev, bo_va, NULL);
+               kfree(bo_va);
+               if (r)
+                       return r;
+       }
+       return 0;
+}
+
 /**
  * radeon_vm_bo_rmv - remove a bo from a specific vm
  *
@@ -917,27 +953,27 @@ int radeon_vm_bo_update(struct radeon_device *rdev,
  * @bo_va: requested bo_va
  *
  * Remove @bo_va->bo from the requested vm (cayman+).
- * Remove @bo_va->bo from the list of bos associated with the bo_va->vm and
- * remove the ptes for @bo_va in the page table.
- * Returns 0 for success.
  *
  * Object has to be reserved!
  */
-int radeon_vm_bo_rmv(struct radeon_device *rdev,
-                    struct radeon_bo_va *bo_va)
+void radeon_vm_bo_rmv(struct radeon_device *rdev,
+                     struct radeon_bo_va *bo_va)
 {
-       int r = 0;
+       struct radeon_vm *vm = bo_va->vm;
 
-       mutex_lock(&bo_va->vm->mutex);
-       if (bo_va->soffset)
-               r = radeon_vm_bo_update(rdev, bo_va->vm, bo_va->bo, NULL);
+       list_del(&bo_va->bo_list);
 
+       mutex_lock(&vm->mutex);
        list_del(&bo_va->vm_list);
-       mutex_unlock(&bo_va->vm->mutex);
-       list_del(&bo_va->bo_list);
 
-       kfree(bo_va);
-       return r;
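+       /* a mapping that still has an address keeps its bo_va on vm->freed
+        * so radeon_vm_clear_freed() can clear its page table entries;
+        * unmapped entries can be freed right away */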
+       if (bo_va->soffset) {
+               bo_va->bo = NULL;
+               list_add(&bo_va->vm_status, &vm->freed);
+       } else {
+               kfree(bo_va);
+       }
+
+       mutex_unlock(&vm->mutex);
 }
 
 /**
@@ -975,11 +1011,13 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
        int r;
 
        vm->id = 0;
+       vm->ib_bo_va = NULL;
        vm->fence = NULL;
        vm->last_flush = NULL;
        vm->last_id_use = NULL;
        mutex_init(&vm->mutex);
        INIT_LIST_HEAD(&vm->va);
+       INIT_LIST_HEAD(&vm->freed);
 
        pd_size = radeon_vm_directory_size(rdev);
        pd_entries = radeon_vm_num_pdes(rdev);
@@ -992,7 +1030,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
                return -ENOMEM;
        }
 
-       r = radeon_bo_create(rdev, pd_size, align, false,
+       r = radeon_bo_create(rdev, pd_size, align, true,
                             RADEON_GEM_DOMAIN_VRAM, NULL,
                             &vm->page_directory);
        if (r)
@@ -1034,7 +1072,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
                        kfree(bo_va);
                }
        }
-
+       list_for_each_entry_safe(bo_va, tmp, &vm->freed, vm_status)
+               kfree(bo_va);
 
        for (i = 0; i < radeon_vm_num_pdes(rdev); i++)
                radeon_bo_unref(&vm->page_tables[i].bo);
index 237dd29..3e21e86 100644 (file)
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        for (i = 0; i < rdev->num_crtc; i++) {
                if (save->crtc_enabled[i]) {
                        tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
-                       if ((tmp & 0x3) != 0) {
-                               tmp &= ~0x3;
+                       if ((tmp & 0x7) != 3) {
+                               tmp &= ~0x7;
+                               tmp |= 0x3;
                                WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
                        }
                        tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
index da041a4..3c76e1d 100644 (file)
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
        pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
                                                       ASIC_INTERNAL_MEMORY_SS, 0);
 
-       /* disable ss, causes hangs on some cayman boards */
-       if (rdev->family == CHIP_CAYMAN) {
-               pi->sclk_ss = false;
-               pi->mclk_ss = false;
-       }
-
        if (pi->sclk_ss || pi->mclk_ss)
                pi->dynamic_ss = true;
        else
index 730cee2..9e854fd 100644 (file)
@@ -6103,6 +6103,7 @@ static inline u32 si_get_ih_wptr(struct radeon_device *rdev)
                tmp = RREG32(IH_RB_CNTL);
                tmp |= IH_WPTR_OVERFLOW_CLEAR;
                WREG32(IH_RB_CNTL, tmp);
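+               /* clear the overflow flag so wptr is a valid ring index */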
+               wptr &= ~RB_OVERFLOW;
        }
        return (wptr & rdev->ih.ptr_mask);
 }
@@ -6376,14 +6377,16 @@ restart_ih:
                case 147:
                        addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
                        status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
+                       /* reset addr and status */
+                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
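+                       /* nothing to report if both registers read back zero */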
+                       if (addr == 0x0 && status == 0x0)
+                               break;
                        dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                                addr);
                        dev_err(rdev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                                status);
                        si_vm_decode_fault(rdev, status, addr);
-                       /* reset addr and status */
-                       WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
                        break;
                case 176: /* RINGID0 CP_INT */
                        radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
index 2a2822c..32e50be 100644 (file)
@@ -1874,7 +1874,16 @@ int trinity_dpm_init(struct radeon_device *rdev)
        for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
                pi->at[i] = TRINITY_AT_DFLT;
 
-       pi->enable_bapm = false;
+       /* There are stability issues reported with
+        * bapm enabled when switching between AC and battery
+        * power.  At the same time, some MSI boards hang
+        * if it's not enabled and dpm is enabled.  Just enable
+        * it for MSI boards right now.
+        */
+       if (rdev->pdev->subsystem_vendor == 0x1462)
+               pi->enable_bapm = true;
+       else
+               pi->enable_bapm = false;
        pi->enable_nbps_policy = true;
        pi->enable_sclk_ds = true;
        pi->enable_gfx_power_gating = true;
index a89ad93..b031b48 100644 (file)
@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
-               vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
                vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
        }
 
index 800c8b6..5e79c6a 100644 (file)
@@ -810,7 +810,7 @@ config HID_ZYDACRON
 
 config HID_SENSOR_HUB
        tristate "HID Sensors framework support"
-       depends on HID
+       depends on HID && HAS_IOMEM
        select MFD_CORE
        default n
        ---help---
index 6d00bb9..48b66bb 100644 (file)
 
 #define USB_VENDOR_ID_ETURBOTOUCH      0x22b9
 #define USB_DEVICE_ID_ETURBOTOUCH      0x0006
+#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968
 
 #define USB_VENDOR_ID_EZKEY            0x0518
 #define USB_DEVICE_ID_BTC_8193         0x0002
 
 #define USB_VENDOR_ID_PENMOUNT         0x14e1
 #define USB_DEVICE_ID_PENMOUNT_PCI     0x3500
+#define USB_DEVICE_ID_PENMOUNT_1610    0x1610
+#define USB_DEVICE_ID_PENMOUNT_1640    0x1640
 
 #define USB_VENDOR_ID_PETALYNX         0x18b1
 #define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE   0x0037
index 2451c7e..578bbe6 100644 (file)
@@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev,
        return 0;
 }
 
+#ifdef CONFIG_PM
 static int rmi_post_reset(struct hid_device *hdev)
 {
        return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
@@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev)
 {
        return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
 }
+#endif /* CONFIG_PM */
 
 #define RMI4_MAX_PAGE 0xff
 #define RMI4_PAGE_SIZE 0x0100
index a8d5c8f..e244e44 100644 (file)
@@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
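+       /* dyn_callback_lock is also taken from the HID raw event path, which
+        * can run in interrupt context, hence the irqsave variants */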
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                                                callback->hsdev == hsdev) {
-                       spin_unlock(&pdata->dyn_callback_lock);
+                       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
                        return -EINVAL;
                }
        callback = kzalloc(sizeof(*callback), GFP_ATOMIC);
        if (!callback) {
-               spin_unlock(&pdata->dyn_callback_lock);
+               spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
                return -ENOMEM;
        }
        callback->hsdev = hsdev;
@@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
        callback->usage_id = usage_id;
        callback->priv = NULL;
        list_add_tail(&callback->list, &pdata->dyn_callback_list);
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
 {
        struct hid_sensor_hub_callbacks_list *callback;
        struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
+       unsigned long flags;
 
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list)
                if (callback->usage_id == usage_id &&
                                                callback->hsdev == hsdev) {
@@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
                        kfree(callback);
                        break;
                }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
 {
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
        struct hid_sensor_hub_callbacks_list *callback;
+       unsigned long flags;
 
        hid_dbg(hdev, " sensor_hub_suspend\n");
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
                if (callback->usage_callback->suspend)
                        callback->usage_callback->suspend(
                                        callback->hsdev, callback->priv);
        }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev)
 {
        struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
        struct hid_sensor_hub_callbacks_list *callback;
+       unsigned long flags;
 
        hid_dbg(hdev, " sensor_hub_resume\n");
-       spin_lock(&pdata->dyn_callback_lock);
+       spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
        list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
                if (callback->usage_callback->resume)
                        callback->usage_callback->resume(
                                        callback->hsdev, callback->priv);
        }
-       spin_unlock(&pdata->dyn_callback_lock);
+       spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
 
        return 0;
 }
@@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
                        if (name == NULL) {
                                hid_err(hdev, "Failed MFD device name\n");
                                        ret = -ENOMEM;
+                                       kfree(hsdev);
                                        goto err_no_mem;
                        }
                        sd->hid_sensor_hub_client_devs[
index 59badc1..31e6727 100644 (file)
@@ -49,6 +49,7 @@ static const struct hid_blacklist {
 
        { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
+       { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
        { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
        { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
@@ -76,6 +77,8 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
+       { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
        { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
index ce4be37..737fa2e 100644 (file)
@@ -1115,7 +1115,7 @@ static int ssi_protocol_probe(struct device *dev)
                goto out;
        }
 
-       ssi->netdev = alloc_netdev(0, ifname, ssip_pn_setup);
+       ssi->netdev = alloc_netdev(0, ifname, NET_NAME_UNKNOWN, ssip_pn_setup);
        if (!ssi->netdev) {
                dev_err(dev, "No memory for netdev\n");
                err = -ENOMEM;
index e84f452..ae22e3c 100644 (file)
@@ -339,9 +339,13 @@ static void process_chn_event(u32 relid)
                 */
 
                do {
-                       hv_begin_read(&channel->inbound);
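+                       /* hv_begin_read()/hv_end_read() only track the ring
+                        * buffer when read_state is set for this channel */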
+                       if (read_state)
+                               hv_begin_read(&channel->inbound);
                        channel->onchannel_callback(arg);
-                       bytes_to_read = hv_end_read(&channel->inbound);
+                       if (read_state)
+                               bytes_to_read = hv_end_read(&channel->inbound);
+                       else
+                               bytes_to_read = 0;
                } while (read_state && (bytes_to_read != 0));
        } else {
                pr_err("no channel callback for relid - %u\n", relid);
index eaaa3d8..23b2ce2 100644 (file)
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
                /*
                 * Send the information to the user-level daemon.
                 */
-               fcopy_send_data();
                schedule_delayed_work(&fcopy_work, 5*HZ);
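+               /* arm the timeout before signaling the daemon so a fast
+                * response cannot race ahead of the timer setup */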
+               fcopy_send_data();
                return;
        }
        icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
index ea85253..521c146 100644 (file)
@@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
        kvp_respond_to_host(NULL, HV_E_FAIL);
 }
 
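+/*
+ * Channel interrupts are delivered on the channel's bound CPU; poll on
+ * that same CPU so the two invocation paths cannot race.
+ */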
+static void poll_channel(struct vmbus_channel *channel)
+{
+       if (channel->target_cpu != smp_processor_id())
+               smp_call_function_single(channel->target_cpu,
+                                        hv_kvp_onchannelcallback,
+                                        channel, true);
+       else
+               hv_kvp_onchannelcallback(channel);
+}
+
 static int kvp_handle_handshake(struct hv_kvp_msg *msg)
 {
        int ret = 1;
@@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
                kvp_register(dm_reg_value);
                kvp_transaction.active = false;
                if (kvp_transaction.kvp_context)
-                       hv_kvp_onchannelcallback(kvp_transaction.kvp_context);
+                       poll_channel(kvp_transaction.kvp_context);
        }
        return ret;
 }
@@ -568,7 +579,7 @@ response_done:
 
        vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
                                VM_PKT_DATA_INBAND, 0);
-
+       poll_channel(channel);
 }
 
 /*
@@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
                return;
        }
 
-       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen,
+       vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
                         &requestid);
 
        if (recvlen > 0) {
index dd76180..3b9c9ef 100644 (file)
@@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
                (struct hv_util_service *)dev_id->driver_data;
        int ret;
 
-       srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL);
+       srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
        if (!srv->recv_buffer)
                return -ENOMEM;
        if (srv->util_init) {
index 08531a1..02d3d85 100644 (file)
@@ -1052,7 +1052,7 @@ config SENSORS_PC87427
          will be called pc87427.
 
 config SENSORS_NTC_THERMISTOR
-       tristate "NTC thermistor support"
+       tristate "NTC thermistor support from Murata"
        depends on !OF || IIO=n || IIO
        help
          This driver supports NTC thermistors sensor reading and its
@@ -1060,7 +1060,8 @@ config SENSORS_NTC_THERMISTOR
          send notifications about the temperature.
 
          Currently, this driver supports
-         NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333.
+         NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333
+         from Murata.
 
          This driver can also be built as a module.  If so, the module
          will be called ntc-thermistor.
@@ -1176,6 +1177,7 @@ config SENSORS_DME1737
 config SENSORS_EMC1403
        tristate "SMSC EMC1403/23 thermal sensor"
        depends on I2C
+       select REGMAP_I2C
        help
          If you say yes here you get support for the SMSC EMC1403/23
          temperature monitoring chip.
index 5ffd81f..0625e50 100644 (file)
@@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev,
        return sprintf(buf, "%u\n", !!(alarms & mask));
 }
 
-static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 0, 0);
+static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
+                           adc128_show_in, NULL, 0, 0);
 static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 0, 1);
 static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 0, 2);
 
-static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 1, 0);
+static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO,
+                           adc128_show_in, NULL, 1, 0);
 static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 1, 1);
 static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 1, 2);
 
-static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 2, 0);
+static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO,
+                           adc128_show_in, NULL, 2, 0);
 static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 2, 1);
 static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 2, 2);
 
-static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 3, 0);
+static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO,
+                           adc128_show_in, NULL, 3, 0);
 static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 3, 1);
 static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 3, 2);
 
-static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 4, 0);
+static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO,
+                           adc128_show_in, NULL, 4, 0);
 static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 4, 1);
 static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 4, 2);
 
-static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 5, 0);
+static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO,
+                           adc128_show_in, NULL, 5, 0);
 static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 5, 1);
 static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 5, 2);
 
-static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO,
-                           adc128_show_in, adc128_set_in, 6, 0);
+static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO,
+                           adc128_show_in, NULL, 6, 0);
 static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
                            adc128_show_in, adc128_set_in, 6, 1);
 static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
index 3eb4281..d74241b 100644 (file)
@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
        struct adm1021_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
        long temp;
-       int err;
+       int reg_val, err;
 
        err = kstrtol(buf, 10, &temp);
        if (err)
@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
        temp /= 1000;
 
        mutex_lock(&data->update_lock);
-       data->temp_max[index] = clamp_val(temp, -128, 127);
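+       /* cache the limit in millidegrees C; the register takes whole degrees */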
+       reg_val = clamp_val(temp, -128, 127);
+       data->temp_max[index] = reg_val * 1000;
        if (!read_only)
                i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
-                                         data->temp_max[index]);
+                                         reg_val);
        mutex_unlock(&data->update_lock);
 
        return count;
@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
        struct adm1021_data *data = dev_get_drvdata(dev);
        struct i2c_client *client = data->client;
        long temp;
-       int err;
+       int reg_val, err;
 
        err = kstrtol(buf, 10, &temp);
        if (err)
@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
        temp /= 1000;
 
        mutex_lock(&data->update_lock);
-       data->temp_min[index] = clamp_val(temp, -128, 127);
+       reg_val = clamp_val(temp, -128, 127);
+       data->temp_min[index] = reg_val * 1000;
        if (!read_only)
                i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
-                                         data->temp_min[index]);
+                                         reg_val);
        mutex_unlock(&data->update_lock);
 
        return count;
index 78339e8..2804571 100644 (file)
@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
        /* Update the value */
        reg = (reg & 0x3F) | (val << 6);
 
+       /* Update the cache */
+       data->fan_div[attr->index] = reg;
+
        /* Write value */
        i2c_smbus_write_byte_data(client,
                                  ADM1029_REG_FAN_DIV[attr->index], reg);
index a8a540c..51c1a5a 100644 (file)
@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
+       val = clamp_val(val, 0, 127000);
        mutex_lock(&data->update_lock);
        data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
        adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
+       val = clamp_val(val, 0, 127000);
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
                                                  data->pwm[nr]);
@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, 127000);
        mutex_lock(&data->update_lock);
        data->temp_min[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, 127000);
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
        if (ret)
                return ret;
 
-       val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875);
+       val = clamp_val(val, -55000, 127000);
        mutex_lock(&data->update_lock);
        data->temp_crit[nr] = TEMP_TO_REG(val);
        adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
index 0f4dea5..9ee3913 100644 (file)
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
                return -EINVAL;
 
        temp = DIV_ROUND_CLOSEST(temp, 1000);
-       temp = clamp_val(temp, 0, 255);
+       temp = clamp_val(temp, -128, 127);
 
        mutex_lock(&data->lock);
        data->pwm_tmin[attr->index] = temp;
index eea8172..9f2be3d 100644 (file)
@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
        get_temp_alarm, NULL, IDX_TEMP1_MAX);
 static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
        get_temp_alarm, NULL, IDX_TEMP1_CRIT);
-static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR,
+static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
        get_temp, NULL, IDX_TEMP2_INPUT);
 static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
        set_temp, IDX_TEMP2_MIN);
index afd3104..d14ab3c 100644 (file)
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9052-hwmon\n");
+       return sprintf(buf, "da9052\n");
 }
 
 static ssize_t show_label(struct device *dev,
index 73b3865..35eb773 100644 (file)
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
                                      struct device_attribute *devattr,
                                      char *buf)
 {
-       return sprintf(buf, "da9055-hwmon\n");
+       return sprintf(buf, "da9055\n");
 }
 
 static ssize_t show_label(struct device *dev,
index fd892dd..78002de 100644 (file)
@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
        if (result < 0)
                return result;
 
-       val = DIV_ROUND_CLOSEST(val, 1000);
-       if ((val < -63) || (val > 127))
-               return -EINVAL;
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
 
        mutex_lock(&data->update_lock);
        data->temp_min[nr] = val;
@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
        if (result < 0)
                return result;
 
-       val = DIV_ROUND_CLOSEST(val, 1000);
-       if ((val < -63) || (val > 127))
-               return -EINVAL;
+       val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
 
        mutex_lock(&data->update_lock);
        data->temp_max[nr] = val;
@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
 {
        struct emc2103_data *data = emc2103_update_device(dev);
        struct i2c_client *client = to_i2c_client(dev);
-       long rpm_target;
+       unsigned long rpm_target;
 
-       int result = kstrtol(buf, 10, &rpm_target);
+       int result = kstrtoul(buf, 10, &rpm_target);
        if (result < 0)
                return result;
 
        /* Datasheet states 16384 as maximum RPM target (table 3.2) */
-       if ((rpm_target < 0) || (rpm_target > 16384))
-               return -EINVAL;
+       rpm_target = clamp_val(rpm_target, 0, 16384);
 
        mutex_lock(&data->update_lock);
 
index ba35e4d..2566c43 100644 (file)
@@ -538,7 +538,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
 
        /* Make this driver part of hwmon class. */
        fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
-                                               "gpio-fan", fan_data,
+                                               "gpio_fan", fan_data,
                                                gpio_fan_groups);
        if (IS_ERR(fan_data->hwmon_dev))
                return PTR_ERR(fan_data->hwmon_dev);
index e76feb8..ae66f42 100644 (file)
@@ -163,6 +163,18 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
 }
 
 static const struct of_device_id ntc_match[] = {
+       { .compatible = "murata,ncp15wb473",
+               .data = &ntc_thermistor_id[0] },
+       { .compatible = "murata,ncp18wb473",
+               .data = &ntc_thermistor_id[1] },
+       { .compatible = "murata,ncp21wb473",
+               .data = &ntc_thermistor_id[2] },
+       { .compatible = "murata,ncp03wb473",
+               .data = &ntc_thermistor_id[3] },
+       { .compatible = "murata,ncp15wl333",
+               .data = &ntc_thermistor_id[4] },
+
+       /* Usage of vendor name "ntc" is deprecated */
        { .compatible = "ntc,ncp15wb473",
                .data = &ntc_thermistor_id[0] },
        { .compatible = "ntc,ncp18wb473",
@@ -500,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
        }
 
        dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
-                                                               pdev->name);
+                                                               pdev_id->name);
 
        return 0;
 err_after_sysfs:
@@ -534,7 +546,7 @@ static struct platform_driver ntc_thermistor_driver = {
 
 module_platform_driver(ntc_thermistor_driver);
 
-MODULE_DESCRIPTION("NTC Thermistor Driver");
+MODULE_DESCRIPTION("NTC Thermistor Driver from Murata");
 MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS("platform:ntc-thermistor");
index efee4c5..34b9a60 100644 (file)
@@ -86,7 +86,7 @@ static inline u8 IN_TO_REG(unsigned long val, int n)
  */
 static inline s8 TEMP_TO_REG(int val)
 {
-       return clamp_val(SCALE(val, 1, 1000), -128000, 127000);
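+       /* clamp the millidegree value before scaling so the result fits in s8 */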
+       return SCALE(clamp_val(val, -128000, 127000), 1, 1000);
 }
 
 static inline int TEMP_FROM_REG(s8 val)
@@ -384,6 +384,8 @@ static ssize_t set_vrm(struct device *dev, struct device_attribute *attr,
        err = kstrtoul(buf, 10, &val);
        if (err)
                return err;
+       if (val > 255)
+               return -EINVAL;
 
        data->vrm = val;
        return count;
index 6ed76ce..32487c1 100644 (file)
@@ -249,7 +249,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
        int nr = to_sensor_dev_attr(attr)->index; \
        struct w83l786ng_data *data = w83l786ng_update_device(dev); \
        return sprintf(buf, "%d\n", \
-               FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); \
+               FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
 }
 
 show_fan_reg(fan);
index 620d100..9f7d585 100644 (file)
@@ -676,6 +676,16 @@ config I2C_RIIC
          This driver can also be built as a module.  If so, the module
          will be called i2c-riic.
 
+config I2C_RK3X
+       tristate "Rockchip RK3xxx I2C adapter"
+       depends on OF
+       help
+         Say Y here to include support for the I2C adapter in Rockchip RK3xxx
+         SoCs.
+
+         This driver can also be built as a module. If so, the module will
+         be called i2c-rk3x.
+
 config HAVE_S3C2410_I2C
        bool
        help
@@ -764,6 +774,19 @@ config I2C_STU300
          This driver can also be built as a module. If so, the module
          will be called i2c-stu300.
 
+config I2C_SUN6I_P2WI
+       tristate "Allwinner sun6i internal P2WI controller"
+       depends on RESET_CONTROLLER
+       depends on MACH_SUN6I || COMPILE_TEST
+       help
+         If you say yes to this option, support will be included for the
+         P2WI (Push/Pull 2 Wire Interface) controller embedded in some sunxi
+         SoCs.
+         The P2WI looks like an SMBus controller (which supports only byte
+         accesses), except that it only supports one slave device.
+         This interface is used to connect to specific PMIC devices (like the
+         AXP221).
+
 config I2C_TEGRA
        tristate "NVIDIA Tegra internal I2C controller"
        depends on ARCH_TEGRA
index 298692c..dd9a7f8 100644 (file)
@@ -66,6 +66,7 @@ obj-$(CONFIG_I2C_PXA)         += i2c-pxa.o
 obj-$(CONFIG_I2C_PXA_PCI)      += i2c-pxa-pci.o
 obj-$(CONFIG_I2C_QUP)          += i2c-qup.o
 obj-$(CONFIG_I2C_RIIC)         += i2c-riic.o
+obj-$(CONFIG_I2C_RK3X)         += i2c-rk3x.o
 obj-$(CONFIG_I2C_S3C2410)      += i2c-s3c2410.o
 obj-$(CONFIG_I2C_S6000)                += i2c-s6000.o
 obj-$(CONFIG_I2C_SH7760)       += i2c-sh7760.o
@@ -74,6 +75,7 @@ obj-$(CONFIG_I2C_SIMTEC)      += i2c-simtec.o
 obj-$(CONFIG_I2C_SIRF)         += i2c-sirf.o
 obj-$(CONFIG_I2C_ST)           += i2c-st.o
 obj-$(CONFIG_I2C_STU300)       += i2c-stu300.o
+obj-$(CONFIG_I2C_SUN6I_P2WI)   += i2c-sun6i-p2wi.o
 obj-$(CONFIG_I2C_TEGRA)                += i2c-tegra.o
 obj-$(CONFIG_I2C_VERSATILE)    += i2c-versatile.o
 obj-$(CONFIG_I2C_WMT)          += i2c-wmt.o
diff --git a/drivers/i2c/busses/i2c-rk3x.c b/drivers/i2c/busses/i2c-rk3x.c
new file mode 100644 (file)
index 0000000..a979150
--- /dev/null
@@ -0,0 +1,763 @@
+/*
+ * Driver for I2C adapter in Rockchip RK3xxx SoC
+ *
+ * Max Schwarz <max.schwarz@online.de>
+ * based on the patches by Rockchip Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/interrupt.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/spinlock.h>
+#include <linux/clk.h>
+#include <linux/wait.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+
+
+/* Register Map */
+#define REG_CON        0x00 /* control register */
+#define REG_CLKDIV     0x04 /* clock divisor register */
+#define REG_MRXADDR    0x08 /* slave address for REGISTER_TX */
+#define REG_MRXRADDR   0x0c /* slave register address for REGISTER_TX */
+#define REG_MTXCNT     0x10 /* number of bytes to be transmitted */
+#define REG_MRXCNT     0x14 /* number of bytes to be received */
+#define REG_IEN        0x18 /* interrupt enable */
+#define REG_IPD        0x1c /* interrupt pending */
+#define REG_FCNT       0x20 /* finished count */
+
+/* Data buffer offsets */
+#define TXBUFFER_BASE 0x100
+#define RXBUFFER_BASE 0x200
+
+/* REG_CON bits */
+#define REG_CON_EN        BIT(0)
+enum {
+       REG_CON_MOD_TX = 0,      /* transmit data */
+       REG_CON_MOD_REGISTER_TX, /* select register and restart */
+       REG_CON_MOD_RX,          /* receive data */
+       REG_CON_MOD_REGISTER_RX, /* broken: transmits read addr AND writes
+                                 * register addr */
+};
+#define REG_CON_MOD(mod)  ((mod) << 1)
+#define REG_CON_MOD_MASK  (BIT(1) | BIT(2))
+#define REG_CON_START     BIT(3)
+#define REG_CON_STOP      BIT(4)
+#define REG_CON_LASTACK   BIT(5) /* 1: send NACK after last received byte */
+#define REG_CON_ACTACK    BIT(6) /* 1: stop if NACK is received */
+
+/* REG_MRXADDR bits */
+#define REG_MRXADDR_VALID(x) BIT(24 + (x)) /* [x*8+7:x*8] of MRX[R]ADDR valid */
+
+/* REG_IEN/REG_IPD bits */
+#define REG_INT_BTF       BIT(0) /* a byte was transmitted */
+#define REG_INT_BRF       BIT(1) /* a byte was received */
+#define REG_INT_MBTF      BIT(2) /* master data transmit finished */
+#define REG_INT_MBRF      BIT(3) /* master data receive finished */
+#define REG_INT_START     BIT(4) /* START condition generated */
+#define REG_INT_STOP      BIT(5) /* STOP condition generated */
+#define REG_INT_NAKRCV    BIT(6) /* NACK received */
+#define REG_INT_ALL       0x7f
+
+/* Constants */
+#define WAIT_TIMEOUT      200 /* ms */
+#define DEFAULT_SCL_RATE  (100 * 1000) /* Hz */
+
+enum rk3x_i2c_state {
+       STATE_IDLE,
+       STATE_START,
+       STATE_READ,
+       STATE_WRITE,
+       STATE_STOP
+};
+
+/**
+ * struct rk3x_i2c_soc_data - SoC-specific driver data
+ * @grf_offset: offset inside the grf regmap for setting the i2c type
+ */
+struct rk3x_i2c_soc_data {
+       int grf_offset;
+};
+
+struct rk3x_i2c {
+       struct i2c_adapter adap;
+       struct device *dev;
+       struct rk3x_i2c_soc_data *soc_data;
+
+       /* Hardware resources */
+       void __iomem *regs;
+       struct clk *clk;
+
+       /* Settings */
+       unsigned int scl_frequency;
+
+       /* Synchronization & notification */
+       spinlock_t lock;
+       wait_queue_head_t wait;
+       bool busy;
+
+       /* Current message */
+       struct i2c_msg *msg;
+       u8 addr;
+       unsigned int mode;
+       bool is_last_msg;
+
+       /* I2C state machine */
+       enum rk3x_i2c_state state;
+       unsigned int processed; /* sent/received bytes */
+       int error;
+};
+
+static inline void i2c_writel(struct rk3x_i2c *i2c, u32 value,
+                             unsigned int offset)
+{
+       writel(value, i2c->regs + offset);
+}
+
+static inline u32 i2c_readl(struct rk3x_i2c *i2c, unsigned int offset)
+{
+       return readl(i2c->regs + offset);
+}
+
+/* Reset all interrupt pending bits */
+static inline void rk3x_i2c_clean_ipd(struct rk3x_i2c *i2c)
+{
+       i2c_writel(i2c, REG_INT_ALL, REG_IPD);
+}
+
+/**
+ * Generate a START condition, which triggers a REG_INT_START interrupt.
+ */
+static void rk3x_i2c_start(struct rk3x_i2c *i2c)
+{
+       u32 val;
+
+       rk3x_i2c_clean_ipd(i2c);
+       i2c_writel(i2c, REG_INT_START, REG_IEN);
+
+       /* enable adapter with correct mode, send START condition */
+       val = REG_CON_EN | REG_CON_MOD(i2c->mode) | REG_CON_START;
+
+       /* if we want to react to NACK, set ACTACK bit */
+       if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
+               val |= REG_CON_ACTACK;
+
+       i2c_writel(i2c, val, REG_CON);
+}
+
+/**
+ * Generate a STOP condition, which triggers a REG_INT_STOP interrupt.
+ *
+ * @error: Error code to return in rk3x_i2c_xfer
+ */
+static void rk3x_i2c_stop(struct rk3x_i2c *i2c, int error)
+{
+       unsigned int ctrl;
+
+       i2c->processed = 0;
+       i2c->msg = NULL;
+       i2c->error = error;
+
+       if (i2c->is_last_msg) {
+               /* Enable stop interrupt */
+               i2c_writel(i2c, REG_INT_STOP, REG_IEN);
+
+               i2c->state = STATE_STOP;
+
+               ctrl = i2c_readl(i2c, REG_CON);
+               ctrl |= REG_CON_STOP;
+               i2c_writel(i2c, ctrl, REG_CON);
+       } else {
+               /* Signal rk3x_i2c_xfer to start the next message. */
+               i2c->busy = false;
+               i2c->state = STATE_IDLE;
+
+               /*
+                * The HW is actually not capable of REPEATED START. But we can
+                * get the intended effect by resetting its internal state
+                * and issuing an ordinary START.
+                */
+               i2c_writel(i2c, 0, REG_CON);
+
+               /* signal that we are finished with the current msg */
+               wake_up(&i2c->wait);
+       }
+}
+
+/**
+ * Setup a read according to i2c->msg
+ */
+static void rk3x_i2c_prepare_read(struct rk3x_i2c *i2c)
+{
+       unsigned int len = i2c->msg->len - i2c->processed;
+       u32 con;
+
+       con = i2c_readl(i2c, REG_CON);
+
+       /*
+        * The hw can read up to 32 bytes at a time. If we need more than one
+        * chunk, send an ACK after the last byte of the current chunk.
+        */
+       if (unlikely(len > 32)) {
+               len = 32;
+               con &= ~REG_CON_LASTACK;
+       } else {
+               con |= REG_CON_LASTACK;
+       }
+
+       /* make sure we are in plain RX mode if we read a second chunk */
+       if (i2c->processed != 0) {
+               con &= ~REG_CON_MOD_MASK;
+               con |= REG_CON_MOD(REG_CON_MOD_RX);
+       }
+
+       i2c_writel(i2c, con, REG_CON);
+       i2c_writel(i2c, len, REG_MRXCNT);
+}
+
+/**
+ * Fill the transmit buffer with data from i2c->msg
+ */
+static void rk3x_i2c_fill_transmit_buf(struct rk3x_i2c *i2c)
+{
+       unsigned int i, j;
+       u32 cnt = 0;
+       u32 val;
+       u8 byte;
+
+       for (i = 0; i < 8; ++i) {
+               val = 0;
+               for (j = 0; j < 4; ++j) {
+                       if (i2c->processed == i2c->msg->len)
+                               break;
+
+                       if (i2c->processed == 0 && cnt == 0)
+                               byte = (i2c->addr & 0x7f) << 1;
+                       else
+                               byte = i2c->msg->buf[i2c->processed++];
+
+                       val |= byte << (j * 8);
+                       cnt++;
+               }
+
+               i2c_writel(i2c, val, TXBUFFER_BASE + 4 * i);
+
+               if (i2c->processed == i2c->msg->len)
+                       break;
+       }
+
+       i2c_writel(i2c, cnt, REG_MTXCNT);
+}
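+
+/*
+ * For illustration: writing the three bytes {0x01, 0x02, 0x03} to slave
+ * address 0x50 packs a single 32-bit word,
+ *   val = 0xa0 | (0x01 << 8) | (0x02 << 16) | (0x03 << 24)
+ * (0xa0 being 0x50 << 1), and sets REG_MTXCNT to 4: the address byte
+ * plus three data bytes.
+ */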
+
+
+/* IRQ handlers for individual states */
+
+static void rk3x_i2c_handle_start(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       if (!(ipd & REG_INT_START)) {
+               rk3x_i2c_stop(i2c, -EIO);
+               dev_warn(i2c->dev, "unexpected irq in START: 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               return;
+       }
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_START, REG_IPD);
+
+       /* disable start bit */
+       i2c_writel(i2c, i2c_readl(i2c, REG_CON) & ~REG_CON_START, REG_CON);
+
+       /* enable appropriate interrupts and transition */
+       if (i2c->mode == REG_CON_MOD_TX) {
+               i2c_writel(i2c, REG_INT_MBTF | REG_INT_NAKRCV, REG_IEN);
+               i2c->state = STATE_WRITE;
+               rk3x_i2c_fill_transmit_buf(i2c);
+       } else {
+               /* in any other case, we are going to be reading. */
+               i2c_writel(i2c, REG_INT_MBRF | REG_INT_NAKRCV, REG_IEN);
+               i2c->state = STATE_READ;
+               rk3x_i2c_prepare_read(i2c);
+       }
+}
+
+static void rk3x_i2c_handle_write(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       if (!(ipd & REG_INT_MBTF)) {
+               rk3x_i2c_stop(i2c, -EIO);
+               dev_err(i2c->dev, "unexpected irq in WRITE: 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               return;
+       }
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_MBTF, REG_IPD);
+
+       /* are we finished? */
+       if (i2c->processed == i2c->msg->len)
+               rk3x_i2c_stop(i2c, i2c->error);
+       else
+               rk3x_i2c_fill_transmit_buf(i2c);
+}
+
+static void rk3x_i2c_handle_read(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       unsigned int i;
+       unsigned int len = i2c->msg->len - i2c->processed;
+       u32 uninitialized_var(val);
+       u8 byte;
+
+       /* we only care for MBRF here. */
+       if (!(ipd & REG_INT_MBRF))
+               return;
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_MBRF, REG_IPD);
+
+       /* read the data from receive buffer */
+       for (i = 0; i < len; ++i) {
+               if (i % 4 == 0)
+                       val = i2c_readl(i2c, RXBUFFER_BASE + (i / 4) * 4);
+
+               byte = (val >> ((i % 4) * 8)) & 0xff;
+               i2c->msg->buf[i2c->processed++] = byte;
+       }
+
+       /* are we finished? */
+       if (i2c->processed == i2c->msg->len)
+               rk3x_i2c_stop(i2c, i2c->error);
+       else
+               rk3x_i2c_prepare_read(i2c);
+}
+
+static void rk3x_i2c_handle_stop(struct rk3x_i2c *i2c, unsigned int ipd)
+{
+       unsigned int con;
+
+       if (!(ipd & REG_INT_STOP)) {
+               rk3x_i2c_stop(i2c, -EIO);
+               dev_err(i2c->dev, "unexpected irq in STOP: 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               return;
+       }
+
+       /* ack interrupt */
+       i2c_writel(i2c, REG_INT_STOP, REG_IPD);
+
+       /* disable STOP bit */
+       con = i2c_readl(i2c, REG_CON);
+       con &= ~REG_CON_STOP;
+       i2c_writel(i2c, con, REG_CON);
+
+       i2c->busy = false;
+       i2c->state = STATE_IDLE;
+
+       /* signal rk3x_i2c_xfer that we are finished */
+       wake_up(&i2c->wait);
+}
+
+static irqreturn_t rk3x_i2c_irq(int irqno, void *dev_id)
+{
+       struct rk3x_i2c *i2c = dev_id;
+       unsigned int ipd;
+
+       spin_lock(&i2c->lock);
+
+       ipd = i2c_readl(i2c, REG_IPD);
+       if (i2c->state == STATE_IDLE) {
+               dev_warn(i2c->dev, "irq in STATE_IDLE, ipd = 0x%x\n", ipd);
+               rk3x_i2c_clean_ipd(i2c);
+               goto out;
+       }
+
+       dev_dbg(i2c->dev, "IRQ: state %d, ipd: %x\n", i2c->state, ipd);
+
+       /* Clean interrupt bits we don't care about */
+       ipd &= ~(REG_INT_BRF | REG_INT_BTF);
+
+       if (ipd & REG_INT_NAKRCV) {
+               /*
+                * We got a NACK in the last operation. Depending on whether
+                * IGNORE_NAK is set, we have to stop the operation and report
+                * an error.
+                */
+               i2c_writel(i2c, REG_INT_NAKRCV, REG_IPD);
+
+               ipd &= ~REG_INT_NAKRCV;
+
+               if (!(i2c->msg->flags & I2C_M_IGNORE_NAK))
+                       rk3x_i2c_stop(i2c, -ENXIO);
+       }
+
+       /* is there anything left to handle? */
+       if (unlikely(ipd == 0))
+               goto out;
+
+       switch (i2c->state) {
+       case STATE_START:
+               rk3x_i2c_handle_start(i2c, ipd);
+               break;
+       case STATE_WRITE:
+               rk3x_i2c_handle_write(i2c, ipd);
+               break;
+       case STATE_READ:
+               rk3x_i2c_handle_read(i2c, ipd);
+               break;
+       case STATE_STOP:
+               rk3x_i2c_handle_stop(i2c, ipd);
+               break;
+       case STATE_IDLE:
+               break;
+       }
+
+out:
+       spin_unlock(&i2c->lock);
+       return IRQ_HANDLED;
+}
+
+static void rk3x_i2c_set_scl_rate(struct rk3x_i2c *i2c, unsigned long scl_rate)
+{
+       unsigned long i2c_rate = clk_get_rate(i2c->clk);
+       unsigned int div;
+
+       /* SCL rate = (clk rate) / (8 * DIV) */
+       div = DIV_ROUND_UP(i2c_rate, scl_rate * 8);
+
+       /* The lower and upper half of the CLKDIV reg describe the length of
+        * SCL low & high periods. */
+       div = DIV_ROUND_UP(div, 2);
+
+       i2c_writel(i2c, (div << 16) | (div & 0xffff), REG_CLKDIV);
+}
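+
+/*
+ * For illustration, with a 100 MHz input clock and a 400 kHz target:
+ *   div = DIV_ROUND_UP(100000000, 400000 * 8) = 32
+ *   div = DIV_ROUND_UP(32, 2)                 = 16
+ * so REG_CLKDIV becomes 0x00100010. Rounding up can only make SCL
+ * slower than requested, never faster.
+ */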
+
+/**
+ * Setup I2C registers for an I2C operation specified by msgs, num.
+ *
+ * Must be called with i2c->lock held.
+ *
+ * @msgs: I2C msgs to process
+ * @num: Number of msgs
+ *
+ * returns: Number of I2C msgs processed or negative in case of error
+ */
+static int rk3x_i2c_setup(struct rk3x_i2c *i2c, struct i2c_msg *msgs, int num)
+{
+       u32 addr = (msgs[0].addr & 0x7f) << 1;
+       int ret = 0;
+
+       /*
+        * The I2C adapter can issue a small (len < 4) write packet before
+        * reading. This speeds up SMBus-style register reads.
+        * The MRXADDR/MRXRADDR hold the slave address and the slave register
+        * address in this case.
+        */
+
+       if (num >= 2 && msgs[0].len < 4 &&
+           !(msgs[0].flags & I2C_M_RD) && (msgs[1].flags & I2C_M_RD)) {
+               u32 reg_addr = 0;
+               int i;
+
+               dev_dbg(i2c->dev, "Combined write/read from addr 0x%x\n",
+                       addr >> 1);
+
+               /* Fill MRXRADDR with the register address(es) */
+               for (i = 0; i < msgs[0].len; ++i) {
+                       reg_addr |= msgs[0].buf[i] << (i * 8);
+                       reg_addr |= REG_MRXADDR_VALID(i);
+               }
+
+               /* msgs[0] is handled by hw. */
+               i2c->msg = &msgs[1];
+
+               i2c->mode = REG_CON_MOD_REGISTER_TX;
+
+               i2c_writel(i2c, addr | REG_MRXADDR_VALID(0), REG_MRXADDR);
+               i2c_writel(i2c, reg_addr, REG_MRXRADDR);
+
+               ret = 2;
+       } else {
+               /*
+                * We'll have to do it the boring way and process the msgs
+                * one-by-one.
+                */
+
+               if (msgs[0].flags & I2C_M_RD) {
+                       addr |= 1; /* set read bit */
+
+                       /*
+                        * We have to transmit the slave addr first. Use
+                        * MOD_REGISTER_TX for that purpose.
+                        */
+                       i2c->mode = REG_CON_MOD_REGISTER_TX;
+                       i2c_writel(i2c, addr | REG_MRXADDR_VALID(0),
+                                  REG_MRXADDR);
+                       i2c_writel(i2c, 0, REG_MRXRADDR);
+               } else {
+                       i2c->mode = REG_CON_MOD_TX;
+               }
+
+               i2c->msg = &msgs[0];
+
+               ret = 1;
+       }
+
+       i2c->addr = msgs[0].addr;
+       i2c->busy = true;
+       i2c->state = STATE_START;
+       i2c->processed = 0;
+       i2c->error = 0;
+
+       rk3x_i2c_clean_ipd(i2c);
+
+       return ret;
+}
+
+static int rk3x_i2c_xfer(struct i2c_adapter *adap,
+                        struct i2c_msg *msgs, int num)
+{
+       struct rk3x_i2c *i2c = (struct rk3x_i2c *)adap->algo_data;
+       unsigned long timeout, flags;
+       int ret = 0;
+       int i;
+
+       spin_lock_irqsave(&i2c->lock, flags);
+
+       clk_enable(i2c->clk);
+
+       /* The clock rate might have changed, so setup the divider again */
+       rk3x_i2c_set_scl_rate(i2c, i2c->scl_frequency);
+
+       i2c->is_last_msg = false;
+
+       /*
+        * Process msgs. We can handle more than one message at once (see
+        * rk3x_i2c_setup()).
+        */
+       for (i = 0; i < num; i += ret) {
+               ret = rk3x_i2c_setup(i2c, msgs + i, num - i);
+
+               if (ret < 0) {
+                       dev_err(i2c->dev, "rk3x_i2c_setup() failed\n");
+                       break;
+               }
+
+               if (i + ret >= num)
+                       i2c->is_last_msg = true;
+
+               spin_unlock_irqrestore(&i2c->lock, flags);
+
+               rk3x_i2c_start(i2c);
+
+               timeout = wait_event_timeout(i2c->wait, !i2c->busy,
+                                            msecs_to_jiffies(WAIT_TIMEOUT));
+
+               spin_lock_irqsave(&i2c->lock, flags);
+
+               if (timeout == 0) {
+                       dev_err(i2c->dev, "timeout, ipd: 0x%02x, state: %d\n",
+                               i2c_readl(i2c, REG_IPD), i2c->state);
+
+                       /* Force a STOP condition without interrupt */
+                       i2c_writel(i2c, 0, REG_IEN);
+                       i2c_writel(i2c, REG_CON_EN | REG_CON_STOP, REG_CON);
+
+                       i2c->state = STATE_IDLE;
+
+                       ret = -ETIMEDOUT;
+                       break;
+               }
+
+               if (i2c->error) {
+                       ret = i2c->error;
+                       break;
+               }
+       }
+
+       clk_disable(i2c->clk);
+       spin_unlock_irqrestore(&i2c->lock, flags);
+
+       return ret;
+}
+
+static u32 rk3x_i2c_func(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_PROTOCOL_MANGLING;
+}
+
+static const struct i2c_algorithm rk3x_i2c_algorithm = {
+       .master_xfer            = rk3x_i2c_xfer,
+       .functionality          = rk3x_i2c_func,
+};
+
+static struct rk3x_i2c_soc_data soc_data[3] = {
+       { .grf_offset = 0x154 }, /* rk3066 */
+       { .grf_offset = 0x0a4 }, /* rk3188 */
+       { .grf_offset = -1 },    /* no I2C switching needed */
+};
+
+static const struct of_device_id rk3x_i2c_match[] = {
+       { .compatible = "rockchip,rk3066-i2c", .data = (void *)&soc_data[0] },
+       { .compatible = "rockchip,rk3188-i2c", .data = (void *)&soc_data[1] },
+       { .compatible = "rockchip,rk3288-i2c", .data = (void *)&soc_data[2] },
+       {},
+};
+
+static int rk3x_i2c_probe(struct platform_device *pdev)
+{
+       struct device_node *np = pdev->dev.of_node;
+       const struct of_device_id *match;
+       struct rk3x_i2c *i2c;
+       struct resource *mem;
+       int ret = 0;
+       int bus_nr;
+       u32 value;
+       int irq;
+
+       i2c = devm_kzalloc(&pdev->dev, sizeof(struct rk3x_i2c), GFP_KERNEL);
+       if (!i2c)
+               return -ENOMEM;
+
+       match = of_match_node(rk3x_i2c_match, np);
+       i2c->soc_data = (struct rk3x_i2c_soc_data *)match->data;
+
+       if (of_property_read_u32(pdev->dev.of_node, "clock-frequency",
+                                &i2c->scl_frequency)) {
+               dev_info(&pdev->dev, "using default SCL frequency: %d\n",
+                        DEFAULT_SCL_RATE);
+               i2c->scl_frequency = DEFAULT_SCL_RATE;
+       }
+
+       if (i2c->scl_frequency == 0 || i2c->scl_frequency > 400 * 1000) {
+               dev_warn(&pdev->dev, "invalid SCL frequency specified.\n");
+               dev_warn(&pdev->dev, "using default SCL frequency: %d\n",
+                        DEFAULT_SCL_RATE);
+               i2c->scl_frequency = DEFAULT_SCL_RATE;
+       }
+
+       strlcpy(i2c->adap.name, "rk3x-i2c", sizeof(i2c->adap.name));
+       i2c->adap.owner = THIS_MODULE;
+       i2c->adap.algo = &rk3x_i2c_algorithm;
+       i2c->adap.retries = 3;
+       i2c->adap.dev.of_node = np;
+       i2c->adap.algo_data = i2c;
+       i2c->adap.dev.parent = &pdev->dev;
+
+       i2c->dev = &pdev->dev;
+
+       spin_lock_init(&i2c->lock);
+       init_waitqueue_head(&i2c->wait);
+
+       i2c->clk = devm_clk_get(&pdev->dev, NULL);
+       if (IS_ERR(i2c->clk)) {
+               dev_err(&pdev->dev, "cannot get clock\n");
+               return PTR_ERR(i2c->clk);
+       }
+
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       i2c->regs = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(i2c->regs))
+               return PTR_ERR(i2c->regs);
+
+       /* Try to set the I2C adapter number from dt */
+       bus_nr = of_alias_get_id(np, "i2c");
+
+       /*
+        * Switch to new interface if the SoC also offers the old one.
+        * The control bit is located in the GRF register space.
+        */
+       if (i2c->soc_data->grf_offset >= 0) {
+               struct regmap *grf;
+
+               grf = syscon_regmap_lookup_by_phandle(np, "rockchip,grf");
+               if (IS_ERR(grf)) {
+                       dev_err(&pdev->dev,
+                               "rk3x-i2c needs 'rockchip,grf' property\n");
+                       return PTR_ERR(grf);
+               }
+
+               if (bus_nr < 0) {
+                       dev_err(&pdev->dev, "rk3x-i2c needs i2cX alias\n");
+                       return -EINVAL;
+               }
+
+               /* 27+i: write mask, 11+i: value */
+               value = BIT(27 + bus_nr) | BIT(11 + bus_nr);
+
+               ret = regmap_write(grf, i2c->soc_data->grf_offset, value);
+               if (ret != 0) {
+                       dev_err(i2c->dev, "Could not write to GRF: %d\n", ret);
+                       return ret;
+               }
+       }
+
+       /* IRQ setup */
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(&pdev->dev, "cannot find rk3x IRQ\n");
+               return irq;
+       }
+
+       ret = devm_request_irq(&pdev->dev, irq, rk3x_i2c_irq,
+                              0, dev_name(&pdev->dev), i2c);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "cannot request IRQ\n");
+               return ret;
+       }
+
+       platform_set_drvdata(pdev, i2c);
+
+       ret = clk_prepare(i2c->clk);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not prepare clock\n");
+               return ret;
+       }
+
+       ret = i2c_add_adapter(&i2c->adap);
+       if (ret < 0) {
+               dev_err(&pdev->dev, "Could not register adapter\n");
+               goto err_clk;
+       }
+
+       dev_info(&pdev->dev, "Initialized RK3xxx I2C bus at %p\n", i2c->regs);
+
+       return 0;
+
+err_clk:
+       clk_unprepare(i2c->clk);
+       return ret;
+}
+
+static int rk3x_i2c_remove(struct platform_device *pdev)
+{
+       struct rk3x_i2c *i2c = platform_get_drvdata(pdev);
+
+       i2c_del_adapter(&i2c->adap);
+       clk_unprepare(i2c->clk);
+
+       return 0;
+}
+
+static struct platform_driver rk3x_i2c_driver = {
+       .probe   = rk3x_i2c_probe,
+       .remove  = rk3x_i2c_remove,
+       .driver  = {
+               .owner = THIS_MODULE,
+               .name  = "rk3x-i2c",
+               .of_match_table = rk3x_i2c_match,
+       },
+};
+
+module_platform_driver(rk3x_i2c_driver);
+
+MODULE_DESCRIPTION("Rockchip RK3xxx I2C Bus driver");
+MODULE_AUTHOR("Max Schwarz <max.schwarz@online.de>");
+MODULE_LICENSE("GPL v2");
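
To make the combined write/read fast path in rk3x_i2c_setup() concrete: an
SMBus-style register read reaches rk3x_i2c_xfer() as two messages, which the
driver collapses into one REGISTER_TX operation. A minimal sketch of what a
client would issue, assuming a hypothetical device at address 0x50 with a
one-byte register pointer (adapter standing for the registered rk3x bus):

        u8 reg = 0x10;
        u8 val;
        int ret;
        struct i2c_msg msgs[] = {
                { .addr = 0x50, .flags = 0,        .len = 1, .buf = &reg },
                { .addr = 0x50, .flags = I2C_M_RD, .len = 1, .buf = &val },
        };

        /* msgs[0] is shorter than 4 bytes and followed by a read, so
         * rk3x_i2c_setup() programs MRXADDR/MRXRADDR and the hardware
         * issues the register write and the restart by itself. */
        ret = i2c_transfer(adapter, msgs, ARRAY_SIZE(msgs));
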
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
new file mode 100644 (file)
index 0000000..4d75d47
--- /dev/null
@@ -0,0 +1,344 @@
+/*
+ * P2WI (Push-Pull Two Wire Interface) bus driver.
+ *
+ * Author: Boris BREZILLON <boris.brezillon@free-electrons.com>
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2.  This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ *
+ * The P2WI controller looks like an SMBus controller which only supports byte
+ * data transfers, but it differs from the standard SMBus protocol in
+ * several respects:
+ * - it supports only one slave device, and thus drops the address field
+ * - it adds a parity bit after every 8 bits of data
+ * - only one read access is required to read a byte (instead of a write
+ *   followed by a read access in standard SMBus protocol)
+ * - there's no Ack bit after each byte transfer
+ *
+ * This means this bus cannot be used to interface with standard SMBus
+ * devices (the only known device to support this interface is the AXP221
+ * PMIC).
+ */
+#include <linux/clk.h>
+#include <linux/i2c.h>
+#include <linux/io.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/reset.h>
+
+
+/* P2WI registers */
+#define P2WI_CTRL              0x0
+#define P2WI_CCR               0x4
+#define P2WI_INTE              0x8
+#define P2WI_INTS              0xc
+#define P2WI_DADDR0            0x10
+#define P2WI_DADDR1            0x14
+#define P2WI_DLEN              0x18
+#define P2WI_DATA0             0x1c
+#define P2WI_DATA1             0x20
+#define P2WI_LCR               0x24
+#define P2WI_PMCR              0x28
+
+/* CTRL fields */
+#define P2WI_CTRL_START_TRANS          BIT(7)
+#define P2WI_CTRL_ABORT_TRANS          BIT(6)
+#define P2WI_CTRL_GLOBAL_INT_ENB       BIT(1)
+#define P2WI_CTRL_SOFT_RST             BIT(0)
+
+/* CLK CTRL fields */
+#define P2WI_CCR_SDA_OUT_DELAY(v)      (((v) & 0x7) << 8)
+#define P2WI_CCR_MAX_CLK_DIV           0xff
+#define P2WI_CCR_CLK_DIV(v)            ((v) & P2WI_CCR_MAX_CLK_DIV)
+
+/* STATUS fields */
+#define P2WI_INTS_TRANS_ERR_ID(v)      (((v) >> 8) & 0xff)
+#define P2WI_INTS_LOAD_BSY             BIT(2)
+#define P2WI_INTS_TRANS_ERR            BIT(1)
+#define P2WI_INTS_TRANS_OVER           BIT(0)
+
+/* DATA LENGTH fields */
+#define P2WI_DLEN_READ                 BIT(4)
+#define P2WI_DLEN_DATA_LENGTH(v)       (((v) - 1) & 0x7)
+
+/* LINE CTRL fields */
+#define P2WI_LCR_SCL_STATE             BIT(5)
+#define P2WI_LCR_SDA_STATE             BIT(4)
+#define P2WI_LCR_SCL_CTL               BIT(3)
+#define P2WI_LCR_SCL_CTL_EN            BIT(2)
+#define P2WI_LCR_SDA_CTL               BIT(1)
+#define P2WI_LCR_SDA_CTL_EN            BIT(0)
+
+/* PMU MODE CTRL fields */
+#define P2WI_PMCR_PMU_INIT_SEND                BIT(31)
+#define P2WI_PMCR_PMU_INIT_DATA(v)     (((v) & 0xff) << 16)
+#define P2WI_PMCR_PMU_MODE_REG(v)      (((v) & 0xff) << 8)
+#define P2WI_PMCR_PMU_DEV_ADDR(v)      ((v) & 0xff)
+
+#define P2WI_MAX_FREQ                  6000000
+
+struct p2wi {
+       struct i2c_adapter adapter;
+       struct completion complete;
+       unsigned int status;
+       void __iomem *regs;
+       struct clk *clk;
+       struct reset_control *rstc;
+       int slave_addr;
+};
+
+static irqreturn_t p2wi_interrupt(int irq, void *dev_id)
+{
+       struct p2wi *p2wi = dev_id;
+       unsigned long status;
+
+       status = readl(p2wi->regs + P2WI_INTS);
+       p2wi->status = status;
+
+       /* Clear interrupts */
+       status &= (P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR |
+                  P2WI_INTS_TRANS_OVER);
+       writel(status, p2wi->regs + P2WI_INTS);
+
+       complete(&p2wi->complete);
+
+       return IRQ_HANDLED;
+}
+
+static u32 p2wi_functionality(struct i2c_adapter *adap)
+{
+       return I2C_FUNC_SMBUS_BYTE_DATA;
+}
+
+static int p2wi_smbus_xfer(struct i2c_adapter *adap, u16 addr,
+                          unsigned short flags, char read_write,
+                          u8 command, int size, union i2c_smbus_data *data)
+{
+       struct p2wi *p2wi = i2c_get_adapdata(adap);
+       unsigned long dlen = P2WI_DLEN_DATA_LENGTH(1);
+
+       if (p2wi->slave_addr >= 0 && addr != p2wi->slave_addr) {
+               dev_err(&adap->dev, "invalid P2WI address\n");
+               return -EINVAL;
+       }
+
+       if (!data)
+               return -EINVAL;
+
+       writel(command, p2wi->regs + P2WI_DADDR0);
+
+       if (read_write == I2C_SMBUS_READ)
+               dlen |= P2WI_DLEN_READ;
+       else
+               writel(data->byte, p2wi->regs + P2WI_DATA0);
+
+       writel(dlen, p2wi->regs + P2WI_DLEN);
+
+       if (readl(p2wi->regs + P2WI_CTRL) & P2WI_CTRL_START_TRANS) {
+               dev_err(&adap->dev, "P2WI bus busy\n");
+               return -EBUSY;
+       }
+
+       reinit_completion(&p2wi->complete);
+
+       writel(P2WI_INTS_LOAD_BSY | P2WI_INTS_TRANS_ERR | P2WI_INTS_TRANS_OVER,
+              p2wi->regs + P2WI_INTE);
+
+       writel(P2WI_CTRL_START_TRANS | P2WI_CTRL_GLOBAL_INT_ENB,
+              p2wi->regs + P2WI_CTRL);
+
+       wait_for_completion(&p2wi->complete);
+
+       if (p2wi->status & P2WI_INTS_LOAD_BSY) {
+               dev_err(&adap->dev, "P2WI bus busy\n");
+               return -EBUSY;
+       }
+
+       if (p2wi->status & P2WI_INTS_TRANS_ERR) {
+               dev_err(&adap->dev, "P2WI bus xfer error\n");
+               return -ENXIO;
+       }
+
+       if (read_write == I2C_SMBUS_READ)
+               data->byte = readl(p2wi->regs + P2WI_DATA0);
+
+       return 0;
+}
+
+static const struct i2c_algorithm p2wi_algo = {
+       .smbus_xfer = p2wi_smbus_xfer,
+       .functionality = p2wi_functionality,
+};
+
+static const struct of_device_id p2wi_of_match_table[] = {
+       { .compatible = "allwinner,sun6i-a31-p2wi" },
+       {}
+};
+MODULE_DEVICE_TABLE(of, p2wi_of_match_table);
+
+static int p2wi_probe(struct platform_device *pdev)
+{
+       struct device *dev = &pdev->dev;
+       struct device_node *np = dev->of_node;
+       struct device_node *childnp;
+       unsigned long parent_clk_freq;
+       u32 clk_freq = 100000;
+       struct resource *r;
+       struct p2wi *p2wi;
+       u32 slave_addr;
+       int clk_div;
+       int irq;
+       int ret;
+
+       of_property_read_u32(np, "clock-frequency", &clk_freq);
+       if (clk_freq > P2WI_MAX_FREQ) {
+               dev_err(dev,
+                       "requested clock-frequency (%u Hz) is too high (max = 6 MHz)\n",
+                       clk_freq);
+               return -EINVAL;
+       }
+
+       if (of_get_child_count(np) > 1) {
+               dev_err(dev, "P2WI only supports one slave device\n");
+               return -EINVAL;
+       }
+
+       p2wi = devm_kzalloc(dev, sizeof(struct p2wi), GFP_KERNEL);
+       if (!p2wi)
+               return -ENOMEM;
+
+       p2wi->slave_addr = -1;
+
+       /*
+        * Authorize a p2wi node without any children to be able to use an
+        * i2c-dev from userspace.
+        * In this case the slave_addr is set to -1 and won't be checked when
+        * launching a P2WI transfer.
+        */
+       childnp = of_get_next_available_child(np, NULL);
+       if (childnp) {
+               ret = of_property_read_u32(childnp, "reg", &slave_addr);
+               if (ret) {
+                       dev_err(dev, "invalid slave address on node %s\n",
+                               childnp->full_name);
+                       return -EINVAL;
+               }
+
+               p2wi->slave_addr = slave_addr;
+       }
+
+       r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       p2wi->regs = devm_ioremap_resource(dev, r);
+       if (IS_ERR(p2wi->regs))
+               return PTR_ERR(p2wi->regs);
+
+       strlcpy(p2wi->adapter.name, pdev->name, sizeof(p2wi->adapter.name));
+       irq = platform_get_irq(pdev, 0);
+       if (irq < 0) {
+               dev_err(dev, "failed to retrieve irq: %d\n", irq);
+               return irq;
+       }
+
+       p2wi->clk = devm_clk_get(dev, NULL);
+       if (IS_ERR(p2wi->clk)) {
+               ret = PTR_ERR(p2wi->clk);
+               dev_err(dev, "failed to retrieve clk: %d\n", ret);
+               return ret;
+       }
+
+       ret = clk_prepare_enable(p2wi->clk);
+       if (ret) {
+               dev_err(dev, "failed to enable clk: %d\n", ret);
+               return ret;
+       }
+
+       parent_clk_freq = clk_get_rate(p2wi->clk);
+
+       p2wi->rstc = devm_reset_control_get(dev, NULL);
+       if (IS_ERR(p2wi->rstc)) {
+               ret = PTR_ERR(p2wi->rstc);
+               dev_err(dev, "failed to retrieve reset controller: %d\n", ret);
+               goto err_clk_disable;
+       }
+
+       ret = reset_control_deassert(p2wi->rstc);
+       if (ret) {
+               dev_err(dev, "failed to deassert reset line: %d\n", ret);
+               goto err_clk_disable;
+       }
+
+       init_completion(&p2wi->complete);
+       p2wi->adapter.dev.parent = dev;
+       p2wi->adapter.algo = &p2wi_algo;
+       p2wi->adapter.owner = THIS_MODULE;
+       p2wi->adapter.dev.of_node = pdev->dev.of_node;
+       platform_set_drvdata(pdev, p2wi);
+       i2c_set_adapdata(&p2wi->adapter, p2wi);
+
+       ret = devm_request_irq(dev, irq, p2wi_interrupt, 0, pdev->name, p2wi);
+       if (ret) {
+               dev_err(dev, "can't register interrupt handler irq%d: %d\n",
+                       irq, ret);
+               goto err_reset_assert;
+       }
+
+       writel(P2WI_CTRL_SOFT_RST, p2wi->regs + P2WI_CTRL);
+
+       clk_div = parent_clk_freq / clk_freq;
+       if (!clk_div) {
+               dev_warn(dev,
+                        "clock-frequency is too high, setting it to %lu Hz\n",
+                        parent_clk_freq);
+               clk_div = 1;
+       } else if (clk_div > P2WI_CCR_MAX_CLK_DIV) {
+               dev_warn(dev,
+                        "clock-frequency is too low, setting it to %lu Hz\n",
+                        parent_clk_freq / P2WI_CCR_MAX_CLK_DIV);
+               clk_div = P2WI_CCR_MAX_CLK_DIV;
+       }
+
+       writel(P2WI_CCR_SDA_OUT_DELAY(1) | P2WI_CCR_CLK_DIV(clk_div),
+              p2wi->regs + P2WI_CCR);
+
+       ret = i2c_add_adapter(&p2wi->adapter);
+       if (!ret)
+               return 0;
+
+err_reset_assert:
+       reset_control_assert(p2wi->rstc);
+
+err_clk_disable:
+       clk_disable_unprepare(p2wi->clk);
+
+       return ret;
+}
+
+static int p2wi_remove(struct platform_device *dev)
+{
+       struct p2wi *p2wi = platform_get_drvdata(dev);
+
+       reset_control_assert(p2wi->rstc);
+       clk_disable_unprepare(p2wi->clk);
+       i2c_del_adapter(&p2wi->adapter);
+
+       return 0;
+}
+
+static struct platform_driver p2wi_driver = {
+       .probe  = p2wi_probe,
+       .remove = p2wi_remove,
+       .driver = {
+               .owner = THIS_MODULE,
+               .name = "i2c-sunxi-p2wi",
+               .of_match_table = p2wi_of_match_table,
+       },
+};
+module_platform_driver(p2wi_driver);
+
+MODULE_AUTHOR("Boris BREZILLON <boris.brezillon@free-electrons.com>");
+MODULE_DESCRIPTION("Allwinner P2WI driver");
+MODULE_LICENSE("GPL v2");
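
Because the controller registers a standard SMBus byte-data algorithm,
consumers need nothing P2WI-specific. A minimal sketch, assuming an AXP221
client whose address matches the single "reg" child declared in the device
tree:

        /* one register read; p2wi_smbus_xfer() rejects any other address */
        s32 val = i2c_smbus_read_byte_data(client, 0x00);

        if (val < 0)
                return val;     /* -EBUSY or -ENXIO from the transfer */
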
index f7f9865..f6d313e 100644 (file)
@@ -40,6 +40,7 @@ config I2C_MUX_PCA9541
 
 config I2C_MUX_PCA954x
        tristate "Philips PCA954x I2C Mux/switches"
+       depends on GPIOLIB
        help
          If you say yes here you get support for the Philips PCA954x
          I2C mux/switch devices.
index 8fb46aa..a04c49f 100644 (file)
@@ -416,6 +416,7 @@ config BLK_DEV_CY82C693
 
 config BLK_DEV_CS5520
        tristate "Cyrix CS5510/20 MediaGX chipset support (VERY EXPERIMENTAL)"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for PIO tuning and virtual DMA on the Cyrix MediaGX
@@ -426,6 +427,7 @@ config BLK_DEV_CS5520
 
 config BLK_DEV_CS5530
        tristate "Cyrix/National Semiconductor CS5530 MediaGX chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the Cyrix MediaGX 5530 chipset. This
@@ -435,7 +437,7 @@ config BLK_DEV_CS5530
 
 config BLK_DEV_CS5535
        tristate "AMD CS5535 chipset support"
-       depends on X86 && !X86_64
+       depends on X86_32
        select BLK_DEV_IDEDMA_PCI
        help
          Include support for UDMA on the NSC/AMD CS5535 companion chipset.
@@ -486,6 +488,7 @@ config BLK_DEV_JMICRON
 
 config BLK_DEV_SC1200
        tristate "National SCx200 chipset support"
+       depends on X86_32 || COMPILE_TEST
        select BLK_DEV_IDEDMA_PCI
        help
          This driver adds support for the on-board IDE controller on the
index 2a744a9..a3d3b17 100644 (file)
@@ -853,8 +853,9 @@ static int init_irq (ide_hwif_t *hwif)
        if (irq_handler == NULL)
                irq_handler = ide_intr;
 
-       if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
-               goto out_up;
+       if (!host->get_lock)
+               if (request_irq(hwif->irq, irq_handler, sa, hwif->name, hwif))
+                       goto out_up;
 
 #if !defined(__mc68000__)
        printk(KERN_INFO "%s at 0x%03lx-0x%03lx,0x%03lx on irq %d", hwif->name,
@@ -1533,7 +1534,8 @@ static void ide_unregister(ide_hwif_t *hwif)
 
        ide_proc_unregister_port(hwif);
 
-       free_irq(hwif->irq, hwif);
+       if (!hwif->host->get_lock)
+               free_irq(hwif->irq, hwif);
 
        device_unregister(hwif->portdev);
        device_unregister(&hwif->gendev);
index 69abf91..54e464e 100644 (file)
@@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
        struct accel_3d_state *accel_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                        &accel_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                        &accel_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
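
The same ret/ret_type collapse recurs in the gyro, ALS, proximity,
magnetometer and pressure drivers below. It presumes the two helpers were
changed elsewhere in this series to return the IIO value type themselves,
roughly:

        /* assumed helper shape: returns IIO_VAL_INT_PLUS_MICRO on
         * success, or a negative errno on failure */
        int hid_sensor_read_samp_freq_value(struct hid_sensor_common *st,
                                            int *val1, int *val2);
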
index 17aeea1..2a5fa9a 100644 (file)
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
        {6, 250000}, {1, 560000}
 };
 
+/*
+ * Hardware has a full scale of -2G, -4G and -8G, corresponding to a raw
+ * value of -2048. The userspace interface uses m/s^2 and we declare micro
+ * units. So the scale factor is given by:
+ *     g * N * 1000000 / 2048 for N = 2, 4, 8 and g = 9.80665
+ */
 static const int mma8452_scales[3][2] = {
-       {0, 977}, {0, 1953}, {0, 3906}
+       {0, 9577}, {0, 19154}, {0, 38307}
 };
 
 static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
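
Plugging the numbers into the formula from the new comment shows where the
corrected table entries come from:

        N = 2:  9.80665 * 2 * 1000000 / 2048 =  9576.8  ->  {0, 9577}
        N = 4:  9.80665 * 4 * 1000000 / 2048 = 19153.6  ->  {0, 19154}
        N = 8:  9.80665 * 8 * 1000000 / 2048 = 38307.2  ->  {0, 38307}
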
index 39b4cb4..6eba301 100644 (file)
@@ -427,9 +427,12 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
        int ret;
        struct ad799x_state *st = iio_priv(indio_dev);
 
+       if (val < 0 || val > RES_MASK(chan->scan_type.realbits))
+               return -EINVAL;
+
        mutex_lock(&indio_dev->mlock);
        ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info),
-               val);
+               val << chan->scan_type.shift);
        mutex_unlock(&indio_dev->mlock);
 
        return ret;
@@ -452,7 +455,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
        mutex_unlock(&indio_dev->mlock);
        if (ret < 0)
                return ret;
-       *val = valin;
+       *val = (valin >> chan->scan_type.shift) &
+               RES_MASK(chan->scan_type.realbits);
 
        return IIO_VAL_INT;
 }
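
The write and read paths now round-trip through the register layout encoded
in scan_type. A sketch of the symmetry, assuming RES_MASK(bits) expands to
the usual ((1 << (bits)) - 1):

        /* write: left-align the user value into the threshold register */
        reg = val << chan->scan_type.shift;

        /* read: undo the shift and strip status bits above realbits */
        val = (reg >> chan->scan_type.shift) &
              RES_MASK(chan->scan_type.realbits);
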
index 3b5bacd..2b6a9ce 100644 (file)
@@ -510,12 +510,11 @@ static int at91_adc_channel_init(struct iio_dev *idev)
        return idev->num_channels;
 }
 
-static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
+static int at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
                                             struct at91_adc_trigger *triggers,
                                             const char *trigger_name)
 {
        struct at91_adc_state *st = iio_priv(idev);
-       u8 value = 0;
        int i;
 
        for (i = 0; i < st->trigger_number; i++) {
@@ -528,15 +527,16 @@ static u8 at91_adc_get_trigger_value_by_name(struct iio_dev *idev,
                        return -ENOMEM;
 
                if (strcmp(trigger_name, name) == 0) {
-                       value = triggers[i].value;
                        kfree(name);
-                       break;
+                       if (triggers[i].value == 0)
+                               return -EINVAL;
+                       return triggers[i].value;
                }
 
                kfree(name);
        }
 
-       return value;
+       return -EINVAL;
 }
 
 static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
@@ -546,14 +546,14 @@ static int at91_adc_configure_trigger(struct iio_trigger *trig, bool state)
        struct iio_buffer *buffer = idev->buffer;
        struct at91_adc_reg_desc *reg = st->registers;
        u32 status = at91_adc_readl(st, reg->trigger_register);
-       u8 value;
+       int value;
        u8 bit;
 
        value = at91_adc_get_trigger_value_by_name(idev,
                                                   st->trigger_list,
                                                   idev->trig->name);
-       if (value == 0)
-               return -EINVAL;
+       if (value < 0)
+               return value;
 
        if (state) {
                st->buffer = kmalloc(idev->scan_bytes, GFP_KERNEL);
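
The switch from u8 to int matters because a negative errno cannot survive
truncation to an unsigned byte, so the old code had to abuse 0 as its error
marker (making a legitimate trigger value of 0 indistinguishable from
failure):

        u8 v = (u8)-EINVAL;     /* -22 truncates to 0xea: neither 0 nor an errno */
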
index 6989c16..b58d630 100644 (file)
@@ -121,8 +121,8 @@ static int men_z188_probe(struct mcb_device *dev,
        indio_dev->num_channels = ARRAY_SIZE(z188_adc_iio_channels);
 
        mem = mcb_request_mem(dev, "z188-adc");
-       if (!mem)
-               return -ENOMEM;
+       if (IS_ERR(mem))
+               return PTR_ERR(mem);
 
        adc->base = ioremap(mem->start, resource_size(mem));
        if (adc->base == NULL)
index a4db302..d5dc4c6 100644 (file)
@@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
                        return -EAGAIN;
                }
        }
-       map_val = chan->channel + TOTAL_CHANNELS;
+       map_val = adc_dev->channel_step[chan->scan_index];
 
        /*
         * We check the complete FIFO. We programmed just one entry but in case
index 7de1c4c..eb86786 100644 (file)
@@ -645,6 +645,7 @@ int twl4030_get_madc_conversion(int channel_no)
        req.channels = (1 << channel_no);
        req.method = TWL4030_MADC_SW2;
        req.active = 0;
+       req.raw = 0;
        req.func_cb = NULL;
        ret = twl4030_madc_conversion(&req);
        if (ret < 0)
index 73282ce..a3109a6 100644 (file)
@@ -75,6 +75,9 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
                                        (s32)report_val);
        }
 
+       sensor_hub_get_feature(st->hsdev, st->power_state.report_id,
+                                       st->power_state.index,
+                                       &state_val);
        return 0;
 }
 EXPORT_SYMBOL(hid_sensor_power_state);
index 40f4e49..fa034a3 100644 (file)
@@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
        struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                        &gyro_state->common_attributes, val, val2);
-                       ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                        &gyro_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index 258a973..bfbf4d4 100644 (file)
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
                        &indio_dev->event_interface->dev_attr_list);
                kfree(postfix);
 
+               if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
+                       continue;
+
                if (ret)
                        return ret;
 
index d833d55..c749700 100644 (file)
@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                else if (name && index >= 0) {
                        pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
                                np->full_name, name ? name : "", index);
-                       return chan;
+                       return NULL;
                }
 
                /*
@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
                 */
                np = np->parent;
                if (np && !of_get_property(np, "io-channel-ranges", NULL))
-                       break;
+                       return NULL;
        }
+
        return chan;
 }
 
@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
                if (channel != NULL)
                        return channel;
        }
+
        return iio_channel_get_sys(name, channel_name);
 }
 EXPORT_SYMBOL_GPL(iio_channel_get);
index f34c943..96e71e1 100644 (file)
@@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
        struct als_state *als_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                                &als_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                                &als_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index d203ef4..412bae8 100644 (file)
@@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
        struct prox_state *prox_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                                &prox_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                                &prox_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index fe063a0..7525699 100644 (file)
@@ -52,6 +52,7 @@
 
 struct tcs3472_data {
        struct i2c_client *client;
+       struct mutex lock;
        u8 enable;
        u8 control;
        u8 atime;
@@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
 
        switch (mask) {
        case IIO_CHAN_INFO_RAW:
+               if (iio_buffer_enabled(indio_dev))
+                       return -EBUSY;
+
+               mutex_lock(&data->lock);
                ret = tcs3472_req_data(data);
-               if (ret < 0)
+               if (ret < 0) {
+                       mutex_unlock(&data->lock);
                        return ret;
+               }
                ret = i2c_smbus_read_word_data(data->client, chan->address);
+               mutex_unlock(&data->lock);
                if (ret < 0)
                        return ret;
                *val = ret;
@@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client,
        data = iio_priv(indio_dev);
        i2c_set_clientdata(client, indio_dev);
        data->client = client;
+       mutex_init(&data->lock);
 
        indio_dev->dev.parent = &client->dev;
        indio_dev->info = &tcs3472_info;
index 09ea5c4..ea08313 100644 (file)
@@ -373,8 +373,6 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
 {
        struct ak8975_data *data = iio_priv(indio_dev);
        struct i2c_client *client = data->client;
-       u16 meas_reg;
-       s16 raw;
        int ret;
 
        mutex_lock(&data->lock);
@@ -422,16 +420,11 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
                dev_err(&client->dev, "Read axis data fails\n");
                goto exit;
        }
-       meas_reg = ret;
 
        mutex_unlock(&data->lock);
 
-       /* Endian conversion of the measured values. */
-       raw = (s16) (le16_to_cpu(meas_reg));
-
        /* Clamp to valid range. */
-       raw = clamp_t(s16, raw, -4096, 4095);
-       *val = raw;
+       *val = clamp_t(s16, ret, -4096, 4095);
        return IIO_VAL_INT;
 
 exit:
index 41cf29e..b2b0937 100644 (file)
@@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
        struct magn_3d_state *magn_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                        &magn_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                        &magn_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index 1cd190c..2c0d2a4 100644 (file)
@@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
        struct press_state *press_state = iio_priv(indio_dev);
        int report_id = -1;
        u32 address;
-       int ret;
        int ret_type;
        s32 poll_value;
 
@@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev,
                ret_type = IIO_VAL_INT;
                break;
        case IIO_CHAN_INFO_SAMP_FREQ:
-               ret = hid_sensor_read_samp_freq_value(
+               ret_type = hid_sensor_read_samp_freq_value(
                                &press_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        case IIO_CHAN_INFO_HYSTERESIS:
-               ret = hid_sensor_read_raw_hyst_value(
+               ret_type = hid_sensor_read_raw_hyst_value(
                                &press_state->common_attributes, val, val2);
-               ret_type = IIO_VAL_INT_PLUS_MICRO;
                break;
        default:
                ret_type = -EINVAL;
index ba6d0c5..01b2e0b 100644 (file)
@@ -98,7 +98,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
                        mutex_unlock(&data->lock);
                        if (ret < 0)
                                return ret;
-                       *val = sign_extend32(be32_to_cpu(tmp) >> 12, 23);
+                       *val = be32_to_cpu(tmp) >> 12;
                        return IIO_VAL_INT;
                case IIO_TEMP: /* in 0.0625 celsius / LSB */
                        mutex_lock(&data->lock);
@@ -112,7 +112,7 @@ static int mpl3115_read_raw(struct iio_dev *indio_dev,
                        mutex_unlock(&data->lock);
                        if (ret < 0)
                                return ret;
-                       *val = sign_extend32(be32_to_cpu(tmp) >> 20, 15);
+                       *val = sign_extend32(be32_to_cpu(tmp) >> 20, 11);
                        return IIO_VAL_INT;
                default:
                        return -EINVAL;
@@ -185,7 +185,7 @@ static const struct iio_chan_spec mpl3115_channels[] = {
                        BIT(IIO_CHAN_INFO_SCALE),
                .scan_index = 0,
                .scan_type = {
-                       .sign = 's',
+                       .sign = 'u',
                        .realbits = 20,
                        .storagebits = 32,
                        .shift = 12,
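
With the corrected scan_type, the 12-bit temperature sign-extends from bit
11 instead of bit 15. For example, given a raw big-endian sample whose top
bytes are 0xf8 0x00:

        /* (0xf8000000 >> 20) = 0xf80; bit 11 is set, so the result is
         * negative: sign_extend32(0xf80, 11) = -128, i.e. -8.0 C at
         * 0.0625 C per LSB */
        *val = sign_extend32(0xf8000000 >> 20, 11);
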
index 8af33cf..2d5cbf4 100644 (file)
@@ -734,7 +734,7 @@ static struct net_device *c2_pseudo_netdev_init(struct c2_dev *c2dev)
        /* change ethxxx to iwxxx */
        strcpy(name, "iw");
        strcat(name, &c2dev->netdev->name[3]);
-       netdev = alloc_netdev(0, name, setup);
+       netdev = alloc_netdev(0, name, NET_NAME_UNKNOWN, setup);
        if (!netdev) {
                printk(KERN_ERR PFX "%s -  etherdev alloc failed",
                        __func__);
index 5e153f6..c2fb71c 100644 (file)
@@ -79,9 +79,10 @@ static int dack_mode = 1;
 module_param(dack_mode, int, 0644);
 MODULE_PARM_DESC(dack_mode, "Delayed ack mode (default=1)");
 
-int c4iw_max_read_depth = 8;
+uint c4iw_max_read_depth = 32;
 module_param(c4iw_max_read_depth, int, 0644);
-MODULE_PARM_DESC(c4iw_max_read_depth, "Per-connection max ORD/IRD (default=8)");
+MODULE_PARM_DESC(c4iw_max_read_depth,
+                "Per-connection max ORD/IRD (default=32)");
 
 static int enable_tcp_timestamps;
 module_param(enable_tcp_timestamps, int, 0644);
@@ -432,8 +433,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
  */
 static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
 {
+       struct c4iw_ep *ep = handle;
+
        printk(KERN_ERR MOD "ARP failure duing connect\n");
        kfree_skb(skb);
+       connect_reply_upcall(ep, -EHOSTUNREACH);
+       state_set(&ep->com, DEAD);
+       remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
+       cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
+       dst_release(ep->dst);
+       cxgb4_l2t_release(ep->l2t);
+       c4iw_put_ep(&ep->com);
 }
 
 /*
@@ -465,7 +475,8 @@ static void send_flowc(struct c4iw_ep *ep, struct sk_buff *skb)
                                          16)) | FW_WR_FLOWID(ep->hwtid));
 
        flowc->mnemval[0].mnemonic = FW_FLOWC_MNEM_PFNVFN;
-       flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);
+       flowc->mnemval[0].val = cpu_to_be32(FW_PFVF_CMD_PFN
+                                           (ep->com.dev->rdev.lldi.pf));
        flowc->mnemval[1].mnemonic = FW_FLOWC_MNEM_CH;
        flowc->mnemval[1].val = cpu_to_be32(ep->tx_chan);
        flowc->mnemval[2].mnemonic = FW_FLOWC_MNEM_PORT;
@@ -658,7 +669,7 @@ static int send_connect(struct c4iw_ep *ep)
                opt2 |= T5_OPT_2_VALID;
                opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
        }
-       t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure);
+       t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
        if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
                if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -812,6 +823,8 @@ static void send_mpa_req(struct c4iw_ep *ep, struct sk_buff *skb,
        if (mpa_rev_to_use == 2) {
                mpa->private_data_size = htons(ntohs(mpa->private_data_size) +
                                               sizeof (struct mpa_v2_conn_params));
+               PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+                    ep->ord);
                mpa_v2_params.ird = htons((u16)ep->ird);
                mpa_v2_params.ord = htons((u16)ep->ord);
 
@@ -1181,8 +1194,8 @@ static int connect_request_upcall(struct c4iw_ep *ep)
                        sizeof(struct mpa_v2_conn_params);
        } else {
                /* this means MPA_v1 is used. Send max supported */
-               event.ord = c4iw_max_read_depth;
-               event.ird = c4iw_max_read_depth;
+               event.ord = cur_max_read_depth(ep->com.dev);
+               event.ird = cur_max_read_depth(ep->com.dev);
                event.private_data_len = ep->plen;
                event.private_data = ep->mpa_pkt + sizeof(struct mpa_message);
        }
@@ -1246,6 +1259,8 @@ static int update_rx_credits(struct c4iw_ep *ep, u32 credits)
        return credits;
 }
 
+#define RELAXED_IRD_NEGOTIATION 1
+
 static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
 {
        struct mpa_message *mpa;
@@ -1357,17 +1372,33 @@ static int process_mpa_reply(struct c4iw_ep *ep, struct sk_buff *skb)
                                MPA_V2_IRD_ORD_MASK;
                        resp_ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
+                       PDBG("%s responder ird %u ord %u ep ird %u ord %u\n",
+                            __func__, resp_ird, resp_ord, ep->ird, ep->ord);
 
                        /*
                         * This is a double-check. Ideally, below checks are
                         * not required since ird/ord stuff has been taken
                         * care of in c4iw_accept_cr
                         */
-                       if ((ep->ird < resp_ord) || (ep->ord > resp_ird)) {
+                       if (ep->ird < resp_ord) {
+                               if (RELAXED_IRD_NEGOTIATION && resp_ord <=
+                                   ep->com.dev->rdev.lldi.max_ordird_qp)
+                                       ep->ird = resp_ord;
+                               else
+                                       insuff_ird = 1;
+                       } else if (ep->ird > resp_ord) {
+                               ep->ird = resp_ord;
+                       }
+                       if (ep->ord > resp_ird) {
+                               if (RELAXED_IRD_NEGOTIATION)
+                                       ep->ord = resp_ird;
+                               else
+                                       insuff_ird = 1;
+                       }
+                       if (insuff_ird) {
                                err = -ENOMEM;
                                ep->ird = resp_ord;
                                ep->ord = resp_ird;
-                               insuff_ird = 1;
                        }
 
                        if (ntohs(mpa_v2_params->ird) &
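
The negotiation above replaces the old hard failure with a "relaxed" mode: a too-large responder ORD can be absorbed by raising the local IRD (bounded by the adapter's per-QP limit), and a too-small responder IRD simply lowers the local ORD; a compile-time flag keeps the strict behaviour reachable. The same decision table as a standalone sketch, names hypothetical:

    #define RELAXED_IRD_NEGOTIATION 1

    /* Returns 0 on success, -1 if the peer's resources are insufficient. */
    static int negotiate_ird_ord(unsigned *ird, unsigned *ord,
                                 unsigned resp_ird, unsigned resp_ord,
                                 unsigned max_ordird_qp)
    {
            int insufficient = 0;

            if (*ird < resp_ord) {                  /* peer wants more reads in */
                    if (RELAXED_IRD_NEGOTIATION && resp_ord <= max_ordird_qp)
                            *ird = resp_ord;        /* grow, hardware permitting */
                    else
                            insufficient = 1;
            } else if (*ird > resp_ord) {
                    *ird = resp_ord;                /* shrink to what peer uses */
            }
            if (*ord > resp_ird) {                  /* peer accepts fewer reads */
                    if (RELAXED_IRD_NEGOTIATION)
                            *ord = resp_ird;
                    else
                            insufficient = 1;
            }
            if (insufficient) {
                    *ird = resp_ord;                /* mirror the peer and fail */
                    *ord = resp_ird;
                    return -1;                      /* maps to -ENOMEM above */
            }
            return 0;
    }
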
@@ -1570,6 +1601,8 @@ static void process_mpa_request(struct c4iw_ep *ep, struct sk_buff *skb)
                                MPA_V2_IRD_ORD_MASK;
                        ep->ord = ntohs(mpa_v2_params->ord) &
                                MPA_V2_IRD_ORD_MASK;
+                       PDBG("%s initiator ird %u ord %u\n", __func__, ep->ird,
+                            ep->ord);
                        if (ntohs(mpa_v2_params->ird) & MPA_V2_PEER2PEER_MODEL)
                                if (peer2peer) {
                                        if (ntohs(mpa_v2_params->ord) &
@@ -1789,6 +1822,20 @@ static int is_neg_adv(unsigned int status)
               status == CPL_ERR_KEEPALV_NEG_ADVICE;
 }
 
+static char *neg_adv_str(unsigned int status)
+{
+       switch (status) {
+       case CPL_ERR_RTX_NEG_ADVICE:
+               return "Retransmit timeout";
+       case CPL_ERR_PERSIST_NEG_ADVICE:
+               return "Persist timeout";
+       case CPL_ERR_KEEPALV_NEG_ADVICE:
+               return "Keepalive timeout";
+       default:
+               return "Unknown";
+       }
+}
+
 static void set_tcp_window(struct c4iw_ep *ep, struct port_info *pi)
 {
        ep->snd_win = snd_win;
@@ -1987,8 +2034,9 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
             status, status2errno(status));
 
        if (is_neg_adv(status)) {
-               printk(KERN_WARNING MOD "Connection problems for atid %u\n",
-                       atid);
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "Connection problems for atid %u status %u (%s)\n",
+                        atid, status, neg_adv_str(status));
                return 0;
        }
 
@@ -2180,7 +2228,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
        PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
        BUG_ON(skb_cloned(skb));
        skb_trim(skb, sizeof(struct cpl_tid_release));
-       skb_get(skb);
        release_tid(&dev->rdev, hwtid, skb);
        return;
 }
@@ -2464,8 +2511,9 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = lookup_tid(t, tid);
        if (is_neg_adv(req->status)) {
-               PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
-                    ep->hwtid);
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "Negative advice on abort - tid %u status %d (%s)\n",
+                        ep->hwtid, req->status, neg_adv_str(req->status));
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -2723,8 +2771,8 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        BUG_ON(!qp);
 
        set_bit(ULP_ACCEPT, &ep->com.history);
-       if ((conn_param->ord > c4iw_max_read_depth) ||
-           (conn_param->ird > c4iw_max_read_depth)) {
+       if ((conn_param->ord > cur_max_read_depth(ep->com.dev)) ||
+           (conn_param->ird > cur_max_read_depth(ep->com.dev))) {
                abort_connection(ep, NULL, GFP_KERNEL);
                err = -EINVAL;
                goto err;
@@ -2732,31 +2780,41 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
 
        if (ep->mpa_attr.version == 2 && ep->mpa_attr.enhanced_rdma_conn) {
                if (conn_param->ord > ep->ird) {
-                       ep->ird = conn_param->ird;
-                       ep->ord = conn_param->ord;
-                       send_mpa_reject(ep, conn_param->private_data,
-                                       conn_param->private_data_len);
-                       abort_connection(ep, NULL, GFP_KERNEL);
-                       err = -ENOMEM;
-                       goto err;
+                       if (RELAXED_IRD_NEGOTIATION) {
+                               ep->ord = ep->ird;
+                       } else {
+                               ep->ird = conn_param->ird;
+                               ep->ord = conn_param->ord;
+                               send_mpa_reject(ep, conn_param->private_data,
+                                               conn_param->private_data_len);
+                               abort_connection(ep, NULL, GFP_KERNEL);
+                               err = -ENOMEM;
+                               goto err;
+                       }
                }
-               if (conn_param->ird > ep->ord) {
-                       if (!ep->ord)
-                               conn_param->ird = 1;
-                       else {
+               if (conn_param->ird < ep->ord) {
+                       if (RELAXED_IRD_NEGOTIATION &&
+                           ep->ord <= h->rdev.lldi.max_ordird_qp) {
+                               conn_param->ird = ep->ord;
+                       } else {
                                abort_connection(ep, NULL, GFP_KERNEL);
                                err = -ENOMEM;
                                goto err;
                        }
                }
-
        }
        ep->ird = conn_param->ird;
        ep->ord = conn_param->ord;
 
-       if (ep->mpa_attr.version != 2)
+       if (ep->mpa_attr.version == 1) {
                if (peer2peer && ep->ird == 0)
                        ep->ird = 1;
+       } else {
+               if (peer2peer &&
+                   (ep->mpa_attr.p2p_type != FW_RI_INIT_P2PTYPE_DISABLED) &&
+                   (p2p_type == FW_RI_INIT_P2PTYPE_READ_REQ) && ep->ord == 0)
+                       ep->ird = 1;
+       }
 
        PDBG("%s %d ird %d ord %d\n", __func__, __LINE__, ep->ird, ep->ord);
 
@@ -2795,6 +2853,7 @@ int c4iw_accept_cr(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        return 0;
 err1:
        ep->com.cm_id = NULL;
+       abort_connection(ep, NULL, GFP_KERNEL);
        cm_id->rem_ref(cm_id);
 err:
        mutex_unlock(&ep->com.mutex);
@@ -2878,8 +2937,8 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
        int iptype;
        int iwpm_err = 0;
 
-       if ((conn_param->ord > c4iw_max_read_depth) ||
-           (conn_param->ird > c4iw_max_read_depth)) {
+       if ((conn_param->ord > cur_max_read_depth(dev)) ||
+           (conn_param->ird > cur_max_read_depth(dev))) {
                err = -EINVAL;
                goto out;
        }
@@ -3859,8 +3918,9 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
                return 0;
        }
        if (is_neg_adv(req->status)) {
-               PDBG("%s neg_adv_abort ep %p tid %u\n", __func__, ep,
-                    ep->hwtid);
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "Negative advice on abort - tid %u status %d (%s)\n",
+                        ep->hwtid, req->status, neg_adv_str(req->status));
                kfree_skb(skb);
                return 0;
        }
@@ -3917,7 +3977,7 @@ int __init c4iw_cm_init(void)
        return 0;
 }
 
-void __exit c4iw_cm_term(void)
+void c4iw_cm_term(void)
 {
        WARN_ON(!list_empty(&timeout_list));
        flush_workqueue(workq);
index c04292c..0f773e7 100644 (file)
@@ -633,11 +633,15 @@ proc_cqe:
                wq->sq.cidx = (uint16_t)idx;
                PDBG("%s completing sq idx %u\n", __func__, wq->sq.cidx);
                *cookie = wq->sq.sw_sq[wq->sq.cidx].wr_id;
+               if (c4iw_wr_log)
+                       c4iw_log_wr_stats(wq, hw_cqe);
                t4_sq_consume(wq);
        } else {
                PDBG("%s completing rq idx %u\n", __func__, wq->rq.cidx);
                *cookie = wq->rq.sw_rq[wq->rq.cidx].wr_id;
                BUG_ON(t4_rq_empty(wq));
+               if (c4iw_wr_log)
+                       c4iw_log_wr_stats(wq, hw_cqe);
                t4_rq_consume(wq);
                goto skip_cqe;
        }
@@ -895,7 +899,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        /*
         * Make actual HW queue 2x to avoid cidx_inc overflows.
         */
-       hwentries = min(entries * 2, T4_MAX_IQ_SIZE);
+       hwentries = min(entries * 2, rhp->rdev.hw_queue.t4_max_iq_size);
 
        /*
         * Make HW queue at least 64 entries so GTS updates aren't too
@@ -909,14 +913,8 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        /*
         * memsize must be a multiple of the page size if its a user cq.
         */
-       if (ucontext) {
+       if (ucontext)
                memsize = roundup(memsize, PAGE_SIZE);
-               hwentries = memsize / sizeof *chp->cq.queue;
-               while (hwentries > T4_MAX_IQ_SIZE) {
-                       memsize -= PAGE_SIZE;
-                       hwentries = memsize / sizeof *chp->cq.queue;
-               }
-       }
        chp->cq.size = hwentries;
        chp->cq.memsize = memsize;
        chp->cq.vector = vector;
index dd93aad..f25df52 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/moduleparam.h>
 #include <linux/debugfs.h>
 #include <linux/vmalloc.h>
+#include <linux/math64.h>
 
 #include <rdma/ib_verbs.h>
 
@@ -55,6 +56,15 @@ module_param(allow_db_coalescing_on_t5, int, 0644);
 MODULE_PARM_DESC(allow_db_coalescing_on_t5,
                 "Allow DB Coalescing on T5 (default = 0)");
 
+int c4iw_wr_log = 0;
+module_param(c4iw_wr_log, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log, "Enables logging of work request timing data.");
+
+int c4iw_wr_log_size_order = 12;
+module_param(c4iw_wr_log_size_order, int, 0444);
+MODULE_PARM_DESC(c4iw_wr_log_size_order,
+                "Number of entries (log2) in the work request timing log.");
+
 struct uld_ctx {
        struct list_head entry;
        struct cxgb4_lld_info lldi;
@@ -103,6 +113,117 @@ static ssize_t debugfs_read(struct file *file, char __user *buf, size_t count,
        return simple_read_from_buffer(buf, count, ppos, d->buf, d->pos);
 }
 
+void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe)
+{
+       struct wr_log_entry le;
+       int idx;
+
+       if (!wq->rdev->wr_log)
+               return;
+
+       idx = (atomic_inc_return(&wq->rdev->wr_log_idx) - 1) &
+               (wq->rdev->wr_log_size - 1);
+       le.poll_sge_ts = cxgb4_read_sge_timestamp(wq->rdev->lldi.ports[0]);
+       getnstimeofday(&le.poll_host_ts);
+       le.valid = 1;
+       le.cqe_sge_ts = CQE_TS(cqe);
+       if (SQ_TYPE(cqe)) {
+               le.qid = wq->sq.qid;
+               le.opcode = CQE_OPCODE(cqe);
+               le.post_host_ts = wq->sq.sw_sq[wq->sq.cidx].host_ts;
+               le.post_sge_ts = wq->sq.sw_sq[wq->sq.cidx].sge_ts;
+               le.wr_id = CQE_WRID_SQ_IDX(cqe);
+       } else {
+               le.qid = wq->rq.qid;
+               le.opcode = FW_RI_RECEIVE;
+               le.post_host_ts = wq->rq.sw_rq[wq->rq.cidx].host_ts;
+               le.post_sge_ts = wq->rq.sw_rq[wq->rq.cidx].sge_ts;
+               le.wr_id = CQE_WRID_MSN(cqe);
+       }
+       wq->rdev->wr_log[idx] = le;
+}
+
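
c4iw_log_wr_stats() claims a slot by post-incrementing a shared counter and masking with (wr_log_size - 1); the mask only wraps correctly because the log size is a power of two (it is allocated as 1 << c4iw_wr_log_size_order in c4iw_rdev_open() below). Old entries are silently overwritten and no lock is taken. The same slot-claim idiom in portable C11, as a sketch:

    #include <stdatomic.h>
    #include <stdint.h>

    #define LOG_SIZE (1u << 12)            /* must be a power of two */

    struct entry { uint64_t ts; int valid; };

    static struct entry log_ring[LOG_SIZE];
    static atomic_uint log_idx;

    static void log_claim_and_fill(uint64_t ts)
    {
            /* fetch_add returns the old value (the kernel's atomic_inc_return()
             * minus one); the mask wraps it into range */
            unsigned int idx = atomic_fetch_add(&log_idx, 1) & (LOG_SIZE - 1u);

            log_ring[idx].ts = ts;         /* oldest entry silently overwritten */
            log_ring[idx].valid = 1;
    }
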
+static int wr_log_show(struct seq_file *seq, void *v)
+{
+       struct c4iw_dev *dev = seq->private;
+       struct timespec prev_ts = {0, 0};
+       struct wr_log_entry *lep;
+       int prev_ts_set = 0;
+       int idx, end;
+
+#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+
+       idx = atomic_read(&dev->rdev.wr_log_idx) &
+               (dev->rdev.wr_log_size - 1);
+       end = idx - 1;
+       if (end < 0)
+               end = dev->rdev.wr_log_size - 1;
+       lep = &dev->rdev.wr_log[idx];
+       while (idx != end) {
+               if (lep->valid) {
+                       if (!prev_ts_set) {
+                               prev_ts_set = 1;
+                               prev_ts = lep->poll_host_ts;
+                       }
+                       seq_printf(seq, "%04u: sec %lu nsec %lu qid %u opcode "
+                                  "%u %s 0x%x host_wr_delta sec %lu nsec %lu "
+                                  "post_sge_ts 0x%llx cqe_sge_ts 0x%llx "
+                                  "poll_sge_ts 0x%llx post_poll_delta_ns %llu "
+                                  "cqe_poll_delta_ns %llu\n",
+                                  idx,
+                                  timespec_sub(lep->poll_host_ts,
+                                               prev_ts).tv_sec,
+                                  timespec_sub(lep->poll_host_ts,
+                                               prev_ts).tv_nsec,
+                                  lep->qid, lep->opcode,
+                                  lep->opcode == FW_RI_RECEIVE ?
+                                                       "msn" : "wrid",
+                                  lep->wr_id,
+                                  timespec_sub(lep->poll_host_ts,
+                                               lep->post_host_ts).tv_sec,
+                                  timespec_sub(lep->poll_host_ts,
+                                               lep->post_host_ts).tv_nsec,
+                                  lep->post_sge_ts, lep->cqe_sge_ts,
+                                  lep->poll_sge_ts,
+                                  ts2ns(lep->poll_sge_ts - lep->post_sge_ts),
+                                  ts2ns(lep->poll_sge_ts - lep->cqe_sge_ts));
+                       prev_ts = lep->poll_host_ts;
+               }
+               idx++;
+               if (idx > (dev->rdev.wr_log_size - 1))
+                       idx = 0;
+               lep = &dev->rdev.wr_log[idx];
+       }
+#undef ts2ns
+       return 0;
+}
+
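
The ts2ns() helper converts SGE timestamp deltas to nanoseconds: cclk_ps is the core-clock period in picoseconds, so ticks * cclk_ps is a duration in picoseconds and dividing by 1000 yields nanoseconds. For example, with a hypothetical 250 MHz core clock (cclk_ps = 4000), a post-to-poll delta of 2500 ticks comes out as 2500 * 4000 / 1000 = 10000 ns.
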
+static int wr_log_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, wr_log_show, inode->i_private);
+}
+
+static ssize_t wr_log_clear(struct file *file, const char __user *buf,
+                           size_t count, loff_t *pos)
+{
+       struct c4iw_dev *dev = ((struct seq_file *)file->private_data)->private;
+       int i;
+
+       if (dev->rdev.wr_log)
+               for (i = 0; i < dev->rdev.wr_log_size; i++)
+                       dev->rdev.wr_log[i].valid = 0;
+       return count;
+}
+
+static const struct file_operations wr_log_debugfs_fops = {
+       .owner   = THIS_MODULE,
+       .open    = wr_log_open,
+       .release = single_release,
+       .read    = seq_read,
+       .llseek  = seq_lseek,
+       .write   = wr_log_clear,
+};
+
 static int dump_qp(int id, void *p, void *data)
 {
        struct c4iw_qp *qp = p;
@@ -241,12 +362,32 @@ static int dump_stag(int id, void *p, void *data)
        struct c4iw_debugfs_data *stagd = data;
        int space;
        int cc;
+       struct fw_ri_tpte tpte;
+       int ret;
 
        space = stagd->bufsize - stagd->pos - 1;
        if (space == 0)
                return 1;
 
-       cc = snprintf(stagd->buf + stagd->pos, space, "0x%x\n", id<<8);
+       ret = cxgb4_read_tpte(stagd->devp->rdev.lldi.ports[0], (u32)id<<8,
+                             (__be32 *)&tpte);
+       if (ret) {
+               dev_err(&stagd->devp->rdev.lldi.pdev->dev,
+                       "%s cxgb4_read_tpte err %d\n", __func__, ret);
+               return ret;
+       }
+       cc = snprintf(stagd->buf + stagd->pos, space,
+                     "stag: idx 0x%x valid %d key 0x%x state %d pdid %d "
+                     "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+                     (u32)id<<8,
+                     G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+                     G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+                     G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+                     ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+                     ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
        if (cc < space)
                stagd->pos += cc;
        return 0;
@@ -259,7 +400,7 @@ static int stag_release(struct inode *inode, struct file *file)
                printk(KERN_INFO "%s null stagd?\n", __func__);
                return 0;
        }
-       kfree(stagd->buf);
+       vfree(stagd->buf);
        kfree(stagd);
        return 0;
 }
@@ -282,8 +423,8 @@ static int stag_open(struct inode *inode, struct file *file)
        idr_for_each(&stagd->devp->mmidr, count_idrs, &count);
        spin_unlock_irq(&stagd->devp->lock);
 
-       stagd->bufsize = count * sizeof("0x12345678\n");
-       stagd->buf = kmalloc(stagd->bufsize, GFP_KERNEL);
+       stagd->bufsize = count * 256;
+       stagd->buf = vmalloc(stagd->bufsize);
        if (!stagd->buf) {
                ret = -ENOMEM;
                goto err1;
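
The stag dump now emits a fully parsed TPTE (budgeted at 256 bytes per entry above) instead of a fixed "0x12345678\n" hex id, so the buffer scales with the number of stags and can exceed what kmalloc() will reliably satisfy on a large adapter; hence the matching switch to vmalloc() here and vfree() in stag_release() above.
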
@@ -348,6 +489,7 @@ static int stats_show(struct seq_file *seq, void *v)
                   dev->rdev.stats.act_ofld_conn_fails);
        seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
                   dev->rdev.stats.pas_ofld_conn_fails);
+       seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
        return 0;
 }
 
@@ -583,6 +725,12 @@ static int setup_debugfs(struct c4iw_dev *devp)
        if (de && de->d_inode)
                de->d_inode->i_size = 4096;
 
+       if (c4iw_wr_log) {
+               de = debugfs_create_file("wr_log", S_IWUSR, devp->debugfs_root,
+                                        (void *)devp, &wr_log_debugfs_fops);
+               if (de && de->d_inode)
+                       de->d_inode->i_size = 4096;
+       }
        return 0;
 }
 
@@ -696,6 +844,20 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
                pr_err(MOD "error allocating status page\n");
                goto err4;
        }
+
+       if (c4iw_wr_log) {
+               rdev->wr_log = kzalloc((1 << c4iw_wr_log_size_order) *
+                                      sizeof(*rdev->wr_log), GFP_KERNEL);
+               if (rdev->wr_log) {
+                       rdev->wr_log_size = 1 << c4iw_wr_log_size_order;
+                       atomic_set(&rdev->wr_log_idx, 0);
+               } else {
+                       pr_err(MOD "error allocating wr_log. Logging disabled\n");
+               }
+       }
+
+       rdev->status_page->db_off = 0;
+
        return 0;
 err4:
        c4iw_rqtpool_destroy(rdev);
@@ -709,6 +871,7 @@ err1:
 
 static void c4iw_rdev_close(struct c4iw_rdev *rdev)
 {
+       kfree(rdev->wr_log);
        free_page((unsigned long)rdev->status_page);
        c4iw_pblpool_destroy(rdev);
        c4iw_rqtpool_destroy(rdev);
@@ -729,7 +892,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
        if (ctx->dev->rdev.oc_mw_kva)
                iounmap(ctx->dev->rdev.oc_mw_kva);
        ib_dealloc_device(&ctx->dev->ibdev);
-       iwpm_exit(RDMA_NL_C4IW);
        ctx->dev = NULL;
 }
 
@@ -768,6 +930,27 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        }
        devp->rdev.lldi = *infop;
 
+       /* init various hw-queue params based on lld info */
+       PDBG("%s: Ing. padding boundary is %d, egrsstatuspagesize = %d\n",
+            __func__, devp->rdev.lldi.sge_ingpadboundary,
+            devp->rdev.lldi.sge_egrstatuspagesize);
+
+       devp->rdev.hw_queue.t4_eq_status_entries =
+               devp->rdev.lldi.sge_ingpadboundary > 64 ? 2 : 1;
+       devp->rdev.hw_queue.t4_max_eq_size = 65520;
+       devp->rdev.hw_queue.t4_max_iq_size = 65520;
+       devp->rdev.hw_queue.t4_max_rq_size = 8192 -
+               devp->rdev.hw_queue.t4_eq_status_entries - 1;
+       devp->rdev.hw_queue.t4_max_sq_size =
+               devp->rdev.hw_queue.t4_max_eq_size -
+               devp->rdev.hw_queue.t4_eq_status_entries - 1;
+       devp->rdev.hw_queue.t4_max_qp_depth =
+               devp->rdev.hw_queue.t4_max_rq_size;
+       devp->rdev.hw_queue.t4_max_cq_depth =
+               devp->rdev.hw_queue.t4_max_iq_size - 2;
+       devp->rdev.hw_queue.t4_stat_len =
+               devp->rdev.lldi.sge_egrstatuspagesize;
+
        /*
         * For T5 devices, we map all of BAR2 with WC.
         * For T4 devices with onchip qp mem, we map only that part
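
These queue limits were compile-time constants until now (see the t4.h hunk near the end of this patch, which deletes T4_MAX_EQ_SIZE and friends); deriving them from the LLD info lets the sizes track whatever the firmware actually reports. As a worked example, assuming sge_ingpadboundary = 64 and sge_egrstatuspagesize = 64: t4_eq_status_entries = 1, so t4_max_rq_size = 8192 - 1 - 1 = 8190, t4_max_sq_size = 65520 - 1 - 1 = 65518, t4_max_qp_depth = 8190 and t4_max_cq_depth = 65520 - 2 = 65518.
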
@@ -818,6 +1001,7 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
        mutex_init(&devp->rdev.stats.lock);
        mutex_init(&devp->db_mutex);
        INIT_LIST_HEAD(&devp->db_fc_list);
+       devp->avail_ird = devp->rdev.lldi.max_ird_adapter;
 
        if (c4iw_debugfs_root) {
                devp->debugfs_root = debugfs_create_dir(
@@ -826,12 +1010,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
                setup_debugfs(devp);
        }
 
-       ret = iwpm_init(RDMA_NL_C4IW);
-       if (ret) {
-               pr_err("port mapper initialization failed with %d\n", ret);
-               ib_dealloc_device(&devp->ibdev);
-               return ERR_PTR(ret);
-       }
 
        return devp;
 }
@@ -1332,6 +1510,15 @@ static int __init c4iw_init_module(void)
                pr_err("%s[%u]: Failed to add netlink callback\n"
                       , __func__, __LINE__);
 
+       err = iwpm_init(RDMA_NL_C4IW);
+       if (err) {
+               pr_err("port mapper initialization failed with %d\n", err);
+               ibnl_remove_client(RDMA_NL_C4IW);
+               c4iw_cm_term();
+               debugfs_remove_recursive(c4iw_debugfs_root);
+               return err;
+       }
+
        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
 
        return 0;
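
Moving iwpm_init() from c4iw_alloc() into module init (its iwpm_exit() counterpart moves from c4iw_dealloc() into c4iw_exit_module() below) means the new error leg must unwind, in reverse order, everything c4iw_init_module() set up before it. A self-contained sketch of that unwind idiom, with hypothetical step names:

    #include <errno.h>

    static int  step_a(void) { return 0; }        /* e.g. c4iw_cm_init() */
    static int  step_b(void) { return 0; }        /* e.g. ibnl_add_client() */
    static int  step_c(void) { return -ENOMEM; }  /* e.g. iwpm_init() failing */
    static void undo_b(void) { }
    static void undo_a(void) { }

    static int example_init(void)
    {
            int err;

            err = step_a();
            if (err)
                    return err;
            err = step_b();
            if (err)
                    goto out_a;
            err = step_c();
            if (err)
                    goto out_b;                   /* tear down b, then a */
            return 0;

    out_b:
            undo_b();
    out_a:
            undo_a();
            return err;
    }
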
@@ -1349,6 +1536,7 @@ static void __exit c4iw_exit_module(void)
        }
        mutex_unlock(&dev_mutex);
        cxgb4_unregister_uld(CXGB4_ULD_RDMA);
+       iwpm_exit(RDMA_NL_C4IW);
        ibnl_remove_client(RDMA_NL_C4IW);
        c4iw_cm_term();
        debugfs_remove_recursive(c4iw_debugfs_root);
index d61d0a1..fbe6051 100644 (file)
 
 #include "iw_cxgb4.h"
 
+static void print_tpte(struct c4iw_dev *dev, u32 stag)
+{
+       int ret;
+       struct fw_ri_tpte tpte;
+
+       ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,
+                             (__be32 *)&tpte);
+       if (ret) {
+               dev_err(&dev->rdev.lldi.pdev->dev,
+                       "%s cxgb4_read_tpte err %d\n", __func__, ret);
+               return;
+       }
+       PDBG("stag idx 0x%x valid %d key 0x%x state %d pdid %d "
+              "perm 0x%x ps %d len 0x%llx va 0x%llx\n",
+              stag & 0xffffff00,
+              G_FW_RI_TPTE_VALID(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_STAGKEY(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_STAGSTATE(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_PDID(ntohl(tpte.valid_to_pdid)),
+              G_FW_RI_TPTE_PERM(ntohl(tpte.locread_to_qpid)),
+              G_FW_RI_TPTE_PS(ntohl(tpte.locread_to_qpid)),
+              ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo),
+              ((u64)ntohl(tpte.va_hi) << 32) | ntohl(tpte.va_lo_fbo));
+}
+
+static void dump_err_cqe(struct c4iw_dev *dev, struct t4_cqe *err_cqe)
+{
+       __be64 *p = (void *)err_cqe;
+
+       dev_err(&dev->rdev.lldi.pdev->dev,
+               "AE qpid %d opcode %d status 0x%x "
+               "type %d len 0x%x wrid.hi 0x%x wrid.lo 0x%x\n",
+               CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
+               CQE_STATUS(err_cqe), CQE_TYPE(err_cqe), ntohl(err_cqe->len),
+               CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+
+       PDBG("%016llx %016llx %016llx %016llx\n",
+            be64_to_cpu(p[0]), be64_to_cpu(p[1]), be64_to_cpu(p[2]),
+            be64_to_cpu(p[3]));
+
+       /*
+        * Ingress WRITE and READ_RESP errors provide
+        * the offending stag, so parse and log it.
+        */
+       if (RQ_TYPE(err_cqe) && (CQE_OPCODE(err_cqe) == FW_RI_RDMA_WRITE ||
+                                CQE_OPCODE(err_cqe) == FW_RI_READ_RESP))
+               print_tpte(dev, CQE_WRID_STAG(err_cqe));
+}
+
 static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
                          struct c4iw_qp *qhp,
                          struct t4_cqe *err_cqe,
@@ -44,11 +93,7 @@ static void post_qp_event(struct c4iw_dev *dev, struct c4iw_cq *chp,
        struct c4iw_qp_attributes attrs;
        unsigned long flag;
 
-       printk(KERN_ERR MOD "AE qpid 0x%x opcode %d status 0x%x "
-              "type %d wrid.hi 0x%x wrid.lo 0x%x\n",
-              CQE_QPID(err_cqe), CQE_OPCODE(err_cqe),
-              CQE_STATUS(err_cqe), CQE_TYPE(err_cqe),
-              CQE_WRID_HI(err_cqe), CQE_WRID_LOW(err_cqe));
+       dump_err_cqe(dev, err_cqe);
 
        if (qhp->attr.state == C4IW_QP_STATE_RTS) {
                attrs.next_state = C4IW_QP_STATE_TERMINATE;
index 125bc5d..b5678ac 100644 (file)
@@ -139,6 +139,29 @@ struct c4iw_stats {
        u64  pas_ofld_conn_fails;
 };
 
+struct c4iw_hw_queue {
+       int t4_eq_status_entries;
+       int t4_max_eq_size;
+       int t4_max_iq_size;
+       int t4_max_rq_size;
+       int t4_max_sq_size;
+       int t4_max_qp_depth;
+       int t4_max_cq_depth;
+       int t4_stat_len;
+};
+
+struct wr_log_entry {
+       struct timespec post_host_ts;
+       struct timespec poll_host_ts;
+       u64 post_sge_ts;
+       u64 cqe_sge_ts;
+       u64 poll_sge_ts;
+       u16 qid;
+       u16 wr_id;
+       u8 opcode;
+       u8 valid;
+};
+
 struct c4iw_rdev {
        struct c4iw_resource resource;
        unsigned long qpshift;
@@ -156,7 +179,11 @@ struct c4iw_rdev {
        unsigned long oc_mw_pa;
        void __iomem *oc_mw_kva;
        struct c4iw_stats stats;
+       struct c4iw_hw_queue hw_queue;
        struct t4_dev_status_page *status_page;
+       atomic_t wr_log_idx;
+       struct wr_log_entry *wr_log;
+       int wr_log_size;
 };
 
 static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
@@ -166,7 +193,7 @@ static inline int c4iw_fatal_error(struct c4iw_rdev *rdev)
 
 static inline int c4iw_num_stags(struct c4iw_rdev *rdev)
 {
-       return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));
+       return (int)(rdev->lldi.vr->stag.size >> 5);
 }
 
 #define C4IW_WR_TO (30*HZ)
@@ -237,6 +264,7 @@ struct c4iw_dev {
        struct idr atid_idr;
        struct idr stid_idr;
        struct list_head db_fc_list;
+       u32 avail_ird;
 };
 
 static inline struct c4iw_dev *to_c4iw_dev(struct ib_device *ibdev)
@@ -318,6 +346,13 @@ static inline void remove_handle_nolock(struct c4iw_dev *rhp,
        _remove_handle(rhp, idr, id, 0);
 }
 
+extern uint c4iw_max_read_depth;
+
+static inline int cur_max_read_depth(struct c4iw_dev *dev)
+{
+       return min(dev->rdev.lldi.max_ordird_qp, c4iw_max_read_depth);
+}
+
 struct c4iw_pd {
        struct ib_pd ibpd;
        u32 pdid;
@@ -908,7 +943,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
 int c4iw_register_device(struct c4iw_dev *dev);
 void c4iw_unregister_device(struct c4iw_dev *dev);
 int __init c4iw_cm_init(void);
-void __exit c4iw_cm_term(void);
+void c4iw_cm_term(void);
 void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
                               struct c4iw_dev_ucontext *uctx);
 void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
@@ -991,7 +1026,8 @@ void c4iw_ev_dispatch(struct c4iw_dev *dev, struct t4_cqe *err_cqe);
 
 extern struct cxgb4_client t4c_client;
 extern c4iw_handler_func c4iw_handlers[NUM_CPL_CMDS];
-extern int c4iw_max_read_depth;
+extern void c4iw_log_wr_stats(struct t4_wq *wq, struct t4_cqe *cqe);
+extern int c4iw_wr_log;
 extern int db_fc_threshold;
 extern int db_coalescing_threshold;
 extern int use_dsgl;
index b1d3053..72e3b69 100644 (file)
@@ -318,14 +318,16 @@ static int c4iw_query_device(struct ib_device *ibdev,
        props->vendor_id = (u32)dev->rdev.lldi.pdev->vendor;
        props->vendor_part_id = (u32)dev->rdev.lldi.pdev->device;
        props->max_mr_size = T4_MAX_MR_SIZE;
-       props->max_qp = T4_MAX_NUM_QP;
-       props->max_qp_wr = T4_MAX_QP_DEPTH;
+       props->max_qp = dev->rdev.lldi.vr->qp.size / 2;
+       props->max_qp_wr = dev->rdev.hw_queue.t4_max_qp_depth;
        props->max_sge = T4_MAX_RECV_SGE;
        props->max_sge_rd = 1;
-       props->max_qp_rd_atom = c4iw_max_read_depth;
-       props->max_qp_init_rd_atom = c4iw_max_read_depth;
-       props->max_cq = T4_MAX_NUM_CQ;
-       props->max_cqe = T4_MAX_CQ_DEPTH;
+       props->max_res_rd_atom = dev->rdev.lldi.max_ird_adapter;
+       props->max_qp_rd_atom = min(dev->rdev.lldi.max_ordird_qp,
+                                   c4iw_max_read_depth);
+       props->max_qp_init_rd_atom = props->max_qp_rd_atom;
+       props->max_cq = dev->rdev.lldi.vr->qp.size;
+       props->max_cqe = dev->rdev.hw_queue.t4_max_cq_depth;
        props->max_mr = c4iw_num_stags(&dev->rdev);
        props->max_pd = T4_MAX_NUM_PD;
        props->local_ca_ack_delay = 0;
index 086f62f..c158fcc 100644 (file)
@@ -58,6 +58,31 @@ static int max_fr_immd = T4_MAX_FR_IMMD;
 module_param(max_fr_immd, int, 0644);
 MODULE_PARM_DESC(max_fr_immd, "fastreg threshold for using DSGL instead of immediate");
 
+static int alloc_ird(struct c4iw_dev *dev, u32 ird)
+{
+       int ret = 0;
+
+       spin_lock_irq(&dev->lock);
+       if (ird <= dev->avail_ird)
+               dev->avail_ird -= ird;
+       else
+               ret = -ENOMEM;
+       spin_unlock_irq(&dev->lock);
+
+       if (ret)
+               dev_warn(&dev->rdev.lldi.pdev->dev,
+                        "device IRD resources exhausted\n");
+
+       return ret;
+}
+
+static void free_ird(struct c4iw_dev *dev, int ird)
+{
+       spin_lock_irq(&dev->lock);
+       dev->avail_ird += ird;
+       spin_unlock_irq(&dev->lock);
+}
+
 static void set_state(struct c4iw_qp *qhp, enum c4iw_qp_state state)
 {
        unsigned long flag;
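
alloc_ird()/free_ird() implement a simple counted pool: the adapter-wide IRD budget (seeded from max_ird_adapter in the c4iw_alloc() hunk and reported by the new "AVAILABLE IRD" stats line) is debited when a QP goes to RTS in rdma_init() and credited back on failure or in c4iw_destroy_qp(), both in hunks below. A userspace model of the same accounting, with a mutex standing in for the spinlock:

    #include <pthread.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int avail_ird = 32768;       /* hypothetical adapter budget */

    static int alloc_ird(unsigned int ird)
    {
            int ret = 0;

            pthread_mutex_lock(&lock);
            if (ird <= avail_ird)
                    avail_ird -= ird;            /* debit the shared budget */
            else
                    ret = -1;                    /* pool exhausted: fail the QP */
            pthread_mutex_unlock(&lock);
            return ret;
    }

    static void free_ird(unsigned int ird)
    {
            pthread_mutex_lock(&lock);
            avail_ird += ird;                    /* credit it back */
            pthread_mutex_unlock(&lock);
    }
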
@@ -180,9 +205,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        }
 
        /*
-        * RQT must be a power of 2.
+        * RQT must be a power of 2 and at least 16 deep.
         */
-       wq->rq.rqt_size = roundup_pow_of_two(wq->rq.size);
+       wq->rq.rqt_size = roundup_pow_of_two(max_t(u16, wq->rq.size, 16));
        wq->rq.rqt_hwaddr = c4iw_rqtpool_alloc(rdev, wq->rq.rqt_size);
        if (!wq->rq.rqt_hwaddr) {
                ret = -ENOMEM;
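
The new floor matters only for small RQs: the old roundup_pow_of_two(wq->rq.size) would yield 8 for a 5-entry RQ, below the 16-entry minimum the updated comment documents, while max_t(u16, 5, 16) rounds to exactly 16; a 20-entry RQ still gets a 32-entry RQT either way.
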
@@ -258,7 +283,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
-       eqsize = wq->sq.size * T4_SQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+       eqsize = wq->sq.size * T4_SQ_NUM_SLOTS +
+               rdev->hw_queue.t4_eq_status_entries;
 
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
@@ -283,7 +309,8 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
        /*
         * eqsize is the number of 64B entries plus the status page size.
         */
-       eqsize = wq->rq.size * T4_RQ_NUM_SLOTS + T4_EQ_STATUS_ENTRIES;
+       eqsize = wq->rq.size * T4_RQ_NUM_SLOTS +
+               rdev->hw_queue.t4_eq_status_entries;
        res->u.sqrq.fetchszm_to_iqid = cpu_to_be32(
                V_FW_RI_RES_WR_HOSTFCMODE(0) |  /* no host cidx updates */
                V_FW_RI_RES_WR_CPRIO(0) |       /* don't keep in chip cache */
@@ -796,6 +823,11 @@ int c4iw_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                                  qhp->sq_sig_all;
                swsqe->flushed = 0;
                swsqe->wr_id = wr->wr_id;
+               if (c4iw_wr_log) {
+                       swsqe->sge_ts = cxgb4_read_sge_timestamp(
+                                       qhp->rhp->rdev.lldi.ports[0]);
+                       getnstimeofday(&swsqe->host_ts);
+               }
 
                init_wr_hdr(wqe, qhp->wq.sq.pidx, fw_opcode, fw_flags, len16);
 
@@ -859,6 +891,13 @@ int c4iw_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
                }
 
                qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].wr_id = wr->wr_id;
+               if (c4iw_wr_log) {
+                       qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].sge_ts =
+                               cxgb4_read_sge_timestamp(
+                                               qhp->rhp->rdev.lldi.ports[0]);
+                       getnstimeofday(
+                               &qhp->wq.rq.sw_rq[qhp->wq.rq.pidx].host_ts);
+               }
 
                wqe->recv.opcode = FW_RI_RECV_WR;
                wqe->recv.r1 = 0;
@@ -1202,12 +1241,20 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
        int ret;
        struct sk_buff *skb;
 
-       PDBG("%s qhp %p qid 0x%x tid %u\n", __func__, qhp, qhp->wq.sq.qid,
-            qhp->ep->hwtid);
+       PDBG("%s qhp %p qid 0x%x tid %u ird %u ord %u\n", __func__, qhp,
+            qhp->wq.sq.qid, qhp->ep->hwtid, qhp->ep->ird, qhp->ep->ord);
 
        skb = alloc_skb(sizeof *wqe, GFP_KERNEL);
-       if (!skb)
-               return -ENOMEM;
+       if (!skb) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       ret = alloc_ird(rhp, qhp->attr.max_ird);
+       if (ret) {
+               qhp->attr.max_ird = 0;
+               kfree_skb(skb);
+               goto out;
+       }
        set_wr_txq(skb, CPL_PRIORITY_DATA, qhp->ep->txq_idx);
 
        wqe = (struct fw_ri_wr *)__skb_put(skb, sizeof(*wqe));
@@ -1258,10 +1305,14 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
 
        ret = c4iw_ofld_send(&rhp->rdev, skb);
        if (ret)
-               goto out;
+               goto err1;
 
        ret = c4iw_wait_for_reply(&rhp->rdev, &qhp->ep->com.wr_wait,
                                  qhp->ep->hwtid, qhp->wq.sq.qid, __func__);
+       if (!ret)
+               goto out;
+err1:
+       free_ird(rhp, qhp->attr.max_ird);
 out:
        PDBG("%s ret %d\n", __func__, ret);
        return ret;
@@ -1306,7 +1357,7 @@ int c4iw_modify_qp(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
                        newattr.max_ord = attrs->max_ord;
                }
                if (mask & C4IW_QP_ATTR_MAX_IRD) {
-                       if (attrs->max_ird > c4iw_max_read_depth) {
+                       if (attrs->max_ird > cur_max_read_depth(rhp)) {
                                ret = -EINVAL;
                                goto out;
                        }
@@ -1529,6 +1580,7 @@ int c4iw_destroy_qp(struct ib_qp *ib_qp)
        if (!list_empty(&qhp->db_fc_entry))
                list_del_init(&qhp->db_fc_entry);
        spin_unlock_irq(&rhp->lock);
+       free_ird(rhp, qhp->attr.max_ird);
 
        ucontext = ib_qp->uobject ?
                   to_c4iw_ucontext(ib_qp->uobject->context) : NULL;
@@ -1569,13 +1621,17 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        if (attrs->cap.max_inline_data > T4_MAX_SEND_INLINE)
                return ERR_PTR(-EINVAL);
 
-       rqsize = roundup(attrs->cap.max_recv_wr + 1, 16);
-       if (rqsize > T4_MAX_RQ_SIZE)
+       if (attrs->cap.max_recv_wr > rhp->rdev.hw_queue.t4_max_rq_size)
                return ERR_PTR(-E2BIG);
+       rqsize = attrs->cap.max_recv_wr + 1;
+       if (rqsize < 8)
+               rqsize = 8;
 
-       sqsize = roundup(attrs->cap.max_send_wr + 1, 16);
-       if (sqsize > T4_MAX_SQ_SIZE)
+       if (attrs->cap.max_send_wr > rhp->rdev.hw_queue.t4_max_sq_size)
                return ERR_PTR(-E2BIG);
+       sqsize = attrs->cap.max_send_wr + 1;
+       if (sqsize < 8)
+               sqsize = 8;
 
        ucontext = pd->uobject ? to_c4iw_ucontext(pd->uobject->context) : NULL;
 
@@ -1583,19 +1639,20 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        if (!qhp)
                return ERR_PTR(-ENOMEM);
        qhp->wq.sq.size = sqsize;
-       qhp->wq.sq.memsize = (sqsize + 1) * sizeof *qhp->wq.sq.queue;
+       qhp->wq.sq.memsize =
+               (sqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+               sizeof(*qhp->wq.sq.queue) + 16 * sizeof(__be64);
        qhp->wq.sq.flush_cidx = -1;
        qhp->wq.rq.size = rqsize;
-       qhp->wq.rq.memsize = (rqsize + 1) * sizeof *qhp->wq.rq.queue;
+       qhp->wq.rq.memsize =
+               (rqsize + rhp->rdev.hw_queue.t4_eq_status_entries) *
+               sizeof(*qhp->wq.rq.queue);
 
        if (ucontext) {
                qhp->wq.sq.memsize = roundup(qhp->wq.sq.memsize, PAGE_SIZE);
                qhp->wq.rq.memsize = roundup(qhp->wq.rq.memsize, PAGE_SIZE);
        }
 
-       PDBG("%s sqsize %u sqmemsize %zu rqsize %u rqmemsize %zu\n",
-            __func__, sqsize, qhp->wq.sq.memsize, rqsize, qhp->wq.rq.memsize);
-
        ret = create_qp(&rhp->rdev, &qhp->wq, &schp->cq, &rchp->cq,
                        ucontext ? &ucontext->uctx : &rhp->rdev.uctx);
        if (ret)
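
The kernel-mode memsize now accounts for the real number of status-page entries plus 16 spare flits (16 * sizeof(__be64) = 128 bytes) instead of a single extra slot. Assuming 64-byte SQ slots (a guess at sizeof(*qhp->wq.sq.queue); the real figure comes from the T4 descriptor layout), sqsize = 128 with one status entry gives (128 + 1) * 64 + 128 = 8384 bytes, which a user-mode QP would round up to three 4 KiB pages.
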
@@ -1619,8 +1676,8 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->attr.enable_rdma_read = 1;
        qhp->attr.enable_rdma_write = 1;
        qhp->attr.enable_bind = 1;
-       qhp->attr.max_ord = 1;
-       qhp->attr.max_ird = 1;
+       qhp->attr.max_ord = 0;
+       qhp->attr.max_ird = 0;
        qhp->sq_sig_all = attrs->sq_sig_type == IB_SIGNAL_ALL_WR;
        spin_lock_init(&qhp->lock);
        mutex_init(&qhp->mutex);
@@ -1714,9 +1771,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
        qhp->ibqp.qp_num = qhp->wq.sq.qid;
        init_timer(&(qhp->timer));
        INIT_LIST_HEAD(&qhp->db_fc_entry);
-       PDBG("%s qhp %p sq_num_entries %d, rq_num_entries %d qpid 0x%0x\n",
-            __func__, qhp, qhp->attr.sq_num_entries, qhp->attr.rq_num_entries,
-            qhp->wq.sq.qid);
+       PDBG("%s sq id %u size %u memsize %zu num_entries %u "
+            "rq id %u size %u memsize %zu num_entries %u\n", __func__,
+            qhp->wq.sq.qid, qhp->wq.sq.size, qhp->wq.sq.memsize,
+            attrs->cap.max_send_wr, qhp->wq.rq.qid, qhp->wq.rq.size,
+            qhp->wq.rq.memsize, attrs->cap.max_recv_wr);
        return &qhp->ibqp;
 err8:
        kfree(mm5);
@@ -1804,5 +1863,11 @@ int c4iw_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
        memset(attr, 0, sizeof *attr);
        memset(init_attr, 0, sizeof *init_attr);
        attr->qp_state = to_ib_qp_state(qhp->attr.state);
+       init_attr->cap.max_send_wr = qhp->attr.sq_num_entries;
+       init_attr->cap.max_recv_wr = qhp->attr.rq_num_entries;
+       init_attr->cap.max_send_sge = qhp->attr.sq_max_sges;
+       init_attr->cap.max_recv_sge = qhp->attr.rq_max_sges;
+       init_attr->cap.max_inline_data = T4_MAX_SEND_INLINE;
+       init_attr->sq_sig_type = qhp->sq_sig_all ? IB_SIGNAL_ALL_WR : 0;
        return 0;
 }
index 68b0a6b..df5edfa 100644 (file)
 #include "t4_msg.h"
 #include "t4fw_ri_api.h"
 
-#define T4_MAX_NUM_QP 65536
-#define T4_MAX_NUM_CQ 65536
 #define T4_MAX_NUM_PD 65536
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
-#define T4_MAX_EQ_SIZE (65520 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_IQ_SIZE (65520 - 1)
-#define T4_MAX_RQ_SIZE (8192 - T4_EQ_STATUS_ENTRIES)
-#define T4_MAX_SQ_SIZE (T4_MAX_EQ_SIZE - 1)
-#define T4_MAX_QP_DEPTH (T4_MAX_RQ_SIZE - 1)
-#define T4_MAX_CQ_DEPTH (T4_MAX_IQ_SIZE - 1)
-#define T4_MAX_NUM_STAG (1<<15)
 #define T4_MAX_MR_SIZE (~0ULL)
 #define T4_PAGESIZE_MASK 0xffff000  /* 4KB-128MB */
 #define T4_STAG_UNSET 0xffffffff
 #define T4_FW_MAJ 0
-#define T4_EQ_STATUS_ENTRIES (L1_CACHE_BYTES > 64 ? 2 : 1)
 #define A_PCIE_MA_SYNC 0x30b4
 
 struct t4_status_page {
@@ -244,8 +233,8 @@ struct t4_cqe {
 #define CQE_WRID_SQ_IDX(x)     ((x)->u.scqe.cidx)
 
 /* generic accessor macros */
-#define CQE_WRID_HI(x)         ((x)->u.gen.wrid_hi)
-#define CQE_WRID_LOW(x)                ((x)->u.gen.wrid_low)
+#define CQE_WRID_HI(x)         (be32_to_cpu((x)->u.gen.wrid_hi))
+#define CQE_WRID_LOW(x)                (be32_to_cpu((x)->u.gen.wrid_low))
 
 /* macros for flit 3 of the cqe */
 #define S_CQE_GENBIT   63
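
The wrid_hi/wrid_low fields sit in the CQE in big-endian wire format; folding be32_to_cpu() into the accessors means callers such as the new dump_err_cqe() print host-order values without each call site remembering the swap. A minimal runnable model of the pattern, using ntohl() as a stand-in for be32_to_cpu():

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cqe_gen { uint32_t wrid_hi, wrid_low; };   /* big-endian on the wire */

    /* accessors hide the byte swap from every call site */
    #define CQE_WRID_HI(x)  (ntohl((x)->wrid_hi))
    #define CQE_WRID_LOW(x) (ntohl((x)->wrid_low))

    int main(void)
    {
            struct cqe_gen c = { htonl(0x12345678u), htonl(0x9abcdef0u) };

            printf("wrid.hi 0x%x wrid.lo 0x%x\n",
                   (unsigned int)CQE_WRID_HI(&c),
                   (unsigned int)CQE_WRID_LOW(&c));
            return 0;
    }
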
@@ -277,6 +266,8 @@ struct t4_swsqe {
        int                     signaled;
        u16                     idx;
        int                     flushed;
+       struct timespec         host_ts;
+       u64                     sge_ts;
 };
 
 static inline pgprot_t t4_pgprot_wc(pgprot_t prot)
@@ -314,6 +305,8 @@ struct t4_sq {
 
 struct t4_swrqe {
        u64 wr_id;
+       struct timespec host_ts;
+       u64 sge_ts;
 };
 
 struct t4_rq {
index 91289a0..5709e77 100644 (file)
@@ -849,6 +849,5 @@ enum {                     /* TCP congestion control algorithms */
 #define G_CONG_CNTRL(x) (((x) >> S_CONG_CNTRL) & M_CONG_CNTRL)
 
 #define CONG_CNTRL_VALID   (1 << 18)
-#define T5_OPT_2_VALID       (1 << 31)
 
 #endif /* _T4FW_RI_API_H_ */
index 8ae4f89..e405627 100644 (file)
@@ -180,7 +180,7 @@ static void handle_responder(struct ib_wc *wc, struct mlx5_cqe64 *cqe,
                struct mlx5_core_srq *msrq = NULL;
 
                if (qp->ibqp.xrcd) {
-                       msrq = mlx5_core_get_srq(&dev->mdev,
+                       msrq = mlx5_core_get_srq(dev->mdev,
                                                 be32_to_cpu(cqe->srqn));
                        srq = to_mibsrq(msrq);
                } else {
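
Almost all of the mlx5 hunks from here on are mechanical fallout from struct mlx5_ib_dev holding a pointer to the core device instead of embedding it, so every &dev->mdev becomes dev->mdev and dev->mdev.field becomes dev->mdev->field. The substantive changes are handle_atomics() switching to a u16 index so the wqe_cnt mask arithmetic matches the 16-bit tail/head counters, and mlx5_MAD_IFC() taking the port as u8 in the mad.c hunk below.
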
@@ -348,7 +348,7 @@ static void handle_atomic(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
                           u16 tail, u16 head)
 {
-       int idx;
+       u16 idx;
 
        do {
                idx = tail & (qp->sq.wqe_cnt - 1);
@@ -364,7 +364,7 @@ static void handle_atomics(struct mlx5_ib_qp *qp, struct mlx5_cqe64 *cqe64,
 
 static void free_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf)
 {
-       mlx5_buf_free(&dev->mdev, &buf->buf);
+       mlx5_buf_free(dev->mdev, &buf->buf);
 }
 
 static void get_sig_err_item(struct mlx5_sig_err_cqe *cqe,
@@ -450,7 +450,7 @@ repoll:
                 * because CQs will be locked while QPs are removed
                 * from the table.
                 */
-               mqp = __mlx5_qp_lookup(&dev->mdev, qpn);
+               mqp = __mlx5_qp_lookup(dev->mdev, qpn);
                if (unlikely(!mqp)) {
                        mlx5_ib_warn(dev, "CQE@CQ %06x for unknown QPN %6x\n",
                                     cq->mcq.cqn, qpn);
@@ -514,11 +514,11 @@ repoll:
        case MLX5_CQE_SIG_ERR:
                sig_err_cqe = (struct mlx5_sig_err_cqe *)cqe64;
 
-               read_lock(&dev->mdev.priv.mr_table.lock);
-               mmr = __mlx5_mr_lookup(&dev->mdev,
+               read_lock(&dev->mdev->priv.mr_table.lock);
+               mmr = __mlx5_mr_lookup(dev->mdev,
                                       mlx5_base_mkey(be32_to_cpu(sig_err_cqe->mkey)));
                if (unlikely(!mmr)) {
-                       read_unlock(&dev->mdev.priv.mr_table.lock);
+                       read_unlock(&dev->mdev->priv.mr_table.lock);
                        mlx5_ib_warn(dev, "CQE@CQ %06x for unknown MR %6x\n",
                                     cq->mcq.cqn, be32_to_cpu(sig_err_cqe->mkey));
                        return -EINVAL;
@@ -536,7 +536,7 @@ repoll:
                             mr->sig->err_item.expected,
                             mr->sig->err_item.actual);
 
-               read_unlock(&dev->mdev.priv.mr_table.lock);
+               read_unlock(&dev->mdev->priv.mr_table.lock);
                goto repoll;
        }
 
@@ -575,8 +575,8 @@ int mlx5_ib_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags)
        mlx5_cq_arm(&to_mcq(ibcq)->mcq,
                    (flags & IB_CQ_SOLICITED_MASK) == IB_CQ_SOLICITED ?
                    MLX5_CQ_DB_REQ_NOT_SOL : MLX5_CQ_DB_REQ_NOT,
-                   to_mdev(ibcq->device)->mdev.priv.uuari.uars[0].map,
-                   MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev.priv.cq_uar_lock));
+                   to_mdev(ibcq->device)->mdev->priv.uuari.uars[0].map,
+                   MLX5_GET_DOORBELL_LOCK(&to_mdev(ibcq->device)->mdev->priv.cq_uar_lock));
 
        return 0;
 }
@@ -586,7 +586,7 @@ static int alloc_cq_buf(struct mlx5_ib_dev *dev, struct mlx5_ib_cq_buf *buf,
 {
        int err;
 
-       err = mlx5_buf_alloc(&dev->mdev, nent * cqe_size,
+       err = mlx5_buf_alloc(dev->mdev, nent * cqe_size,
                             PAGE_SIZE * 2, &buf->buf);
        if (err)
                return err;
@@ -691,7 +691,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
 {
        int err;
 
-       err = mlx5_db_alloc(&dev->mdev, &cq->db);
+       err = mlx5_db_alloc(dev->mdev, &cq->db);
        if (err)
                return err;
 
@@ -716,7 +716,7 @@ static int create_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq,
        mlx5_fill_page_array(&cq->buf.buf, (*cqb)->pas);
 
        (*cqb)->ctx.log_pg_sz = cq->buf.buf.page_shift - MLX5_ADAPTER_PAGE_SHIFT;
-       *index = dev->mdev.priv.uuari.uars[0].index;
+       *index = dev->mdev->priv.uuari.uars[0].index;
 
        return 0;
 
@@ -724,14 +724,14 @@ err_buf:
        free_cq_buf(dev, &cq->buf);
 
 err_db:
-       mlx5_db_free(&dev->mdev, &cq->db);
+       mlx5_db_free(dev->mdev, &cq->db);
        return err;
 }
 
 static void destroy_cq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_cq *cq)
 {
        free_cq_buf(dev, &cq->buf);
-       mlx5_db_free(&dev->mdev, &cq->db);
+       mlx5_db_free(dev->mdev, &cq->db);
 }
 
 struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
@@ -752,7 +752,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
                return ERR_PTR(-EINVAL);
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries > dev->mdev.caps.max_cqes)
+       if (entries > dev->mdev->caps.max_cqes)
                return ERR_PTR(-EINVAL);
 
        cq = kzalloc(sizeof(*cq), GFP_KERNEL);
@@ -789,7 +789,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
        cqb->ctx.c_eqn = cpu_to_be16(eqn);
        cqb->ctx.db_record_addr = cpu_to_be64(cq->db.dma);
 
-       err = mlx5_core_create_cq(&dev->mdev, &cq->mcq, cqb, inlen);
+       err = mlx5_core_create_cq(dev->mdev, &cq->mcq, cqb, inlen);
        if (err)
                goto err_cqb;
 
@@ -809,7 +809,7 @@ struct ib_cq *mlx5_ib_create_cq(struct ib_device *ibdev, int entries,
        return &cq->ibcq;
 
 err_cmd:
-       mlx5_core_destroy_cq(&dev->mdev, &cq->mcq);
+       mlx5_core_destroy_cq(dev->mdev, &cq->mcq);
 
 err_cqb:
        mlx5_vfree(cqb);
@@ -834,7 +834,7 @@ int mlx5_ib_destroy_cq(struct ib_cq *cq)
        if (cq->uobject)
                context = cq->uobject->context;
 
-       mlx5_core_destroy_cq(&dev->mdev, &mcq->mcq);
+       mlx5_core_destroy_cq(dev->mdev, &mcq->mcq);
        if (context)
                destroy_cq_user(mcq, context);
        else
@@ -919,7 +919,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
        int err;
        u32 fsel;
 
-       if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
+       if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_CQ_MODER))
                return -ENOSYS;
 
        in = kzalloc(sizeof(*in), GFP_KERNEL);
@@ -931,7 +931,7 @@ int mlx5_ib_modify_cq(struct ib_cq *cq, u16 cq_count, u16 cq_period)
        in->ctx.cq_period = cpu_to_be16(cq_period);
        in->ctx.cq_max_count = cpu_to_be16(cq_count);
        in->field_select = cpu_to_be32(fsel);
-       err = mlx5_core_modify_cq(&dev->mdev, &mcq->mcq, in, sizeof(*in));
+       err = mlx5_core_modify_cq(dev->mdev, &mcq->mcq, in, sizeof(*in));
        kfree(in);
 
        if (err)
@@ -1074,7 +1074,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        int uninitialized_var(cqe_size);
        unsigned long flags;
 
-       if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
+       if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_RESIZE_CQ)) {
                pr_info("Firmware does not support resize CQ\n");
                return -ENOSYS;
        }
@@ -1083,7 +1083,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
                return -EINVAL;
 
        entries = roundup_pow_of_two(entries + 1);
-       if (entries > dev->mdev.caps.max_cqes + 1)
+       if (entries > dev->mdev->caps.max_cqes + 1)
                return -EINVAL;
 
        if (entries == ibcq->cqe + 1)
@@ -1128,7 +1128,7 @@ int mlx5_ib_resize_cq(struct ib_cq *ibcq, int entries, struct ib_udata *udata)
        in->hdr.opmod = cpu_to_be16(MLX5_CQ_OPMOD_RESIZE);
        in->cqn = cpu_to_be32(cq->mcq.cqn);
 
-       err = mlx5_core_modify_cq(&dev->mdev, &cq->mcq, in, inlen);
+       err = mlx5_core_modify_cq(dev->mdev, &cq->mcq, in, inlen);
        if (err)
                goto ex_alloc;
 
index 5c8938b..b514bbb 100644 (file)
@@ -41,7 +41,7 @@ enum {
 };
 
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-                int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+                u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
                 void *in_mad, void *response_mad)
 {
        u8 op_modifier = 0;
@@ -54,7 +54,7 @@ int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
        if (ignore_bkey || !in_wc)
                op_modifier |= 0x2;
 
-       return mlx5_core_mad_ifc(&dev->mdev, in_mad, response_mad, op_modifier, port);
+       return mlx5_core_mad_ifc(dev->mdev, in_mad, response_mad, op_modifier, port);
 }
 
 int mlx5_ib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -129,7 +129,7 @@ int mlx5_query_ext_port_caps(struct mlx5_ib_dev *dev, u8 port)
 
        packet_error = be16_to_cpu(out_mad->status);
 
-       dev->mdev.caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
+       dev->mdev->caps.ext_port_cap[port - 1] = (!err && !packet_error) ?
                MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO : 0;
 
 out:
index 364d4b6..d8907b2 100644 (file)
@@ -54,96 +54,17 @@ MODULE_DESCRIPTION("Mellanox Connect-IB HCA IB driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(DRIVER_VERSION);
 
-static int prof_sel = 2;
-module_param_named(prof_sel, prof_sel, int, 0444);
-MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+static int deprecated_prof_sel = 2;
+module_param_named(prof_sel, deprecated_prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Deprecated here. Moved to module mlx5_core");
 
 static char mlx5_version[] =
        DRIVER_NAME ": Mellanox Connect-IB Infiniband driver v"
        DRIVER_VERSION " (" DRIVER_RELDATE ")\n";
 
-static struct mlx5_profile profile[] = {
-       [0] = {
-               .mask           = 0,
-       },
-       [1] = {
-               .mask           = MLX5_PROF_MASK_QP_SIZE,
-               .log_max_qp     = 12,
-       },
-       [2] = {
-               .mask           = MLX5_PROF_MASK_QP_SIZE |
-                                 MLX5_PROF_MASK_MR_CACHE,
-               .log_max_qp     = 17,
-               .mr_cache[0]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[1]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[2]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[3]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[4]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[5]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[6]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[7]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[8]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[9]    = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[10]   = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[11]   = {
-                       .size   = 500,
-                       .limit  = 250
-               },
-               .mr_cache[12]   = {
-                       .size   = 64,
-                       .limit  = 32
-               },
-               .mr_cache[13]   = {
-                       .size   = 32,
-                       .limit  = 16
-               },
-               .mr_cache[14]   = {
-                       .size   = 16,
-                       .limit  = 8
-               },
-               .mr_cache[15]   = {
-                       .size   = 8,
-                       .limit  = 4
-               },
-       },
-};
-
 int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 {
-       struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
        struct mlx5_eq *eq, *n;
        int err = -ENOENT;
 
@@ -163,7 +84,7 @@ int mlx5_vector2eqn(struct mlx5_ib_dev *dev, int vector, int *eqn, int *irqn)
 
 static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
 {
-       struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
        char name[MLX5_MAX_EQ_NAME];
        struct mlx5_eq *eq, *n;
        int ncomp_vec;
@@ -182,9 +103,9 @@ static int alloc_comp_eqs(struct mlx5_ib_dev *dev)
                }
 
                snprintf(name, MLX5_MAX_EQ_NAME, "mlx5_comp%d", i);
-               err = mlx5_create_map_eq(&dev->mdev, eq,
+               err = mlx5_create_map_eq(dev->mdev, eq,
                                         i + MLX5_EQ_VEC_COMP_BASE, nent, 0,
-                                        name, &dev->mdev.priv.uuari.uars[0]);
+                                        name, &dev->mdev->priv.uuari.uars[0]);
                if (err) {
                        kfree(eq);
                        goto clean;
@@ -204,7 +125,7 @@ clean:
        list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
-               if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+               if (mlx5_destroy_unmap_eq(dev->mdev, eq))
                        mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
@@ -215,14 +136,14 @@ clean:
 
 static void free_comp_eqs(struct mlx5_ib_dev *dev)
 {
-       struct mlx5_eq_table *table = &dev->mdev.priv.eq_table;
+       struct mlx5_eq_table *table = &dev->mdev->priv.eq_table;
        struct mlx5_eq *eq, *n;
 
        spin_lock(&table->lock);
        list_for_each_entry_safe(eq, n, &dev->eqs_list, list) {
                list_del(&eq->list);
                spin_unlock(&table->lock);
-               if (mlx5_destroy_unmap_eq(&dev->mdev, eq))
+               if (mlx5_destroy_unmap_eq(dev->mdev, eq))
                        mlx5_ib_warn(dev, "failed to destroy EQ 0x%x\n", eq->eqn);
                kfree(eq);
                spin_lock(&table->lock);
@@ -255,14 +176,14 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
 
        memset(props, 0, sizeof(*props));
 
-       props->fw_ver = ((u64)fw_rev_maj(&dev->mdev) << 32) |
-               (fw_rev_min(&dev->mdev) << 16) |
-               fw_rev_sub(&dev->mdev);
+       props->fw_ver = ((u64)fw_rev_maj(dev->mdev) << 32) |
+               (fw_rev_min(dev->mdev) << 16) |
+               fw_rev_sub(dev->mdev);
        props->device_cap_flags    = IB_DEVICE_CHANGE_PHY_PORT |
                IB_DEVICE_PORT_ACTIVE_EVENT             |
                IB_DEVICE_SYS_IMAGE_GUID                |
                IB_DEVICE_RC_RNR_NAK_GEN;
-       flags = dev->mdev.caps.flags;
+       flags = dev->mdev->caps.flags;
        if (flags & MLX5_DEV_CAP_FLAG_BAD_PKEY_CNTR)
                props->device_cap_flags |= IB_DEVICE_BAD_PKEY_CNTR;
        if (flags & MLX5_DEV_CAP_FLAG_BAD_QKEY_CNTR)
@@ -292,30 +213,30 @@ static int mlx5_ib_query_device(struct ib_device *ibdev,
        memcpy(&props->sys_image_guid, out_mad->data +  4, 8);
 
        props->max_mr_size         = ~0ull;
-       props->page_size_cap       = dev->mdev.caps.min_page_sz;
-       props->max_qp              = 1 << dev->mdev.caps.log_max_qp;
-       props->max_qp_wr           = dev->mdev.caps.max_wqes;
-       max_rq_sg = dev->mdev.caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
-       max_sq_sg = (dev->mdev.caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
+       props->page_size_cap       = dev->mdev->caps.min_page_sz;
+       props->max_qp              = 1 << dev->mdev->caps.log_max_qp;
+       props->max_qp_wr           = dev->mdev->caps.max_wqes;
+       max_rq_sg = dev->mdev->caps.max_rq_desc_sz / sizeof(struct mlx5_wqe_data_seg);
+       max_sq_sg = (dev->mdev->caps.max_sq_desc_sz - sizeof(struct mlx5_wqe_ctrl_seg)) /
                sizeof(struct mlx5_wqe_data_seg);
        props->max_sge = min(max_rq_sg, max_sq_sg);
-       props->max_cq              = 1 << dev->mdev.caps.log_max_cq;
-       props->max_cqe             = dev->mdev.caps.max_cqes - 1;
-       props->max_mr              = 1 << dev->mdev.caps.log_max_mkey;
-       props->max_pd              = 1 << dev->mdev.caps.log_max_pd;
-       props->max_qp_rd_atom      = dev->mdev.caps.max_ra_req_qp;
-       props->max_qp_init_rd_atom = dev->mdev.caps.max_ra_res_qp;
+       props->max_cq              = 1 << dev->mdev->caps.log_max_cq;
+       props->max_cqe             = dev->mdev->caps.max_cqes - 1;
+       props->max_mr              = 1 << dev->mdev->caps.log_max_mkey;
+       props->max_pd              = 1 << dev->mdev->caps.log_max_pd;
+       props->max_qp_rd_atom      = dev->mdev->caps.max_ra_req_qp;
+       props->max_qp_init_rd_atom = dev->mdev->caps.max_ra_res_qp;
        props->max_res_rd_atom     = props->max_qp_rd_atom * props->max_qp;
-       props->max_srq             = 1 << dev->mdev.caps.log_max_srq;
-       props->max_srq_wr          = dev->mdev.caps.max_srq_wqes - 1;
+       props->max_srq             = 1 << dev->mdev->caps.log_max_srq;
+       props->max_srq_wr          = dev->mdev->caps.max_srq_wqes - 1;
        props->max_srq_sge         = max_rq_sg - 1;
        props->max_fast_reg_page_list_len = (unsigned int)-1;
-       props->local_ca_ack_delay  = dev->mdev.caps.local_ca_ack_delay;
+       props->local_ca_ack_delay  = dev->mdev->caps.local_ca_ack_delay;
        props->atomic_cap          = IB_ATOMIC_NONE;
        props->masked_atomic_cap   = IB_ATOMIC_NONE;
        props->max_pkeys           = be16_to_cpup((__be16 *)(out_mad->data + 28));
-       props->max_mcast_grp       = 1 << dev->mdev.caps.log_max_mcg;
-       props->max_mcast_qp_attach = dev->mdev.caps.max_qp_mcg;
+       props->max_mcast_grp       = 1 << dev->mdev->caps.log_max_mcg;
+       props->max_mcast_qp_attach = dev->mdev->caps.max_qp_mcg;
        props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
                                           props->max_mcast_grp;
        props->max_map_per_fmr = INT_MAX; /* no limit in ConnectIB */
@@ -336,7 +257,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
        int ext_active_speed;
        int err = -ENOMEM;
 
-       if (port < 1 || port > dev->mdev.caps.num_ports) {
+       if (port < 1 || port > dev->mdev->caps.num_ports) {
                mlx5_ib_warn(dev, "invalid port number %d\n", port);
                return -EINVAL;
        }
@@ -367,8 +288,8 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
        props->phys_state       = out_mad->data[33] >> 4;
        props->port_cap_flags   = be32_to_cpup((__be32 *)(out_mad->data + 20));
        props->gid_tbl_len      = out_mad->data[50];
-       props->max_msg_sz       = 1 << to_mdev(ibdev)->mdev.caps.log_max_msg;
-       props->pkey_tbl_len     = to_mdev(ibdev)->mdev.caps.port[port - 1].pkey_table_len;
+       props->max_msg_sz       = 1 << to_mdev(ibdev)->mdev->caps.log_max_msg;
+       props->pkey_tbl_len     = to_mdev(ibdev)->mdev->caps.port[port - 1].pkey_table_len;
        props->bad_pkey_cntr    = be16_to_cpup((__be16 *)(out_mad->data + 46));
        props->qkey_viol_cntr   = be16_to_cpup((__be16 *)(out_mad->data + 48));
        props->active_width     = out_mad->data[31] & 0xf;
@@ -395,7 +316,7 @@ int mlx5_ib_query_port(struct ib_device *ibdev, u8 port,
 
        /* If reported active speed is QDR, check if is FDR-10 */
        if (props->active_speed == 4) {
-               if (dev->mdev.caps.ext_port_cap[port - 1] &
+               if (dev->mdev->caps.ext_port_cap[port - 1] &
                    MLX_EXT_PORT_CAP_FLAG_EXTENDED_PORT_INFO) {
                        init_query_mad(in_mad);
                        in_mad->attr_id = MLX5_ATTR_EXTENDED_PORT_INFO;
@@ -508,7 +429,7 @@ static int mlx5_ib_modify_device(struct ib_device *ibdev, int mask,
         * a 144 trap.  If cmd fails, just ignore.
         */
        memcpy(&in, props->node_desc, 64);
-       err = mlx5_core_access_reg(&dev->mdev, &in, sizeof(in), &out,
+       err = mlx5_core_access_reg(dev->mdev, &in, sizeof(in), &out,
                                   sizeof(out), MLX5_REG_NODE_DESC, 0, 1);
        if (err)
                return err;
@@ -535,7 +456,7 @@ static int mlx5_ib_modify_port(struct ib_device *ibdev, u8 port, int mask,
        tmp = (attr.port_cap_flags | props->set_port_cap_mask) &
                ~props->clr_port_cap_mask;
 
-       err = mlx5_set_port_caps(&dev->mdev, port, tmp);
+       err = mlx5_set_port_caps(dev->mdev, port, tmp);
 
 out:
        mutex_unlock(&dev->cap_mask_mutex);
@@ -557,7 +478,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        int uuarn;
        int err;
        int i;
-       int reqlen;
+       size_t reqlen;
 
        if (!dev->ib_active)
                return ERR_PTR(-EAGAIN);
@@ -591,14 +512,14 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
        num_uars = req.total_num_uuars / MLX5_NON_FP_BF_REGS_PER_PAGE;
        gross_uuars = num_uars * MLX5_BF_REGS_PER_PAGE;
-       resp.qp_tab_size      = 1 << dev->mdev.caps.log_max_qp;
-       resp.bf_reg_size      = dev->mdev.caps.bf_reg_size;
+       resp.qp_tab_size      = 1 << dev->mdev->caps.log_max_qp;
+       resp.bf_reg_size      = dev->mdev->caps.bf_reg_size;
        resp.cache_line_size  = L1_CACHE_BYTES;
-       resp.max_sq_desc_sz = dev->mdev.caps.max_sq_desc_sz;
-       resp.max_rq_desc_sz = dev->mdev.caps.max_rq_desc_sz;
-       resp.max_send_wqebb = dev->mdev.caps.max_wqes;
-       resp.max_recv_wr = dev->mdev.caps.max_wqes;
-       resp.max_srq_recv_wr = dev->mdev.caps.max_srq_wqes;
+       resp.max_sq_desc_sz = dev->mdev->caps.max_sq_desc_sz;
+       resp.max_rq_desc_sz = dev->mdev->caps.max_rq_desc_sz;
+       resp.max_send_wqebb = dev->mdev->caps.max_wqes;
+       resp.max_recv_wr = dev->mdev->caps.max_wqes;
+       resp.max_srq_recv_wr = dev->mdev->caps.max_srq_wqes;
 
        context = kzalloc(sizeof(*context), GFP_KERNEL);
        if (!context)
@@ -635,7 +556,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        }
 
        for (i = 0; i < num_uars; i++) {
-               err = mlx5_cmd_alloc_uar(&dev->mdev, &uars[i].index);
+               err = mlx5_cmd_alloc_uar(dev->mdev, &uars[i].index);
                if (err)
                        goto out_count;
        }
@@ -644,7 +565,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
        mutex_init(&context->db_page_mutex);
 
        resp.tot_uuars = req.total_num_uuars;
-       resp.num_ports = dev->mdev.caps.num_ports;
+       resp.num_ports = dev->mdev->caps.num_ports;
        err = ib_copy_to_udata(udata, &resp,
                               sizeof(resp) - sizeof(resp.reserved));
        if (err)
@@ -658,7 +579,7 @@ static struct ib_ucontext *mlx5_ib_alloc_ucontext(struct ib_device *ibdev,
 
 out_uars:
        for (i--; i >= 0; i--)
-               mlx5_cmd_free_uar(&dev->mdev, uars[i].index);
+               mlx5_cmd_free_uar(dev->mdev, uars[i].index);
 out_count:
        kfree(uuari->count);
 
@@ -681,7 +602,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
        int i;
 
        for (i = 0; i < uuari->num_uars; i++) {
-               if (mlx5_cmd_free_uar(&dev->mdev, uuari->uars[i].index))
+               if (mlx5_cmd_free_uar(dev->mdev, uuari->uars[i].index))
                        mlx5_ib_warn(dev, "failed to free UAR 0x%x\n", uuari->uars[i].index);
        }
 
@@ -695,7 +616,7 @@ static int mlx5_ib_dealloc_ucontext(struct ib_ucontext *ibcontext)
 
 static phys_addr_t uar_index2pfn(struct mlx5_ib_dev *dev, int index)
 {
-       return (pci_resource_start(dev->mdev.pdev, 0) >> PAGE_SHIFT) + index;
+       return (pci_resource_start(dev->mdev->pdev, 0) >> PAGE_SHIFT) + index;
 }
 
 static int get_command(unsigned long offset)
@@ -773,7 +694,7 @@ static int alloc_pa_mkey(struct mlx5_ib_dev *dev, u32 *key, u32 pdn)
        seg->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        seg->start_addr = 0;
 
-       err = mlx5_core_create_mkey(&dev->mdev, &mr, in, sizeof(*in),
+       err = mlx5_core_create_mkey(dev->mdev, &mr, in, sizeof(*in),
                                    NULL, NULL, NULL);
        if (err) {
                mlx5_ib_warn(dev, "failed to create mkey, %d\n", err);
@@ -798,7 +719,7 @@ static void free_pa_mkey(struct mlx5_ib_dev *dev, u32 key)
 
        memset(&mr, 0, sizeof(mr));
        mr.key = key;
-       err = mlx5_core_destroy_mkey(&dev->mdev, &mr);
+       err = mlx5_core_destroy_mkey(dev->mdev, &mr);
        if (err)
                mlx5_ib_warn(dev, "failed to destroy mkey 0x%x\n", key);
 }
@@ -815,7 +736,7 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       err = mlx5_core_alloc_pd(&to_mdev(ibdev)->mdev, &pd->pdn);
+       err = mlx5_core_alloc_pd(to_mdev(ibdev)->mdev, &pd->pdn);
        if (err) {
                kfree(pd);
                return ERR_PTR(err);
@@ -824,14 +745,14 @@ static struct ib_pd *mlx5_ib_alloc_pd(struct ib_device *ibdev,
        if (context) {
                resp.pdn = pd->pdn;
                if (ib_copy_to_udata(udata, &resp, sizeof(resp))) {
-                       mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+                       mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(-EFAULT);
                }
        } else {
                err = alloc_pa_mkey(to_mdev(ibdev), &pd->pa_lkey, pd->pdn);
                if (err) {
-                       mlx5_core_dealloc_pd(&to_mdev(ibdev)->mdev, pd->pdn);
+                       mlx5_core_dealloc_pd(to_mdev(ibdev)->mdev, pd->pdn);
                        kfree(pd);
                        return ERR_PTR(err);
                }
@@ -848,7 +769,7 @@ static int mlx5_ib_dealloc_pd(struct ib_pd *pd)
        if (!pd->uobject)
                free_pa_mkey(mdev, mpd->pa_lkey);
 
-       mlx5_core_dealloc_pd(&mdev->mdev, mpd->pdn);
+       mlx5_core_dealloc_pd(mdev->mdev, mpd->pdn);
        kfree(mpd);
 
        return 0;
@@ -859,7 +780,7 @@ static int mlx5_ib_mcg_attach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        int err;
 
-       err = mlx5_core_attach_mcg(&dev->mdev, gid, ibqp->qp_num);
+       err = mlx5_core_attach_mcg(dev->mdev, gid, ibqp->qp_num);
        if (err)
                mlx5_ib_warn(dev, "failed attaching QPN 0x%x, MGID %pI6\n",
                             ibqp->qp_num, gid->raw);
@@ -872,7 +793,7 @@ static int mlx5_ib_mcg_detach(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
        int err;
 
-       err = mlx5_core_detach_mcg(&dev->mdev, gid, ibqp->qp_num);
+       err = mlx5_core_detach_mcg(dev->mdev, gid, ibqp->qp_num);
        if (err)
                mlx5_ib_warn(dev, "failed detaching QPN 0x%x, MGID %pI6\n",
                             ibqp->qp_num, gid->raw);
@@ -906,7 +827,7 @@ static int init_node_data(struct mlx5_ib_dev *dev)
        if (err)
                goto out;
 
-       dev->mdev.rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
+       dev->mdev->rev_id = be32_to_cpup((__be32 *)(out_mad->data + 32));
        memcpy(&dev->ib_dev.node_guid, out_mad->data + 12, 8);
 
 out:
@@ -921,7 +842,7 @@ static ssize_t show_fw_pages(struct device *device, struct device_attribute *att
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-       return sprintf(buf, "%d\n", dev->mdev.priv.fw_pages);
+       return sprintf(buf, "%d\n", dev->mdev->priv.fw_pages);
 }
 
 static ssize_t show_reg_pages(struct device *device,
@@ -930,7 +851,7 @@ static ssize_t show_reg_pages(struct device *device,
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
 
-       return sprintf(buf, "%d\n", dev->mdev.priv.reg_pages);
+       return sprintf(buf, "%d\n", dev->mdev->priv.reg_pages);
 }
 
 static ssize_t show_hca(struct device *device, struct device_attribute *attr,
@@ -938,7 +859,7 @@ static ssize_t show_hca(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "MT%d\n", dev->mdev.pdev->device);
+       return sprintf(buf, "MT%d\n", dev->mdev->pdev->device);
 }
 
 static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
@@ -946,8 +867,8 @@ static ssize_t show_fw_ver(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(&dev->mdev),
-                      fw_rev_min(&dev->mdev), fw_rev_sub(&dev->mdev));
+       return sprintf(buf, "%d.%d.%d\n", fw_rev_maj(dev->mdev),
+                      fw_rev_min(dev->mdev), fw_rev_sub(dev->mdev));
 }
 
 static ssize_t show_rev(struct device *device, struct device_attribute *attr,
@@ -955,7 +876,7 @@ static ssize_t show_rev(struct device *device, struct device_attribute *attr,
 {
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
-       return sprintf(buf, "%x\n", dev->mdev.rev_id);
+       return sprintf(buf, "%x\n", dev->mdev->rev_id);
 }
 
 static ssize_t show_board(struct device *device, struct device_attribute *attr,
@@ -964,7 +885,7 @@ static ssize_t show_board(struct device *device, struct device_attribute *attr,
        struct mlx5_ib_dev *dev =
                container_of(device, struct mlx5_ib_dev, ib_dev.dev);
        return sprintf(buf, "%.*s\n", MLX5_BOARD_ID_LEN,
-                      dev->mdev.board_id);
+                      dev->mdev->board_id);
 }
 
 static DEVICE_ATTR(hw_rev,   S_IRUGO, show_rev,    NULL);
@@ -983,11 +904,12 @@ static struct device_attribute *mlx5_class_attributes[] = {
        &dev_attr_reg_pages,
 };
 
-static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
-                         void *data)
+static void mlx5_ib_event(struct mlx5_core_dev *dev, void *context,
+                         enum mlx5_dev_event event, unsigned long param)
 {
-       struct mlx5_ib_dev *ibdev = container_of(dev, struct mlx5_ib_dev, mdev);
+       struct mlx5_ib_dev *ibdev = (struct mlx5_ib_dev *)context;
        struct ib_event ibev;
+
        u8 port = 0;
 
        switch (event) {
@@ -998,12 +920,12 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 
        case MLX5_DEV_EVENT_PORT_UP:
                ibev.event = IB_EVENT_PORT_ACTIVE;
-               port = *(u8 *)data;
+               port = (u8)param;
                break;
 
        case MLX5_DEV_EVENT_PORT_DOWN:
                ibev.event = IB_EVENT_PORT_ERR;
-               port = *(u8 *)data;
+               port = (u8)param;
                break;
 
        case MLX5_DEV_EVENT_PORT_INITIALIZED:
@@ -1012,22 +934,22 @@ static void mlx5_ib_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
 
        case MLX5_DEV_EVENT_LID_CHANGE:
                ibev.event = IB_EVENT_LID_CHANGE;
-               port = *(u8 *)data;
+               port = (u8)param;
                break;
 
        case MLX5_DEV_EVENT_PKEY_CHANGE:
                ibev.event = IB_EVENT_PKEY_CHANGE;
-               port = *(u8 *)data;
+               port = (u8)param;
                break;
 
        case MLX5_DEV_EVENT_GUID_CHANGE:
                ibev.event = IB_EVENT_GID_CHANGE;
-               port = *(u8 *)data;
+               port = (u8)param;
                break;
 
        case MLX5_DEV_EVENT_CLIENT_REREG:
                ibev.event = IB_EVENT_CLIENT_REREGISTER;
-               port = *(u8 *)data;
+               port = (u8)param;
                break;
        }
 
@@ -1047,7 +969,7 @@ static void get_ext_port_caps(struct mlx5_ib_dev *dev)
 {
        int port;
 
-       for (port = 1; port <= dev->mdev.caps.num_ports; port++)
+       for (port = 1; port <= dev->mdev->caps.num_ports; port++)
                mlx5_query_ext_port_caps(dev, port);
 }
 
@@ -1072,14 +994,14 @@ static int get_port_caps(struct mlx5_ib_dev *dev)
                goto out;
        }
 
-       for (port = 1; port <= dev->mdev.caps.num_ports; port++) {
+       for (port = 1; port <= dev->mdev->caps.num_ports; port++) {
                err = mlx5_ib_query_port(&dev->ib_dev, port, pprops);
                if (err) {
                        mlx5_ib_warn(dev, "query_port %d failed %d\n", port, err);
                        break;
                }
-               dev->mdev.caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
-               dev->mdev.caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
+               dev->mdev->caps.port[port - 1].pkey_table_len = dprops->max_pkeys;
+               dev->mdev->caps.port[port - 1].gid_table_len = pprops->gid_tbl_len;
                mlx5_ib_dbg(dev, "pkey_table_len %d, gid_table_len %d\n",
                            dprops->max_pkeys, pprops->gid_tbl_len);
        }
@@ -1328,10 +1250,8 @@ static void destroy_dev_resources(struct mlx5_ib_resources *devr)
        mlx5_ib_dealloc_pd(devr->p0);
 }
 
-static int init_one(struct pci_dev *pdev,
-                   const struct pci_device_id *id)
+static void *mlx5_ib_add(struct mlx5_core_dev *mdev)
 {
-       struct mlx5_core_dev *mdev;
        struct mlx5_ib_dev *dev;
        int err;
        int i;
@@ -1340,28 +1260,19 @@ static int init_one(struct pci_dev *pdev,
 
        dev = (struct mlx5_ib_dev *)ib_alloc_device(sizeof(*dev));
        if (!dev)
-               return -ENOMEM;
+               return NULL;
 
-       mdev = &dev->mdev;
-       mdev->event = mlx5_ib_event;
-       if (prof_sel >= ARRAY_SIZE(profile)) {
-               pr_warn("selected pofile out of range, selceting default\n");
-               prof_sel = 0;
-       }
-       mdev->profile = &profile[prof_sel];
-       err = mlx5_dev_init(mdev, pdev);
-       if (err)
-               goto err_free;
+       dev->mdev = mdev;
 
        err = get_port_caps(dev);
        if (err)
-               goto err_cleanup;
+               goto err_dealloc;
 
        get_ext_port_caps(dev);
 
        err = alloc_comp_eqs(dev);
        if (err)
-               goto err_cleanup;
+               goto err_dealloc;
 
        MLX5_INIT_DOORBELL_LOCK(&dev->uar_lock);
 
@@ -1480,7 +1391,7 @@ static int init_one(struct pci_dev *pdev,
 
        dev->ib_active = true;
 
-       return 0;
+       return dev;
 
 err_umrc:
        destroy_umrc_res(dev);
@@ -1494,49 +1405,39 @@ err_rsrc:
 err_eqs:
        free_comp_eqs(dev);
 
-err_cleanup:
-       mlx5_dev_cleanup(mdev);
-
-err_free:
+err_dealloc:
        ib_dealloc_device((struct ib_device *)dev);
 
-       return err;
+       return NULL;
 }
 
-static void remove_one(struct pci_dev *pdev)
+static void mlx5_ib_remove(struct mlx5_core_dev *mdev, void *context)
 {
-       struct mlx5_ib_dev *dev = mlx5_pci2ibdev(pdev);
-
+       struct mlx5_ib_dev *dev = context;
        destroy_umrc_res(dev);
        ib_unregister_device(&dev->ib_dev);
        destroy_dev_resources(&dev->devr);
        free_comp_eqs(dev);
-       mlx5_dev_cleanup(&dev->mdev);
        ib_dealloc_device(&dev->ib_dev);
 }
 
-static DEFINE_PCI_DEVICE_TABLE(mlx5_ib_pci_table) = {
-       { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
-       { 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, mlx5_ib_pci_table);
-
-static struct pci_driver mlx5_ib_driver = {
-       .name           = DRIVER_NAME,
-       .id_table       = mlx5_ib_pci_table,
-       .probe          = init_one,
-       .remove         = remove_one
+static struct mlx5_interface mlx5_ib_interface = {
+       .add            = mlx5_ib_add,
+       .remove         = mlx5_ib_remove,
+       .event          = mlx5_ib_event,
 };
 
 static int __init mlx5_ib_init(void)
 {
-       return pci_register_driver(&mlx5_ib_driver);
+       if (deprecated_prof_sel != 2)
+               pr_warn("prof_sel is deprecated for mlx5_ib, set it for mlx5_core\n");
+
+       return mlx5_register_interface(&mlx5_ib_interface);
 }
 
 static void __exit mlx5_ib_cleanup(void)
 {
-       pci_unregister_driver(&mlx5_ib_driver);
+       mlx5_unregister_interface(&mlx5_ib_interface);
 }
 
 module_init(mlx5_ib_init);
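The hunks above convert mlx5_ib from a standalone PCI driver into a client of mlx5_core: the core now owns the device and calls back through a registered mlx5_interface. A minimal plain-C sketch of that add/remove-callback pattern, with the core-side dispatch invented purely for illustration:

#include <stdio.h>
#include <stdlib.h>

struct core_dev { const char *name; };

struct interface {
    void *(*add)(struct core_dev *dev);
    void (*remove)(struct core_dev *dev, void *context);
};

/* The core calls add() when a device appears and keeps the returned
 * context; remove() gets that same context back, just as
 * mlx5_ib_remove() receives the ibdev it returned from mlx5_ib_add(). */
static void *client_add(struct core_dev *dev)
{
    printf("bound to %s\n", dev->name);
    return malloc(16);          /* stands in for the allocated ib device */
}

static void client_remove(struct core_dev *dev, void *context)
{
    printf("unbound from %s\n", dev->name);
    free(context);
}

int main(void)
{
    struct core_dev dev = { .name = "mlx5_core0" };
    struct interface intf = { .add = client_add, .remove = client_remove };

    void *ctx = intf.add(&dev); /* core: device arrived */
    intf.remove(&dev, ctx);     /* core: device going away */
    return 0;
}

The same inversion is why mlx5_ib_event() above now receives the ibdev as an opaque context argument instead of recovering it with container_of().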
index 8499aec..a3e8144 100644
@@ -148,7 +148,7 @@ int mlx5_ib_get_buf_offset(u64 addr, int page_shift, u32 *offset)
        u64 off_mask;
        u64 buf_off;
 
-       page_size = 1 << page_shift;
+       page_size = (u64)1 << page_shift;
        page_mask = page_size - 1;
        buf_off = addr & page_mask;
        off_size = page_size >> 6;
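This small hunk is a correctness fix, not style: the literal 1 is a 32-bit int, so 1 << page_shift is evaluated in 32 bits and overflows before the assignment widens it; casting first keeps the whole shift in 64 bits. A standalone illustration:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int page_shift = 33;
    /* 1 << page_shift would shift a 32-bit int past its width, which
     * is undefined; widening the operand first keeps the shift in 64
     * bits -- exactly what the (u64) cast in the hunk does. */
    uint64_t page_size = (uint64_t)1 << page_shift;
    printf("page_size = %" PRIu64 "\n", page_size);  /* 8589934592 */
    return 0;
}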
index f2ccf1a..386780f 100644
@@ -360,7 +360,7 @@ struct mlx5_ib_resources {
 
 struct mlx5_ib_dev {
        struct ib_device                ib_dev;
-       struct mlx5_core_dev            mdev;
+       struct mlx5_core_dev            *mdev;
        MLX5_DECLARE_DOORBELL_LOCK(uar_lock);
        struct list_head                eqs_list;
        int                             num_ports;
@@ -454,16 +454,6 @@ static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
        return container_of(ibah, struct mlx5_ib_ah, ibah);
 }
 
-static inline struct mlx5_ib_dev *mlx5_core2ibdev(struct mlx5_core_dev *dev)
-{
-       return container_of(dev, struct mlx5_ib_dev, mdev);
-}
-
-static inline struct mlx5_ib_dev *mlx5_pci2ibdev(struct pci_dev *pdev)
-{
-       return mlx5_core2ibdev(pci2mlx5_core_dev(pdev));
-}
-
 int mlx5_ib_db_map_user(struct mlx5_ib_ucontext *context, unsigned long virt,
                        struct mlx5_db *db);
 void mlx5_ib_db_unmap_user(struct mlx5_ib_ucontext *context, struct mlx5_db *db);
@@ -471,7 +461,7 @@ void __mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq)
 void mlx5_ib_cq_clean(struct mlx5_ib_cq *cq, u32 qpn, struct mlx5_ib_srq *srq);
 void mlx5_ib_free_srq_wqe(struct mlx5_ib_srq *srq, int wqe_index);
 int mlx5_MAD_IFC(struct mlx5_ib_dev *dev, int ignore_mkey, int ignore_bkey,
-                int port, struct ib_wc *in_wc, struct ib_grh *in_grh,
+                u8 port, struct ib_wc *in_wc, struct ib_grh *in_grh,
                 void *in_mad, void *response_mad);
 struct ib_ah *create_ib_ah(struct ib_ah_attr *ah_attr,
                           struct mlx5_ib_ah *ah);
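Turning the embedded mdev into a pointer is also why the two helpers above are deleted: container_of() can only recover the outer structure when the member is physically embedded in it. A self-contained sketch of that invariant, with simplified types that are not the kernel's:

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct core { int id; };

/* Embedded member: the outer address is recoverable by arithmetic. */
struct ibdev_embedded { struct core core; };

int main(void)
{
    struct ibdev_embedded d = { .core = { .id = 7 } };
    struct core *c = &d.core;
    struct ibdev_embedded *back = container_of(c, struct ibdev_embedded, core);

    printf("%d\n", back->core.id);  /* 7: the round trip works */

    /* With "struct core *core;" the pointee can live anywhere, so no
     * such round trip exists -- hence the opaque context pointer that
     * the new event callback carries instead. */
    return 0;
}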
index afa873b..80b3c63 100644
@@ -73,7 +73,7 @@ static void reg_mr_callback(int status, void *context)
        struct mlx5_cache_ent *ent = &cache->ent[c];
        u8 key;
        unsigned long flags;
-       struct mlx5_mr_table *table = &dev->mdev.priv.mr_table;
+       struct mlx5_mr_table *table = &dev->mdev->priv.mr_table;
        int err;
 
        spin_lock_irqsave(&ent->lock, flags);
@@ -97,9 +97,9 @@ static void reg_mr_callback(int status, void *context)
                return;
        }
 
-       spin_lock_irqsave(&dev->mdev.priv.mkey_lock, flags);
-       key = dev->mdev.priv.mkey_key++;
-       spin_unlock_irqrestore(&dev->mdev.priv.mkey_lock, flags);
+       spin_lock_irqsave(&dev->mdev->priv.mkey_lock, flags);
+       key = dev->mdev->priv.mkey_key++;
+       spin_unlock_irqrestore(&dev->mdev->priv.mkey_lock, flags);
        mr->mmr.key = mlx5_idx_to_mkey(be32_to_cpu(mr->out.mkey) & 0xffffff) | key;
 
        cache->last_add = jiffies;
@@ -155,7 +155,7 @@ static int add_keys(struct mlx5_ib_dev *dev, int c, int num)
                spin_lock_irq(&ent->lock);
                ent->pending++;
                spin_unlock_irq(&ent->lock);
-               err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in,
+               err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in,
                                            sizeof(*in), reg_mr_callback,
                                            mr, &mr->out);
                if (err) {
@@ -188,7 +188,7 @@ static void remove_keys(struct mlx5_ib_dev *dev, int c, int num)
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
-               err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+               err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
@@ -479,7 +479,7 @@ static void clean_keys(struct mlx5_ib_dev *dev, int c)
                ent->cur--;
                ent->size--;
                spin_unlock_irq(&ent->lock);
-               err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+               err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                if (err)
                        mlx5_ib_warn(dev, "failed destroy mkey\n");
                else
@@ -496,7 +496,7 @@ static int mlx5_mr_cache_debugfs_init(struct mlx5_ib_dev *dev)
        if (!mlx5_debugfs_root)
                return 0;
 
-       cache->root = debugfs_create_dir("mr_cache", dev->mdev.priv.dbg_root);
+       cache->root = debugfs_create_dir("mr_cache", dev->mdev->priv.dbg_root);
        if (!cache->root)
                return -ENOMEM;
 
@@ -571,8 +571,8 @@ int mlx5_mr_cache_init(struct mlx5_ib_dev *dev)
                ent->order = i + 2;
                ent->dev = dev;
 
-               if (dev->mdev.profile->mask & MLX5_PROF_MASK_MR_CACHE)
-                       limit = dev->mdev.profile->mr_cache[i].limit;
+               if (dev->mdev->profile->mask & MLX5_PROF_MASK_MR_CACHE)
+                       limit = dev->mdev->profile->mr_cache[i].limit;
                else
                        limit = 0;
 
@@ -610,7 +610,7 @@ int mlx5_mr_cache_cleanup(struct mlx5_ib_dev *dev)
 struct ib_mr *mlx5_ib_get_dma_mr(struct ib_pd *pd, int acc)
 {
        struct mlx5_ib_dev *dev = to_mdev(pd->device);
-       struct mlx5_core_dev *mdev = &dev->mdev;
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *seg;
        struct mlx5_ib_mr *mr;
@@ -846,7 +846,7 @@ static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, u64 virt_addr,
        in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        in->xlat_oct_act_size = cpu_to_be32(get_octo_len(virt_addr, length,
                                                         1 << page_shift));
-       err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, inlen, NULL,
+       err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, inlen, NULL,
                                    NULL, NULL);
        if (err) {
                mlx5_ib_warn(dev, "create mkey failed\n");
@@ -923,7 +923,7 @@ struct ib_mr *mlx5_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
        mr->umem = umem;
        mr->npages = npages;
        spin_lock(&dev->mr_lock);
-       dev->mdev.priv.reg_pages += npages;
+       dev->mdev->priv.reg_pages += npages;
        spin_unlock(&dev->mr_lock);
        mr->ibmr.lkey = mr->mmr.key;
        mr->ibmr.rkey = mr->mmr.key;
@@ -978,7 +978,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
        int err;
 
        if (!umred) {
-               err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+               err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
                if (err) {
                        mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                                     mr->mmr.key, err);
@@ -996,7 +996,7 @@ int mlx5_ib_dereg_mr(struct ib_mr *ibmr)
        if (umem) {
                ib_umem_release(umem);
                spin_lock(&dev->mr_lock);
-               dev->mdev.priv.reg_pages -= npages;
+               dev->mdev->priv.reg_pages -= npages;
                spin_unlock(&dev->mr_lock);
        }
 
@@ -1044,7 +1044,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
                }
 
                /* create mem & wire PSVs */
-               err = mlx5_core_create_psv(&dev->mdev, to_mpd(pd)->pdn,
+               err = mlx5_core_create_psv(dev->mdev, to_mpd(pd)->pdn,
                                           2, psv_index);
                if (err)
                        goto err_free_sig;
@@ -1060,7 +1060,7 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
        }
 
        in->seg.flags = MLX5_PERM_UMR_EN | access_mode;
-       err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in),
+       err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in),
                                    NULL, NULL, NULL);
        if (err)
                goto err_destroy_psv;
@@ -1074,11 +1074,11 @@ struct ib_mr *mlx5_ib_create_mr(struct ib_pd *pd,
 
 err_destroy_psv:
        if (mr->sig) {
-               if (mlx5_core_destroy_psv(&dev->mdev,
+               if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
-               if (mlx5_core_destroy_psv(&dev->mdev,
+               if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
@@ -1099,18 +1099,18 @@ int mlx5_ib_destroy_mr(struct ib_mr *ibmr)
        int err;
 
        if (mr->sig) {
-               if (mlx5_core_destroy_psv(&dev->mdev,
+               if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_memory.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy mem psv %d\n",
                                     mr->sig->psv_memory.psv_idx);
-               if (mlx5_core_destroy_psv(&dev->mdev,
+               if (mlx5_core_destroy_psv(dev->mdev,
                                          mr->sig->psv_wire.psv_idx))
                        mlx5_ib_warn(dev, "failed to destroy wire psv %d\n",
                                     mr->sig->psv_wire.psv_idx);
                kfree(mr->sig);
        }
 
-       err = mlx5_core_destroy_mkey(&dev->mdev, &mr->mmr);
+       err = mlx5_core_destroy_mkey(dev->mdev, &mr->mmr);
        if (err) {
                mlx5_ib_warn(dev, "failed to destroy mkey 0x%x (%d)\n",
                             mr->mmr.key, err);
@@ -1149,7 +1149,7 @@ struct ib_mr *mlx5_ib_alloc_fast_reg_mr(struct ib_pd *pd,
         * TBD not needed - issue 197292 */
        in->seg.log2_page_size = PAGE_SHIFT;
 
-       err = mlx5_core_create_mkey(&dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
+       err = mlx5_core_create_mkey(dev->mdev, &mr->mmr, in, sizeof(*in), NULL,
                                    NULL, NULL);
        kfree(in);
        if (err)
@@ -1202,7 +1202,7 @@ void mlx5_ib_free_fast_reg_page_list(struct ib_fast_reg_page_list *page_list)
        struct mlx5_ib_dev *dev = to_mdev(page_list->device);
        int size = page_list->max_page_list_len * sizeof(u64);
 
-       dma_free_coherent(&dev->mdev.pdev->dev, size, mfrpl->mapped_page_list,
+       dma_free_coherent(&dev->mdev->pdev->dev, size, mfrpl->mapped_page_list,
                          mfrpl->map);
        kfree(mfrpl->ibfrpl.page_list);
        kfree(mfrpl);
index d13ddf1..7efe6e3 100644
@@ -162,7 +162,7 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
        int wq_size;
 
        /* Sanity check RQ size before proceeding */
-       if (cap->max_recv_wr  > dev->mdev.caps.max_wqes)
+       if (cap->max_recv_wr  > dev->mdev->caps.max_wqes)
                return -EINVAL;
 
        if (!has_rq) {
@@ -182,10 +182,10 @@ static int set_rq_size(struct mlx5_ib_dev *dev, struct ib_qp_cap *cap,
                        wq_size = roundup_pow_of_two(cap->max_recv_wr) * wqe_size;
                        wq_size = max_t(int, wq_size, MLX5_SEND_WQE_BB);
                        qp->rq.wqe_cnt = wq_size / wqe_size;
-                       if (wqe_size > dev->mdev.caps.max_rq_desc_sz) {
+                       if (wqe_size > dev->mdev->caps.max_rq_desc_sz) {
                                mlx5_ib_dbg(dev, "wqe_size %d, max %d\n",
                                            wqe_size,
-                                           dev->mdev.caps.max_rq_desc_sz);
+                                           dev->mdev->caps.max_rq_desc_sz);
                                return -EINVAL;
                        }
                        qp->rq.wqe_shift = ilog2(wqe_size);
@@ -277,9 +277,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
        if (wqe_size < 0)
                return wqe_size;
 
-       if (wqe_size > dev->mdev.caps.max_sq_desc_sz) {
+       if (wqe_size > dev->mdev->caps.max_sq_desc_sz) {
                mlx5_ib_dbg(dev, "wqe_size(%d) > max_sq_desc_sz(%d)\n",
-                           wqe_size, dev->mdev.caps.max_sq_desc_sz);
+                           wqe_size, dev->mdev->caps.max_sq_desc_sz);
                return -EINVAL;
        }
 
@@ -292,9 +292,9 @@ static int calc_sq_size(struct mlx5_ib_dev *dev, struct ib_qp_init_attr *attr,
 
        wq_size = roundup_pow_of_two(attr->cap.max_send_wr * wqe_size);
        qp->sq.wqe_cnt = wq_size / MLX5_SEND_WQE_BB;
-       if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+       if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
                mlx5_ib_dbg(dev, "wqe count(%d) exceeds limits(%d)\n",
-                           qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+                           qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
                return -ENOMEM;
        }
        qp->sq.wqe_shift = ilog2(MLX5_SEND_WQE_BB);
@@ -311,9 +311,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 {
        int desc_sz = 1 << qp->sq.wqe_shift;
 
-       if (desc_sz > dev->mdev.caps.max_sq_desc_sz) {
+       if (desc_sz > dev->mdev->caps.max_sq_desc_sz) {
                mlx5_ib_warn(dev, "desc_sz %d, max_sq_desc_sz %d\n",
-                            desc_sz, dev->mdev.caps.max_sq_desc_sz);
+                            desc_sz, dev->mdev->caps.max_sq_desc_sz);
                return -EINVAL;
        }
 
@@ -325,9 +325,9 @@ static int set_user_buf_size(struct mlx5_ib_dev *dev,
 
        qp->sq.wqe_cnt = ucmd->sq_wqe_count;
 
-       if (qp->sq.wqe_cnt > dev->mdev.caps.max_wqes) {
+       if (qp->sq.wqe_cnt > dev->mdev->caps.max_wqes) {
                mlx5_ib_warn(dev, "wqe_cnt %d, max_wqes %d\n",
-                            qp->sq.wqe_cnt, dev->mdev.caps.max_wqes);
+                            qp->sq.wqe_cnt, dev->mdev->caps.max_wqes);
                return -EINVAL;
        }
 
@@ -674,8 +674,8 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        int uuarn;
        int err;
 
-       uuari = &dev->mdev.priv.uuari;
-       if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN)
+       uuari = &dev->mdev->priv.uuari;
+       if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
                return -EINVAL;
 
        if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
@@ -700,7 +700,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        qp->sq.offset = qp->rq.wqe_cnt << qp->rq.wqe_shift;
        qp->buf_size = err + (qp->rq.wqe_cnt << qp->rq.wqe_shift);
 
-       err = mlx5_buf_alloc(&dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
+       err = mlx5_buf_alloc(dev->mdev, qp->buf_size, PAGE_SIZE * 2, &qp->buf);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_uuar;
@@ -722,7 +722,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
 
        mlx5_fill_page_array(&qp->buf, (*in)->pas);
 
-       err = mlx5_db_alloc(&dev->mdev, &qp->db);
+       err = mlx5_db_alloc(dev->mdev, &qp->db);
        if (err) {
                mlx5_ib_dbg(dev, "err %d\n", err);
                goto err_free;
@@ -747,7 +747,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
        return 0;
 
 err_wrid:
-       mlx5_db_free(&dev->mdev, &qp->db);
+       mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
@@ -758,23 +758,23 @@ err_free:
        mlx5_vfree(*in);
 
 err_buf:
-       mlx5_buf_free(&dev->mdev, &qp->buf);
+       mlx5_buf_free(dev->mdev, &qp->buf);
 
 err_uuar:
-       free_uuar(&dev->mdev.priv.uuari, uuarn);
+       free_uuar(&dev->mdev->priv.uuari, uuarn);
        return err;
 }
 
 static void destroy_qp_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
 {
-       mlx5_db_free(&dev->mdev, &qp->db);
+       mlx5_db_free(dev->mdev, &qp->db);
        kfree(qp->sq.wqe_head);
        kfree(qp->sq.w_list);
        kfree(qp->sq.wrid);
        kfree(qp->sq.wr_data);
        kfree(qp->rq.wrid);
-       mlx5_buf_free(&dev->mdev, &qp->buf);
-       free_uuar(&dev->mdev.priv.uuari, qp->bf->uuarn);
+       mlx5_buf_free(dev->mdev, &qp->buf);
+       free_uuar(&dev->mdev->priv.uuari, qp->bf->uuarn);
 }
 
 static __be32 get_rx_type(struct mlx5_ib_qp *qp, struct ib_qp_init_attr *attr)
@@ -812,7 +812,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
        spin_lock_init(&qp->rq.lock);
 
        if (init_attr->create_flags & IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK) {
-               if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
+               if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_BLOCK_MCAST)) {
                        mlx5_ib_dbg(dev, "block multicast loopback isn't supported\n");
                        return -EINVAL;
                } else {
@@ -851,9 +851,9 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
                                mlx5_ib_dbg(dev, "invalid rq params\n");
                                return -EINVAL;
                        }
-                       if (ucmd.sq_wqe_count > dev->mdev.caps.max_wqes) {
+                       if (ucmd.sq_wqe_count > dev->mdev->caps.max_wqes) {
                                mlx5_ib_dbg(dev, "requested sq_wqe_count (%d) > max allowed (%d)\n",
-                                           ucmd.sq_wqe_count, dev->mdev.caps.max_wqes);
+                                           ucmd.sq_wqe_count, dev->mdev->caps.max_wqes);
                                return -EINVAL;
                        }
                        err = create_user_qp(dev, pd, qp, udata, &in, &resp, &inlen);
@@ -957,7 +957,7 @@ static int create_qp_common(struct mlx5_ib_dev *dev, struct ib_pd *pd,
 
        in->ctx.db_rec_addr = cpu_to_be64(qp->db.dma);
 
-       err = mlx5_core_create_qp(&dev->mdev, &qp->mqp, in, inlen);
+       err = mlx5_core_create_qp(dev->mdev, &qp->mqp, in, inlen);
        if (err) {
                mlx5_ib_dbg(dev, "create qp failed\n");
                goto err_create;
@@ -1081,7 +1081,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
        if (!in)
                return;
        if (qp->state != IB_QPS_RESET)
-               if (mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(qp->state),
+               if (mlx5_core_qp_modify(dev->mdev, to_mlx5_state(qp->state),
                                        MLX5_QP_STATE_RST, in, sizeof(*in), &qp->mqp))
                        mlx5_ib_warn(dev, "mlx5_ib: modify QP %06x to RESET failed\n",
                                     qp->mqp.qpn);
@@ -1097,7 +1097,7 @@ static void destroy_qp_common(struct mlx5_ib_dev *dev, struct mlx5_ib_qp *qp)
                mlx5_ib_unlock_cqs(send_cq, recv_cq);
        }
 
-       err = mlx5_core_destroy_qp(&dev->mdev, &qp->mqp);
+       err = mlx5_core_destroy_qp(dev->mdev, &qp->mqp);
        if (err)
                mlx5_ib_warn(dev, "failed to destroy QP 0x%x\n", qp->mqp.qpn);
        kfree(in);
@@ -1165,7 +1165,7 @@ struct ib_qp *mlx5_ib_create_qp(struct ib_pd *pd,
        switch (init_attr->qp_type) {
        case IB_QPT_XRC_TGT:
        case IB_QPT_XRC_INI:
-               if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
+               if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC)) {
                        mlx5_ib_dbg(dev, "XRC not supported\n");
                        return ERR_PTR(-ENOSYS);
                }
@@ -1279,7 +1279,7 @@ static int ib_rate_to_mlx5(struct mlx5_ib_dev *dev, u8 rate)
        } else {
                while (rate != IB_RATE_2_5_GBPS &&
                       !(1 << (rate + MLX5_STAT_RATE_OFFSET) &
-                        dev->mdev.caps.stat_rate_support))
+                        dev->mdev->caps.stat_rate_support))
                        --rate;
        }
 
@@ -1318,9 +1318,9 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
        path->port = port;
 
        if (ah->ah_flags & IB_AH_GRH) {
-               if (ah->grh.sgid_index >= dev->mdev.caps.port[port - 1].gid_table_len) {
+               if (ah->grh.sgid_index >= dev->mdev->caps.port[port - 1].gid_table_len) {
                        pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
-                              ah->grh.sgid_index, dev->mdev.caps.port[port - 1].gid_table_len);
+                              ah->grh.sgid_index, dev->mdev->caps.port[port - 1].gid_table_len);
                        return -EINVAL;
                }
 
@@ -1539,7 +1539,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
                        err = -EINVAL;
                        goto out;
                }
-               context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev.caps.log_max_msg;
+               context->mtu_msgmax = (attr->path_mtu << 5) | dev->mdev->caps.log_max_msg;
        }
 
        if (attr_mask & IB_QP_DEST_QPN)
@@ -1637,7 +1637,7 @@ static int __mlx5_ib_modify_qp(struct ib_qp *ibqp,
        optpar = ib_mask_to_mlx5_opt(attr_mask);
        optpar &= opt_mask[mlx5_cur][mlx5_new][mlx5_st];
        in->optparam = cpu_to_be32(optpar);
-       err = mlx5_core_qp_modify(&dev->mdev, to_mlx5_state(cur_state),
+       err = mlx5_core_qp_modify(dev->mdev, to_mlx5_state(cur_state),
                                  to_mlx5_state(new_state), in, sqd_event,
                                  &qp->mqp);
        if (err)
@@ -1699,21 +1699,21 @@ int mlx5_ib_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr,
                goto out;
 
        if ((attr_mask & IB_QP_PORT) &&
-           (attr->port_num == 0 || attr->port_num > dev->mdev.caps.num_ports))
+           (attr->port_num == 0 || attr->port_num > dev->mdev->caps.num_ports))
                goto out;
 
        if (attr_mask & IB_QP_PKEY_INDEX) {
                port = attr_mask & IB_QP_PORT ? attr->port_num : qp->port;
-               if (attr->pkey_index >= dev->mdev.caps.port[port - 1].pkey_table_len)
+               if (attr->pkey_index >= dev->mdev->caps.port[port - 1].pkey_table_len)
                        goto out;
        }
 
        if (attr_mask & IB_QP_MAX_QP_RD_ATOMIC &&
-           attr->max_rd_atomic > dev->mdev.caps.max_ra_res_qp)
+           attr->max_rd_atomic > dev->mdev->caps.max_ra_res_qp)
                goto out;
 
        if (attr_mask & IB_QP_MAX_DEST_RD_ATOMIC &&
-           attr->max_dest_rd_atomic > dev->mdev.caps.max_ra_req_qp)
+           attr->max_dest_rd_atomic > dev->mdev->caps.max_ra_req_qp)
                goto out;
 
        if (cur_state == new_state && cur_state == IB_QPS_RESET) {
@@ -2479,7 +2479,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
 {
        struct mlx5_wqe_ctrl_seg *ctrl = NULL;  /* compiler warning */
        struct mlx5_ib_dev *dev = to_mdev(ibqp->device);
-       struct mlx5_core_dev *mdev = &dev->mdev;
+       struct mlx5_core_dev *mdev = dev->mdev;
        struct mlx5_ib_qp *qp = to_mqp(ibqp);
        struct mlx5_ib_mr *mr;
        struct mlx5_wqe_data_seg *dpseg;
@@ -2539,7 +2539,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                        case IB_WR_RDMA_WRITE_WITH_IMM:
                                set_raddr_seg(seg, wr->wr.rdma.remote_addr,
                                              wr->wr.rdma.rkey);
-                               seg  += sizeof(struct mlx5_wqe_raddr_seg);
+                               seg += sizeof(struct mlx5_wqe_raddr_seg);
                                size += sizeof(struct mlx5_wqe_raddr_seg) / 16;
                                break;
 
@@ -2668,7 +2668,7 @@ int mlx5_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
                case IB_QPT_SMI:
                case IB_QPT_GSI:
                        set_datagram_seg(seg, wr);
-                       seg  += sizeof(struct mlx5_wqe_datagram_seg);
+                       seg += sizeof(struct mlx5_wqe_datagram_seg);
                        size += sizeof(struct mlx5_wqe_datagram_seg) / 16;
                        if (unlikely((seg == qend)))
                                seg = mlx5_get_send_wqe(qp, 0);
@@ -2888,7 +2888,7 @@ static int to_ib_qp_access_flags(int mlx5_flags)
 static void to_ib_ah_attr(struct mlx5_ib_dev *ibdev, struct ib_ah_attr *ib_ah_attr,
                                struct mlx5_qp_path *path)
 {
-       struct mlx5_core_dev *dev = &ibdev->mdev;
+       struct mlx5_core_dev *dev = ibdev->mdev;
 
        memset(ib_ah_attr, 0, sizeof(*ib_ah_attr));
        ib_ah_attr->port_num      = path->port;
@@ -2931,7 +2931,7 @@ int mlx5_ib_query_qp(struct ib_qp *ibqp, struct ib_qp_attr *qp_attr, int qp_attr
                goto out;
        }
        context = &outb->ctx;
-       err = mlx5_core_qp_query(&dev->mdev, &qp->mqp, outb, sizeof(*outb));
+       err = mlx5_core_qp_query(dev->mdev, &qp->mqp, outb, sizeof(*outb));
        if (err)
                goto out_free;
 
@@ -3014,14 +3014,14 @@ struct ib_xrcd *mlx5_ib_alloc_xrcd(struct ib_device *ibdev,
        struct mlx5_ib_xrcd *xrcd;
        int err;
 
-       if (!(dev->mdev.caps.flags & MLX5_DEV_CAP_FLAG_XRC))
+       if (!(dev->mdev->caps.flags & MLX5_DEV_CAP_FLAG_XRC))
                return ERR_PTR(-ENOSYS);
 
        xrcd = kmalloc(sizeof(*xrcd), GFP_KERNEL);
        if (!xrcd)
                return ERR_PTR(-ENOMEM);
 
-       err = mlx5_core_xrcd_alloc(&dev->mdev, &xrcd->xrcdn);
+       err = mlx5_core_xrcd_alloc(dev->mdev, &xrcd->xrcdn);
        if (err) {
                kfree(xrcd);
                return ERR_PTR(-ENOMEM);
@@ -3036,7 +3036,7 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
        u32 xrcdn = to_mxrcd(xrcd)->xrcdn;
        int err;
 
-       err = mlx5_core_xrcd_dealloc(&dev->mdev, xrcdn);
+       err = mlx5_core_xrcd_dealloc(dev->mdev, xrcdn);
        if (err) {
                mlx5_ib_warn(dev, "failed to dealloc xrcdn 0x%x\n", xrcdn);
                return err;
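Besides the pointer conversion, the create_kernel_qp() hunk above widens the accepted create_flags to include IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK, using the usual reject-unknown-bits mask test. A generic sketch of that idiom, with made-up flag names:

#include <stdio.h>

/* Flag names are invented for the sketch. */
#define FLAG_SIGNATURE_EN          (1u << 0)
#define FLAG_BLOCK_MCAST_LOOPBACK  (1u << 1)
#define FLAG_SOMETHING_ELSE        (1u << 2)

static int check_create_flags(unsigned int flags)
{
    unsigned int allowed = FLAG_SIGNATURE_EN | FLAG_BLOCK_MCAST_LOOPBACK;

    /* Any bit outside the allowed set fails in one test. */
    return (flags & ~allowed) ? -22 /* -EINVAL */ : 0;
}

int main(void)
{
    printf("%d\n", check_create_flags(FLAG_BLOCK_MCAST_LOOPBACK));  /* 0 */
    printf("%d\n", check_create_flags(FLAG_SOMETHING_ELSE));        /* -22 */
    return 0;
}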
index 384af6d..70bd131 100644
@@ -159,7 +159,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
        int page_shift;
        int npages;
 
-       err = mlx5_db_alloc(&dev->mdev, &srq->db);
+       err = mlx5_db_alloc(dev->mdev, &srq->db);
        if (err) {
                mlx5_ib_warn(dev, "alloc dbell rec failed\n");
                return err;
@@ -167,7 +167,7 @@ static int create_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq,
 
        *srq->db.db = 0;
 
-       if (mlx5_buf_alloc(&dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
+       if (mlx5_buf_alloc(dev->mdev, buf_size, PAGE_SIZE * 2, &srq->buf)) {
                mlx5_ib_dbg(dev, "buf alloc failed\n");
                err = -ENOMEM;
                goto err_db;
@@ -212,10 +212,10 @@ err_in:
        mlx5_vfree(*in);
 
 err_buf:
-       mlx5_buf_free(&dev->mdev, &srq->buf);
+       mlx5_buf_free(dev->mdev, &srq->buf);
 
 err_db:
-       mlx5_db_free(&dev->mdev, &srq->db);
+       mlx5_db_free(dev->mdev, &srq->db);
        return err;
 }
 
@@ -229,8 +229,8 @@ static void destroy_srq_user(struct ib_pd *pd, struct mlx5_ib_srq *srq)
 static void destroy_srq_kernel(struct mlx5_ib_dev *dev, struct mlx5_ib_srq *srq)
 {
        kfree(srq->wrid);
-       mlx5_buf_free(&dev->mdev, &srq->buf);
-       mlx5_db_free(&dev->mdev, &srq->db);
+       mlx5_buf_free(dev->mdev, &srq->buf);
+       mlx5_db_free(dev->mdev, &srq->db);
 }
 
 struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
@@ -248,10 +248,10 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        u32 flgs, xrcdn;
 
        /* Sanity check SRQ size before proceeding */
-       if (init_attr->attr.max_wr >= dev->mdev.caps.max_srq_wqes) {
+       if (init_attr->attr.max_wr >= dev->mdev->caps.max_srq_wqes) {
                mlx5_ib_dbg(dev, "max_wr %d, cap %d\n",
                            init_attr->attr.max_wr,
-                           dev->mdev.caps.max_srq_wqes);
+                           dev->mdev->caps.max_srq_wqes);
                return ERR_PTR(-EINVAL);
        }
 
@@ -303,7 +303,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
 
        in->ctx.pd = cpu_to_be32(to_mpd(pd)->pdn);
        in->ctx.db_record = cpu_to_be64(srq->db.dma);
-       err = mlx5_core_create_srq(&dev->mdev, &srq->msrq, in, inlen);
+       err = mlx5_core_create_srq(dev->mdev, &srq->msrq, in, inlen);
        mlx5_vfree(in);
        if (err) {
                mlx5_ib_dbg(dev, "create SRQ failed, err %d\n", err);
@@ -327,7 +327,7 @@ struct ib_srq *mlx5_ib_create_srq(struct ib_pd *pd,
        return &srq->ibsrq;
 
 err_core:
-       mlx5_core_destroy_srq(&dev->mdev, &srq->msrq);
+       mlx5_core_destroy_srq(dev->mdev, &srq->msrq);
 
 err_usr_kern_srq:
        if (pd->uobject)
@@ -357,7 +357,7 @@ int mlx5_ib_modify_srq(struct ib_srq *ibsrq, struct ib_srq_attr *attr,
                        return -EINVAL;
 
                mutex_lock(&srq->mutex);
-               ret = mlx5_core_arm_srq(&dev->mdev, &srq->msrq, attr->srq_limit, 1);
+               ret = mlx5_core_arm_srq(dev->mdev, &srq->msrq, attr->srq_limit, 1);
                mutex_unlock(&srq->mutex);
 
                if (ret)
@@ -378,7 +378,7 @@ int mlx5_ib_query_srq(struct ib_srq *ibsrq, struct ib_srq_attr *srq_attr)
        if (!out)
                return -ENOMEM;
 
-       ret = mlx5_core_query_srq(&dev->mdev, &srq->msrq, out);
+       ret = mlx5_core_query_srq(dev->mdev, &srq->msrq, out);
        if (ret)
                goto out_box;
 
@@ -396,7 +396,7 @@ int mlx5_ib_destroy_srq(struct ib_srq *srq)
        struct mlx5_ib_dev *dev = to_mdev(srq->device);
        struct mlx5_ib_srq *msrq = to_msrq(srq);
 
-       mlx5_core_destroy_srq(&dev->mdev, &msrq->msrq);
+       mlx5_core_destroy_srq(dev->mdev, &msrq->msrq);
 
        if (srq->uobject) {
                mlx5_ib_db_unmap_user(to_mucontext(srq->uobject->context), &msrq->db);
index 5786a78..4e675f4 100644
@@ -1394,8 +1394,8 @@ struct ipoib_dev_priv *ipoib_intf_alloc(const char *name)
 {
        struct net_device *dev;
 
-       dev = alloc_netdev((int) sizeof (struct ipoib_dev_priv), name,
-                          ipoib_setup);
+       dev = alloc_netdev((int)sizeof(struct ipoib_dev_priv), name,
+                          NET_NAME_UNKNOWN, ipoib_setup);
        if (!dev)
                return NULL;
 
index 1c4c0db..29ca0bb 100644
@@ -257,9 +257,10 @@ static int input_handle_abs_event(struct input_dev *dev,
 }
 
 static int input_get_disposition(struct input_dev *dev,
-                         unsigned int type, unsigned int code, int value)
+                         unsigned int type, unsigned int code, int *pval)
 {
        int disposition = INPUT_IGNORE_EVENT;
+       int value = *pval;
 
        switch (type) {
 
@@ -357,6 +358,7 @@ static int input_get_disposition(struct input_dev *dev,
                break;
        }
 
+       *pval = value;
        return disposition;
 }
 
@@ -365,7 +367,7 @@ static void input_handle_event(struct input_dev *dev,
 {
        int disposition;
 
-       disposition = input_get_disposition(dev, type, code, value);
+       disposition = input_get_disposition(dev, type, code, &value);
 
        if ((disposition & INPUT_PASS_TO_DEVICE) && dev->event)
                dev->event(dev, type, code, value);
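The input core change threads the event value through by pointer so that input_get_disposition() can both classify the event and rewrite its value for the caller to forward. A toy version of the same calling convention:

#include <stdio.h>

/* Stand-in for input_get_disposition(): classifies the event and may
 * rewrite its value in place. */
static int get_disposition(unsigned int type, int *pval)
{
    int value = *pval;

    if (value > 100)
        value = 100;    /* e.g. clamp to the advertised range */

    *pval = value;      /* caller observes the adjusted value */
    return type != 0;   /* toy "pass to device" disposition */
}

int main(void)
{
    int value = 250;
    int pass = get_disposition(1, &value);

    printf("pass=%d value=%d\n", pass, value);  /* pass=1 value=100 */
    return 0;
}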
index 758b487..de7be4f 100644
@@ -215,6 +215,7 @@ static int keyscan_probe(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static int keyscan_suspend(struct device *dev)
 {
        struct platform_device *pdev = to_platform_device(dev);
@@ -249,6 +250,7 @@ static int keyscan_resume(struct device *dev)
        mutex_unlock(&input->mutex);
        return retval;
 }
+#endif
 
 static SIMPLE_DEV_PM_OPS(keyscan_dev_pm_ops, keyscan_suspend, keyscan_resume);
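The new #ifdef CONFIG_PM_SLEEP guard exists because, in builds without sleep support, SIMPLE_DEV_PM_OPS references neither handler and the compiler would warn that both are defined but unused. The same compile-out pattern in miniature, with the config option simulated by a plain macro:

#include <stdio.h>

#define CONFIG_PM_SLEEP 1   /* set to 0: handlers vanish, no warnings */

#if CONFIG_PM_SLEEP
static int my_suspend(void) { puts("suspend"); return 0; }
static int my_resume(void)  { puts("resume");  return 0; }
#define MY_PM_OPS { my_suspend, my_resume }
#else
#define MY_PM_OPS { 0, 0 }  /* like SIMPLE_DEV_PM_OPS without PM_SLEEP */
#endif

struct pm_ops { int (*suspend)(void); int (*resume)(void); };
static const struct pm_ops ops = MY_PM_OPS;

int main(void)
{
    if (ops.suspend)
        ops.suspend();
    if (ops.resume)
        ops.resume();
    return 0;
}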
 
index e4104f9..fed5102 100644
@@ -213,7 +213,7 @@ static struct platform_driver sirfsoc_pwrc_driver = {
 
 module_platform_driver(sirfsoc_pwrc_driver);
 
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
 MODULE_AUTHOR("Binghua Duan <Binghua.Duan@csr.com>, Xianglong Du <Xianglong.Du@csr.com>");
 MODULE_DESCRIPTION("CSR Prima2 PWRC Driver");
 MODULE_ALIAS("platform:sirfsoc-pwrc");
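MODULE_LICENSE() takes one of a fixed set of strings that the module loader matches exactly; "GPL v2" is in that set while "GPLv2" is not, so the old string would have the module treated as non-GPL. A toy exact-match check, with the list abbreviated relative to the real loader's:

#include <stdio.h>
#include <string.h>

/* Abbreviated: the real loader also recognizes a few dual licenses. */
static const char *const gpl_compatible[] = { "GPL", "GPL v2", "Dual BSD/GPL", NULL };

static int license_is_gpl_compatible(const char *license)
{
    for (int i = 0; gpl_compatible[i]; i++)
        if (strcmp(gpl_compatible[i], license) == 0)
            return 1;
    return 0;
}

int main(void)
{
    printf("GPLv2  -> %d\n", license_is_gpl_compatible("GPLv2"));   /* 0 */
    printf("GPL v2 -> %d\n", license_is_gpl_compatible("GPL v2"));  /* 1 */
    return 0;
}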
index ec772d9..ef9e0b8 100644
@@ -132,7 +132,8 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                1232, 5710, 1156, 4696
        },
        {
-               (const char * const []){"LEN0034", "LEN0036", "LEN2004", NULL},
+               (const char * const []){"LEN0034", "LEN0036", "LEN2002",
+                                       "LEN2004", NULL},
                1024, 5112, 2024, 4832
        },
        {
@@ -168,7 +169,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0049",
        "LEN2000",
        "LEN2001", /* Edge E431 */
-       "LEN2002",
+       "LEN2002", /* Edge E531 */
        "LEN2003",
        "LEN2004", /* L440 */
        "LEN2005",
index 381b20d..136b7b2 100644
@@ -401,6 +401,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
                        DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
                },
        },
+       {
+               /* Acer Aspire 5710 */
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
+               },
+       },
        {
                /* Gericom Bellagio */
                .matches = {
index 977d05c..e73cf2c 100644
@@ -1217,9 +1217,9 @@ static void wacom_bpt3_touch_msg(struct wacom_wac *wacom, unsigned char *data)
                         * a=(pi*r^2)/C.
                         */
                        int a = data[5];
-                       int x_res  = input_abs_get_res(input, ABS_X);
-                       int y_res  = input_abs_get_res(input, ABS_Y);
-                       width  = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
+                       int x_res = input_abs_get_res(input, ABS_MT_POSITION_X);
+                       int y_res = input_abs_get_res(input, ABS_MT_POSITION_Y);
+                       width = 2 * int_sqrt(a * WACOM_CONTACT_AREA_SCALE);
                        height = width * y_res / x_res;
                }
 
@@ -1587,7 +1587,7 @@ static void wacom_abs_set_axis(struct input_dev *input_dev,
                input_abs_set_res(input_dev, ABS_X, features->x_resolution);
                input_abs_set_res(input_dev, ABS_Y, features->y_resolution);
        } else {
-               if (features->touch_max <= 2) {
+               if (features->touch_max == 1) {
                        input_set_abs_params(input_dev, ABS_X, 0,
                                features->x_max, features->x_fuzz, 0);
                        input_set_abs_params(input_dev, ABS_Y, 0,
@@ -1815,14 +1815,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
        case MTTPC:
        case MTTPC_B:
        case TABLETPC2FG:
-               if (features->device_type == BTN_TOOL_FINGER) {
-                       unsigned int flags = INPUT_MT_DIRECT;
-
-                       if (wacom_wac->features.type == TABLETPC2FG)
-                               flags = 0;
-
-                       input_mt_init_slots(input_dev, features->touch_max, flags);
-               }
+               if (features->device_type == BTN_TOOL_FINGER && features->touch_max > 1)
+                       input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_DIRECT);
                /* fall through */
 
        case TABLETPC:
@@ -1883,10 +1877,6 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                        __set_bit(BTN_RIGHT, input_dev->keybit);
 
                        if (features->touch_max) {
-                               /* touch interface */
-                               unsigned int flags = INPUT_MT_POINTER;
-
-                               __set_bit(INPUT_PROP_POINTER, input_dev->propbit);
                                if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MAJOR,
@@ -1894,12 +1884,8 @@ int wacom_setup_input_capabilities(struct input_dev *input_dev,
                                        input_set_abs_params(input_dev,
                                                     ABS_MT_TOUCH_MINOR,
                                                     0, features->y_max, 0, 0);
-                               } else {
-                                       __set_bit(BTN_TOOL_FINGER, input_dev->keybit);
-                                       __set_bit(BTN_TOOL_DOUBLETAP, input_dev->keybit);
-                                       flags = 0;
                                }
-                               input_mt_init_slots(input_dev, features->touch_max, flags);
+                               input_mt_init_slots(input_dev, features->touch_max, INPUT_MT_POINTER);
                        } else {
                                /* buttons/keys only interface */
                                __clear_bit(ABS_X, input_dev->absbit);
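
The geometry in the first hunk, in isolation: the reported area a is treated as a circle to derive a contact width, and the height is rescaled by the Y/X resolution ratio of the MT axes (the fix is to read the resolution from ABS_MT_POSITION_*, which is what multi-touch reports actually use). The scale constant and resolutions below are illustrative stand-ins, not values from the driver:

    #include <math.h>
    #include <stdio.h>

    int main(void)
    {
        int a = 50;                     /* raw contact area from the packet */
        int scale = 2607;               /* stand-in for WACOM_CONTACT_AREA_SCALE */
        int x_res = 100, y_res = 200;   /* units per mm on each axis */

        /* width = 2 * sqrt(a * scale), height scaled by axis resolutions */
        int width = 2 * (int)sqrt((double)a * scale);
        int height = width * y_res / x_res;

        printf("width=%d height=%d\n", width, height);
        return 0;
    }
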
index 4e793a1..2ce6495 100644 (file)
@@ -359,9 +359,12 @@ static int titsc_parse_dt(struct platform_device *pdev,
         */
        err = of_property_read_u32(node, "ti,coordinate-readouts",
                        &ts_dev->coordinate_readouts);
-       if (err < 0)
+       if (err < 0) {
+               dev_warn(&pdev->dev, "please use 'ti,coordinate-readouts' instead\n");
                err = of_property_read_u32(node, "ti,coordiante-readouts",
                                &ts_dev->coordinate_readouts);
+       }
+
        if (err < 0)
                return err;
 
index d4daa05..499b436 100644 (file)
@@ -45,7 +45,7 @@ struct pri_queue {
 struct pasid_state {
        struct list_head list;                  /* For global state-list */
        atomic_t count;                         /* Reference count */
-       atomic_t mmu_notifier_count;            /* Counting nested mmu_notifier
+       unsigned mmu_notifier_count;            /* Counting nested mmu_notifier
                                                   calls */
        struct task_struct *task;               /* Task bound to this PASID */
        struct mm_struct *mm;                   /* mm_struct for the faults */
@@ -53,7 +53,8 @@ struct pasid_state {
        struct pri_queue pri[PRI_QUEUE_SIZE];   /* PRI tag states */
        struct device_state *device_state;      /* Link to our device_state */
        int pasid;                              /* PASID index */
-       spinlock_t lock;                        /* Protect pri_queues */
+       spinlock_t lock;                        /* Protect pri_queues and
+                                                  mmu_notifier_count */
        wait_queue_head_t wq;                   /* To wait for count == 0 */
 };
 
@@ -431,15 +432,19 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
 {
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
+       unsigned long flags;
 
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) {
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       if (pasid_state->mmu_notifier_count == 0) {
                amd_iommu_domain_set_gcr3(dev_state->domain,
                                          pasid_state->pasid,
                                          __pa(empty_page_table));
        }
+       pasid_state->mmu_notifier_count += 1;
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
 static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -448,15 +453,19 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
 {
        struct pasid_state *pasid_state;
        struct device_state *dev_state;
+       unsigned long flags;
 
        pasid_state = mn_to_state(mn);
        dev_state   = pasid_state->device_state;
 
-       if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) {
+       spin_lock_irqsave(&pasid_state->lock, flags);
+       pasid_state->mmu_notifier_count -= 1;
+       if (pasid_state->mmu_notifier_count == 0) {
                amd_iommu_domain_set_gcr3(dev_state->domain,
                                          pasid_state->pasid,
                                          __pa(pasid_state->mm->pgd));
        }
+       spin_unlock_irqrestore(&pasid_state->lock, flags);
 }
 
 static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -650,7 +659,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
                goto out;
 
        atomic_set(&pasid_state->count, 1);
-       atomic_set(&pasid_state->mmu_notifier_count, 0);
        init_waitqueue_head(&pasid_state->wq);
        spin_lock_init(&pasid_state->lock);
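
The atomic_t removal is the point of this change: atomic ops make each increment safe in isolation, but the pair "check for the first/last nesting level, then switch the page table" must execute as one unit, so the counter moves under the existing pasid_state spinlock. A pthread sketch of the same structure:

    #include <pthread.h>
    #include <stdio.h>

    /* The counter update and the first/last decision form one
     * critical section, so a plain integer under a lock replaces
     * the atomic_t. */
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static unsigned int count;

    static void range_start(void)
    {
        pthread_mutex_lock(&lock);
        if (count == 0)
            puts("first nesting: install empty page table");
        count += 1;
        pthread_mutex_unlock(&lock);
    }

    static void range_end(void)
    {
        pthread_mutex_lock(&lock);
        count -= 1;
        if (count == 0)
            puts("last nesting: restore real page table");
        pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
        range_start();
        range_start();      /* nested: no table switch */
        range_end();
        range_end();
        return 0;
    }
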
 
index b99dd88..bb446d7 100644 (file)
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
 static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
 {
        /* Bug if not a power of 2 */
-       BUG_ON(!is_power_of_2(addrspace_size));
+       BUG_ON((addrspace_size & (addrspace_size - 1)));
 
        /* window size is 2^(WSE+1) bytes */
-       return __ffs(addrspace_size) - 1;
+       return fls64(addrspace_size) - 2;
 }
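
The fls64() form fixes two things at once: __ffs() takes an unsigned long and would truncate a 64-bit phys_addr_t on 32-bit builds, and for a power-of-two size fls64(size) is the 1-based index of the set bit, so fls64(size) - 2 yields the WSE encoding where the window size is 2^(WSE+1). A quick userspace check, with a portable stand-in for the kernel's fls64:

    #include <stdint.h>
    #include <stdio.h>

    static int fls64(uint64_t x)
    {
        int bit = 0;

        while (x) {
            bit++;
            x >>= 1;
        }
        return bit;
    }

    int main(void)
    {
        uint64_t size = 4096;           /* 2^12 */
        int wse = fls64(size) - 2;      /* 13 - 2 = 11 */

        printf("wse=%d window=%llu\n", wse,
               (unsigned long long)1 << (wse + 1));     /* 4096 again */
        return 0;
    }
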
 
 /* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
        struct paace *ppaace;
        unsigned long fspi;
 
-       if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) {
+       if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
                pr_debug("window size too small or not a power of two %llx\n", win_size);
                return -EINVAL;
        }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
                return -ENOENT;
        }
 
-       if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) {
+       if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
                pr_debug("subwindow size out of range, or not a power of 2\n");
                return -EINVAL;
        }
index 93072ba..af47648 100644 (file)
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
         * Size must be a power of two and at least be equal
         * to PAMU page size.
         */
-       if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) {
+       if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
                pr_debug("%s: size too small or not a power of two\n", __func__);
                return -EINVAL;
        }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
        return domain;
 }
 
-static inline struct device_domain_info *find_domain(struct device *dev)
-{
-       return dev->archdata.iommu_domain;
-}
-
 static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
 {
        unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * Check here if the device is already attached to domain or not.
         * If the device is already attached to a domain detach it.
         */
-       old_domain_info = find_domain(dev);
+       old_domain_info = dev->archdata.iommu_domain;
        if (old_domain_info && old_domain_info->domain != dma_domain) {
                spin_unlock_irqrestore(&device_domain_lock, flags);
                detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
         * the info for the first LIODN as all
         * LIODNs share the same domain
         */
-       if (!old_domain_info)
+       if (!dev->archdata.iommu_domain)
                dev->archdata.iommu_domain = info;
        spin_unlock_irqrestore(&device_domain_lock, flags);
 
@@ -1042,12 +1037,15 @@ root_bus:
                        group = get_shared_pci_device_group(pdev);
        }
 
+       if (!group)
+               group = ERR_PTR(-ENODEV);
+
        return group;
 }
 
 static int fsl_pamu_add_device(struct device *dev)
 {
-       struct iommu_group *group = NULL;
+       struct iommu_group *group = ERR_PTR(-ENODEV);
        struct pci_dev *pdev;
        const u32 *prop;
        int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
                        group = get_device_iommu_group(dev);
        }
 
-       if (!group || IS_ERR(group))
+       if (IS_ERR(group))
                return PTR_ERR(group);
 
        ret = iommu_group_add_device(group, dev);
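
Both hunks converge on the kernel's ERR_PTR convention: the error code travels inside the pointer value itself, so callers test a single condition (IS_ERR) instead of juggling both NULL and error pointers. A minimal model of the convention:

    #include <stdio.h>

    #define MAX_ERRNO       4095
    #define ERR_PTR(err)    ((void *)(long)(err))
    #define PTR_ERR(ptr)    ((long)(ptr))
    #define IS_ERR(ptr)     ((unsigned long)(ptr) >= (unsigned long)-MAX_ERRNO)

    /* Return either a valid pointer or an encoded errno, never NULL. */
    static void *find_group(int available)
    {
        return available ? (void *)0x1000 : ERR_PTR(-19);   /* -ENODEV */
    }

    int main(void)
    {
        void *group = find_group(0);

        if (IS_ERR(group)) {
            printf("no group: %ld\n", PTR_ERR(group));      /* -19 */
            return 1;
        }
        return 0;
    }
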
index 6bb3277..51b6b77 100644 (file)
@@ -3816,14 +3816,11 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
                                ((void *)rmrr) + rmrr->header.length,
                                rmrr->segment, rmrru->devices,
                                rmrru->devices_cnt);
-                       if (ret > 0)
-                               break;
-                       else if(ret < 0)
+                       if(ret < 0)
                                return ret;
                } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
-                       if (dmar_remove_dev_scope(info, rmrr->segment,
-                               rmrru->devices, rmrru->devices_cnt))
-                               break;
+                       dmar_remove_dev_scope(info, rmrr->segment,
+                               rmrru->devices, rmrru->devices_cnt);
                }
        }
 
index c887e6e..574aba0 100644 (file)
@@ -334,6 +334,15 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
 
 static void armada_xp_mpic_smp_cpu_init(void)
 {
+       u32 control;
+       int nr_irqs, i;
+
+       control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+       nr_irqs = (control >> 2) & 0x3ff;
+
+       for (i = 0; i < nr_irqs; i++)
+               writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
+
        /* Clear pending IPIs */
        writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
 
@@ -474,7 +483,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
                                             struct device_node *parent)
 {
        struct resource main_int_res, per_cpu_int_res;
-       int parent_irq;
+       int parent_irq, nr_irqs, i;
        u32 control;
 
        BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -496,9 +505,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
        BUG_ON(!per_cpu_int_base);
 
        control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
+       nr_irqs = (control >> 2) & 0x3ff;
+
+       for (i = 0; i < nr_irqs; i++)
+               writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
 
        armada_370_xp_mpic_domain =
-               irq_domain_add_linear(node, (control >> 2) & 0x3ff,
+               irq_domain_add_linear(node, nr_irqs,
                                &armada_370_xp_mpic_irq_ops, NULL);
 
        BUG_ON(!armada_370_xp_mpic_domain);
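
Both init paths now decode the supported source count from bits [11:2] of the control register (the 0x3ff mask) and then mask or clear every source, so stale bootloader state cannot leave interrupts enabled behind the kernel's back. The extraction itself, with an illustrative register value:

    #include <stdio.h>

    int main(void)
    {
        unsigned int control = 0x1d0;           /* example readout */
        int nr_irqs = (control >> 2) & 0x3ff;   /* bits [11:2] */
        int i;

        for (i = 0; i < nr_irqs; i++)
            ;                                   /* writel(i, ...SET_MASK...) */
        printf("masked %d sources\n", nr_irqs); /* 116 */
        return 0;
    }
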
index 8ee2a36..c15c840 100644 (file)
@@ -150,7 +150,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
 
        /* Allocate a single Generic IRQ chip for this node */
        ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
-                               np->full_name, handle_level_irq, clr, 0, 0);
+                               np->full_name, handle_edge_irq, clr, 0, 0);
        if (ret) {
                pr_err("failed to allocate generic irq chip\n");
                goto out_free_domain;
index 7e11c9d..7c131cf 100644 (file)
@@ -42,6 +42,7 @@
 #include <linux/irqchip/chained_irq.h>
 #include <linux/irqchip/arm-gic.h>
 
+#include <asm/cputype.h>
 #include <asm/irq.h>
 #include <asm/exception.h>
 #include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                }
 
                for_each_possible_cpu(cpu) {
-                       unsigned long offset = percpu_offset * cpu_logical_map(cpu);
+                       u32 mpidr = cpu_logical_map(cpu);
+                       u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
+                       unsigned long offset = percpu_offset * core_id;
                        *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
                        *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
                }
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
        gic_cnt++;
        return 0;
 }
+IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
+IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
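
The percpu_offset hunk matters on multi-cluster systems: cpu_logical_map() returns a full MPIDR such as 0x100 for cluster 1/core 0, and multiplying percpu_offset by that raw value would land far outside the banked register window. Affinity level 0 keeps only the core number within its cluster. A sketch, assuming the usual arm encoding of eight bits per affinity level:

    #include <stdio.h>

    #define MPIDR_AFFINITY_LEVEL(mpidr, level) \
        (((mpidr) >> ((level) * 8)) & 0xff)

    int main(void)
    {
        unsigned int mpidr = 0x101;     /* cluster 1, core 1 */
        unsigned int core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);

        /* scale per-CPU offsets by 1, not by 257 */
        printf("core_id=%u, raw mpidr=%u\n", core_id, mpidr);
        return 0;
    }
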
 
index 3fdda3a..6ce6bd3 100644 (file)
@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
 };
 
 static struct spear_shirq spear320_shirq_ras3 = {
-       .irq_nr = 3,
+       .irq_nr = 7,
        .irq_bit_off = 0,
        .invalid_irq = 1,
        .regs = {
index f9a87ed..6a2df32 100644 (file)
@@ -1260,7 +1260,7 @@ static int __init capinc_tty_init(void)
        if (capi_ttyminors <= 0)
                capi_ttyminors = CAPINC_NR_PORTS;
 
-       capiminors = kzalloc(sizeof(struct capi_minor *) * capi_ttyminors,
+       capiminors = kzalloc(sizeof(struct capiminor *) * capi_ttyminors,
                             GFP_KERNEL);
        if (!capiminors)
                return -ENOMEM;
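
The capiminors fix is the classic argument for the sizeof(*ptr) idiom: repeating a type name invites the wrong one (sizeof(struct capi_minor *) compiled only because all object pointers share a size). Deriving the element size from the variable itself cannot drift, as in this sketch:

    #include <stdlib.h>

    struct capiminor { int minor; void *port; };

    int main(void)
    {
        unsigned int n = 64;
        struct capiminor **minors;

        /* element size comes from the variable, never a type name */
        minors = calloc(n, sizeof(*minors));
        if (!minors)
            return 1;
        free(minors);
        return 0;
    }
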
index c44950d..b7ae0a0 100644 (file)
@@ -2400,6 +2400,7 @@ allocerr:
 error:
        freeurbs(cs);
        usb_set_intfdata(interface, NULL);
+       usb_put_dev(udev);
        gigaset_freecs(cs);
        return rc;
 }
index d9edcc9..97465ac 100644 (file)
@@ -16,7 +16,7 @@ config ISDN_DRV_HISAX
          also to the configuration option of the driver for your particular
          card, below.
 
-if ISDN_DRV_HISAX!=n
+if ISDN_DRV_HISAX
 
 comment "D-channel protocol features"
 
@@ -348,10 +348,6 @@ config HISAX_ENTERNOW_PCI
          This enables HiSax support for the Formula-n enter:now PCI
          ISDN card.
 
-endif
-
-if ISDN_DRV_HISAX
-
 config HISAX_DEBUG
        bool "HiSax debugging"
        help
@@ -420,11 +416,6 @@ config HISAX_FRITZ_PCIPNP
          (the latter also needs you to select "ISA Plug and Play support"
          from the menu "Plug and Play configuration")
 
-config HISAX_AVM_A1_PCMCIA
-       bool
-       depends on HISAX_AVM_A1_CS
-       default y
-
 endif
 
 endmenu
index 0df6691..8dc791b 100644 (file)
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
                        memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
                        l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
 
-                       if (ic->parm.ni1_io.timeout > 0)
-                               if (!(pc = ni1_new_l3_process(st, -1)))
-                               { free_invoke_id(st, id);
+                       if (ic->parm.ni1_io.timeout > 0) {
+                               pc = ni1_new_l3_process(st, -1);
+                               if (!pc) {
+                                       free_invoke_id(st, id);
                                        return (-2);
                                }
-                       pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */
-                       pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */
+                               /* remember id */
+                               pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
+                               /* and procedure */
+                               pc->prot.ni1.proc = ic->parm.ni1_io.proc;
+                       }
 
                        if (!(skb = l3_alloc_skb(l)))
                        { free_invoke_id(st, id);
index d9aebbc..94affa5 100644 (file)
@@ -2588,7 +2588,8 @@ isdn_net_new(char *name, struct net_device *master)
                printk(KERN_WARNING "isdn_net: Could not allocate net-device\n");
                return NULL;
        }
-       netdev->dev = alloc_netdev(sizeof(isdn_net_local), name, _isdn_setup);
+       netdev->dev = alloc_netdev(sizeof(isdn_net_local), name,
+                                  NET_NAME_UNKNOWN, _isdn_setup);
        if (!netdev->dev) {
                printk(KERN_WARNING "isdn_net: Could not allocate network device\n");
                kfree(netdev);
@@ -2917,8 +2918,8 @@ isdn_net_getcfg(isdn_net_ioctl_cfg *cfg)
                        cfg->callback = 2;
                cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0;
                cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK;
-               cfg->chargehup = (lp->hupflags & 4) ? 1 : 0;
-               cfg->ihup = (lp->hupflags & 8) ? 1 : 0;
+               cfg->chargehup = (lp->hupflags & ISDN_CHARGEHUP) ? 1 : 0;
+               cfg->ihup = (lp->hupflags & ISDN_INHUP) ? 1 : 0;
                cfg->cbdelay = lp->cbdelay;
                cfg->dialmax = lp->dialmax;
                cfg->triggercps = lp->triggercps;
index 61ac632..c4198fa 100644 (file)
@@ -379,12 +379,12 @@ isdn_ppp_release(int min, struct file *file)
 #endif
 #ifdef CONFIG_IPPP_FILTER
        if (is->pass_filter) {
-               sk_unattached_filter_destroy(is->pass_filter);
+               bpf_prog_destroy(is->pass_filter);
                is->pass_filter = NULL;
        }
 
        if (is->active_filter) {
-               sk_unattached_filter_destroy(is->active_filter);
+               bpf_prog_destroy(is->active_filter);
                is->active_filter = NULL;
        }
 #endif
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
        struct sock_fprog uprog;
        struct sock_filter *code = NULL;
-       int len, err;
+       int len;
 
        if (copy_from_user(&uprog, arg, sizeof(uprog)))
                return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
        if (IS_ERR(code))
                return PTR_ERR(code);
 
-       err = sk_chk_filter(code, uprog.len);
-       if (err) {
-               kfree(code);
-               return err;
-       }
-
        *p = code;
        return uprog.len;
 }
@@ -644,9 +638,14 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->pass_filter)
-                       sk_unattached_filter_destroy(is->pass_filter);
-               err = sk_unattached_filter_create(&is->pass_filter, &fprog);
+               if (is->pass_filter) {
+                       bpf_prog_destroy(is->pass_filter);
+                       is->pass_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = bpf_prog_create(&is->pass_filter, &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
@@ -663,9 +662,14 @@ isdn_ppp_ioctl(int min, struct file *file, unsigned int cmd, unsigned long arg)
                fprog.len = len;
                fprog.filter = code;
 
-               if (is->active_filter)
-                       sk_unattached_filter_destroy(is->active_filter);
-               err = sk_unattached_filter_create(&is->active_filter, &fprog);
+               if (is->active_filter) {
+                       bpf_prog_destroy(is->active_filter);
+                       is->active_filter = NULL;
+               }
+               if (fprog.filter != NULL)
+                       err = bpf_prog_create(&is->active_filter, &fprog);
+               else
+                       err = 0;
                kfree(code);
 
                return err;
@@ -1168,14 +1172,14 @@ isdn_ppp_push_higher(isdn_net_dev *net_dev, isdn_net_local *lp, struct sk_buff *
        }
 
        if (is->pass_filter
-           && SK_RUN_FILTER(is->pass_filter, skb) == 0) {
+           && BPF_PROG_RUN(is->pass_filter, skb) == 0) {
                if (is->debug & 0x2)
                        printk(KERN_DEBUG "IPPP: inbound frame filtered.\n");
                kfree_skb(skb);
                return;
        }
        if (!(is->active_filter
-             && SK_RUN_FILTER(is->active_filter, skb) == 0)) {
+             && BPF_PROG_RUN(is->active_filter, skb) == 0)) {
                if (is->debug & 0x2)
                        printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
                lp->huptimer = 0;
@@ -1314,14 +1318,14 @@ isdn_ppp_xmit(struct sk_buff *skb, struct net_device *netdev)
        }
 
        if (ipt->pass_filter
-           && SK_RUN_FILTER(ipt->pass_filter, skb) == 0) {
+           && BPF_PROG_RUN(ipt->pass_filter, skb) == 0) {
                if (ipt->debug & 0x4)
                        printk(KERN_DEBUG "IPPP: outbound frame filtered.\n");
                kfree_skb(skb);
                goto unlock;
        }
        if (!(ipt->active_filter
-             && SK_RUN_FILTER(ipt->active_filter, skb) == 0)) {
+             && BPF_PROG_RUN(ipt->active_filter, skb) == 0)) {
                if (ipt->debug & 0x4)
                        printk(KERN_DEBUG "IPPP: link-active filter: resetting huptimer.\n");
                lp->huptimer = 0;
@@ -1511,9 +1515,9 @@ int isdn_ppp_autodial_filter(struct sk_buff *skb, isdn_net_local *lp)
        }
 
        drop |= is->pass_filter
-               && SK_RUN_FILTER(is->pass_filter, skb) == 0;
+               && BPF_PROG_RUN(is->pass_filter, skb) == 0;
        drop |= is->active_filter
-               && SK_RUN_FILTER(is->active_filter, skb) == 0;
+               && BPF_PROG_RUN(is->active_filter, skb) == 0;
 
        skb_push(skb, IPPP_MAX_HEADER - 4);
        return drop;
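
Besides the rename from SK_RUN_FILTER/sk_unattached_filter_* to BPF_PROG_RUN/bpf_prog_create/bpf_prog_destroy, the separate sk_chk_filter() call can go because bpf_prog_create() validates the program itself. The ioctl hunks also adopt a safer replacement shape: drop the old program first, treat an empty request as "no filter", and only then build the new one. A generic sketch of that shape (not the BPF API itself):

    #include <stdlib.h>

    struct prog { int len; };

    static struct prog *pass_filter;    /* installed filter, or NULL */

    static int replace_filter(int len)
    {
        free(pass_filter);
        pass_filter = NULL;             /* filterless until create succeeds */

        if (len == 0)
            return 0;                   /* empty request: no filter */

        pass_filter = malloc(sizeof(*pass_filter));
        if (!pass_filter)
            return -1;                  /* failure leaves no dangling pointer */
        pass_filter->len = len;
        return 0;
    }

    int main(void)
    {
        replace_filter(4);              /* install */
        replace_filter(0);              /* remove */
        return 0;
    }
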
index f02cc50..4172e22 100644 (file)
@@ -1035,14 +1035,14 @@ static void pcbit_set_msn(struct pcbit_dev *dev, char *list)
                }
                ptr->next = NULL;
 
-               ptr->msn = kmalloc(len, GFP_ATOMIC);
+               ptr->msn = kmalloc(len + 1, GFP_ATOMIC);
                if (!ptr->msn) {
                        printk(KERN_WARNING "kmalloc failed\n");
                        kfree(ptr);
                        return;
                }
 
-               memcpy(ptr->msn, sp, len - 1);
+               memcpy(ptr->msn, sp, len);
                ptr->msn[len] = 0;
 
 #ifdef DEBUG
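
The pcbit MSN fix in miniature: storing len bytes plus a NUL terminator at index len needs len + 1 bytes of storage, and the copy must not silently drop the final digit with len - 1:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    int main(void)
    {
        const char *sp = "5551234";
        size_t len = strlen(sp);
        char *msn = malloc(len + 1);    /* room for the terminator */

        if (!msn)
            return 1;
        memcpy(msn, sp, len);           /* all len digits, not len - 1 */
        msn[len] = 0;

        printf("%s\n", msn);            /* 5551234, last digit intact */
        free(msn);
        return 0;
    }
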
index 23b4a3b..4eab93a 100644 (file)
@@ -1257,7 +1257,8 @@ static unsigned int smu_fpoll(struct file *file, poll_table *wait)
                if (pp->busy && pp->cmd.status != 1)
                        mask |= POLLIN;
                spin_unlock_irqrestore(&pp->lock, flags);
-       } if (pp->mode == smu_file_events) {
+       }
+       if (pp->mode == smu_file_events) {
                /* Not yet implemented */
        }
        return mask;
index 4ead4ba..d2899e7 100644 (file)
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)cmd->data_block_size);
+               r = -EINVAL;
+               goto bad;
+       }
+
        r = __check_incompat_features(disk_super, cmd);
        if (r < 0)
                goto bad;
index 53b2132..4cba2d8 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ * Copyright (C) 2003 Jana Saout <jana@saout.de>
  * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
  * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
  * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
@@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void)
 module_init(dm_crypt_init);
 module_exit(dm_crypt_exit);
 
-MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_AUTHOR("Jana Saout <jana@saout.de>");
 MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
 MODULE_LICENSE("GPL");
index 3842ac7..db404a0 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/device-mapper.h>
 
 #include <linux/bio.h>
+#include <linux/completion.h>
 #include <linux/mempool.h>
 #include <linux/module.h>
 #include <linux/sched.h>
@@ -32,7 +33,7 @@ struct dm_io_client {
 struct io {
        unsigned long error_bits;
        atomic_t count;
-       struct task_struct *sleeper;
+       struct completion *wait;
        struct dm_io_client *client;
        io_notify_fn callback;
        void *context;
@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
                        invalidate_kernel_vmap_range(io->vma_invalidate_address,
                                                     io->vma_invalidate_size);
 
-               if (io->sleeper)
-                       wake_up_process(io->sleeper);
+               if (io->wait)
+                       complete(io->wait);
 
                else {
                        unsigned long r = io->error_bits;
@@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
         */
        volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
        struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
+       DECLARE_COMPLETION_ONSTACK(wait);
 
        if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
                WARN_ON(1);
@@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
-       io->sleeper = current;
+       io->wait = &wait;
        io->client = client;
 
        io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
 
        dispatch_io(rw, num_regions, where, dp, io, 1);
 
-       while (1) {
-               set_current_state(TASK_UNINTERRUPTIBLE);
-
-               if (!atomic_read(&io->count))
-                       break;
-
-               io_schedule();
-       }
-       set_current_state(TASK_RUNNING);
+       wait_for_completion_io(&wait);
 
        if (error_bits)
                *error_bits = io->error_bits;
@@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
        io = mempool_alloc(client->pool, GFP_NOIO);
        io->error_bits = 0;
        atomic_set(&io->count, 1); /* see dispatch_io() */
-       io->sleeper = NULL;
+       io->wait = NULL;
        io->client = client;
        io->callback = fn;
        io->context = context;
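
A userspace analogue of the dm-io conversion above: instead of remembering the sleeping task and waking it by hand, the waiter owns a completion (a semaphore here) and whoever finishes last signals it, mirroring complete()/wait_for_completion_io():

    #include <pthread.h>
    #include <semaphore.h>
    #include <stdio.h>

    static sem_t done;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int pending = 2;             /* two in-flight regions */

    static void *region_done(void *arg)
    {
        (void)arg;
        pthread_mutex_lock(&lock);
        if (--pending == 0)
            sem_post(&done);            /* complete(io->wait) */
        pthread_mutex_unlock(&lock);
        return NULL;
    }

    int main(void)
    {
        pthread_t t1, t2;

        sem_init(&done, 0, 0);
        pthread_create(&t1, NULL, region_done, NULL);
        pthread_create(&t2, NULL, region_done, NULL);

        sem_wait(&done);                /* wait_for_completion_io(&wait) */
        puts("all regions done");

        pthread_join(t1, NULL);
        pthread_join(t2, NULL);
        return 0;
    }
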
index 3f6fd9d..f4167b0 100644 (file)
@@ -1611,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti)
 
        spin_lock_irqsave(&m->lock, flags);
 
-       /* pg_init in progress, requeue until done */
-       if (!pg_ready(m)) {
+       /* pg_init in progress or no paths available */
+       if (m->pg_init_in_progress ||
+           (!m->nr_valid_paths && m->queue_if_no_path)) {
                busy = 1;
                goto out;
        }
index b086a94..e9d33ad 100644 (file)
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
 
        disk_super = dm_block_data(sblock);
 
+       /* Verify the data block size hasn't changed */
+       if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
+               DMERR("changing the data block size (from %u to %llu) is not supported",
+                     le32_to_cpu(disk_super->data_block_size),
+                     (unsigned long long)pmd->data_block_size);
+               r = -EINVAL;
+               goto bad_unlock_sblock;
+       }
+
        r = __check_incompat_features(disk_super, pmd);
        if (r < 0)
                goto bad_unlock_sblock;
index c99003e..b9a64bb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2003 Christophe Saout <christophe@saout.de>
+ * Copyright (C) 2003 Jana Saout <jana@saout.de>
  *
  * This file is released under the GPL.
  */
@@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)
 module_init(dm_zero_init)
 module_exit(dm_zero_exit)
 
-MODULE_AUTHOR("Christophe Saout <christophe@saout.de>");
+MODULE_AUTHOR("Jana Saout <jana@saout.de>");
 MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
 MODULE_LICENSE("GPL");
index 437d990..32b958d 100644 (file)
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
 
 static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
 
+static struct workqueue_struct *deferred_remove_workqueue;
+
 /*
  * For bio-based dm.
  * One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
        if (r)
                goto out_free_rq_tio_cache;
 
+       deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
+       if (!deferred_remove_workqueue) {
+               r = -ENOMEM;
+               goto out_uevent_exit;
+       }
+
        _major = major;
        r = register_blkdev(_major, _name);
        if (r < 0)
-               goto out_uevent_exit;
+               goto out_free_workqueue;
 
        if (!_major)
                _major = r;
 
        return 0;
 
+out_free_workqueue:
+       destroy_workqueue(deferred_remove_workqueue);
 out_uevent_exit:
        dm_uevent_exit();
 out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
 static void local_exit(void)
 {
        flush_scheduled_work();
+       destroy_workqueue(deferred_remove_workqueue);
 
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
 
        if (atomic_dec_and_test(&md->open_count) &&
            (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
-               schedule_work(&deferred_remove_work);
+               queue_work(deferred_remove_workqueue, &deferred_remove_work);
 
        dm_put(md);
 
index 3484685..32fc19c 100644 (file)
@@ -5599,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
        if (mddev->in_sync)
                info.state = (1<<MD_SB_CLEAN);
        if (mddev->bitmap && mddev->bitmap_info.offset)
-               info.state = (1<<MD_SB_BITMAP_PRESENT);
+               info.state |= (1<<MD_SB_BITMAP_PRESENT);
        info.active_disks  = insync;
        info.working_disks = working;
        info.failed_disks  = failed;
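
The get_array_info fix in isolation: a plain '=' wiped the MD_SB_CLEAN bit that had just been set, while '|=' accumulates flags in the bitmask. Bit numbers follow the md superblock definitions:

    #include <stdio.h>

    #define MD_SB_CLEAN             0
    #define MD_SB_BITMAP_PRESENT    8

    int main(void)
    {
        int state = 0;

        state = (1 << MD_SB_CLEAN);
        state |= (1 << MD_SB_BITMAP_PRESENT);   /* keeps bit 0 */

        printf("state=0x%x\n", state);          /* 0x101, both flags set */
        return 0;
    }
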
@@ -7501,6 +7501,19 @@ void md_do_sync(struct md_thread *thread)
                            rdev->recovery_offset < j)
                                j = rdev->recovery_offset;
                rcu_read_unlock();
+
+               /* If there is a bitmap, we need to make sure all
+                * writes that started before we added a spare
+                * complete before we start doing a recovery.
+                * Otherwise the write might complete and (via
+                * bitmap_endwrite) set a bit in the bitmap after the
+                * recovery has checked that bit and skipped that
+                * region.
+                */
+               if (mddev->bitmap) {
+                       mddev->pers->quiesce(mddev, 1);
+                       mddev->pers->quiesce(mddev, 0);
+               }
        }
 
        printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
index 8a86b30..059e611 100644 (file)
@@ -1276,7 +1276,8 @@ static int dvb_net_add_if(struct dvb_net *dvbnet, u16 pid, u8 feedtype)
        if ((if_num = get_if(dvbnet)) < 0)
                return -EINVAL;
 
-       net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb", dvb_net_setup);
+       net = alloc_netdev(sizeof(struct dvb_net_priv), "dvb",
+                          NET_NAME_UNKNOWN, dvb_net_setup);
        if (!net)
                return -ENOMEM;
 
index 8637d2e..2e3cdcf 100644 (file)
@@ -60,7 +60,7 @@ static int si2168_cmd_execute(struct si2168 *s, struct si2168_cmd *cmd)
                                jiffies_to_msecs(jiffies) -
                                (jiffies_to_msecs(timeout) - TIMEOUT));
 
-               if (!(cmd->args[0] >> 7) & 0x01) {
+               if (!((cmd->args[0] >> 7) & 0x01)) {
                        ret = -ETIMEDOUT;
                        goto err_mutex_unlock;
                }
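
The precedence bug fixed above (and again in the si2157 hunk further down): '!' binds tighter than '&', so !(x >> 7) & 0x01 negates the shifted value before masking. For an 8-bit status the two forms happen to agree, but once the shift can leave more than one bit set they diverge:

    #include <stdio.h>

    int main(void)
    {
        unsigned int status = 0x100;    /* bit 8 set, bit 7 clear */

        printf("buggy: %d\n", !(status >> 7) & 0x01);   /* !(2) & 1 -> 0 */
        printf("fixed: %d\n", !((status >> 7) & 0x01)); /* !(2 & 1) -> 1 */
        return 0;
    }
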
@@ -485,20 +485,6 @@ static int si2168_init(struct dvb_frontend *fe)
        if (ret)
                goto err;
 
-       cmd.args[0] = 0x05;
-       cmd.args[1] = 0x00;
-       cmd.args[2] = 0xaa;
-       cmd.args[3] = 0x4d;
-       cmd.args[4] = 0x56;
-       cmd.args[5] = 0x40;
-       cmd.args[6] = 0x00;
-       cmd.args[7] = 0x00;
-       cmd.wlen = 8;
-       cmd.rlen = 1;
-       ret = si2168_cmd_execute(s, &cmd);
-       if (ret)
-               goto err;
-
        /* cold state - try to download firmware */
        dev_info(&s->client->dev, "%s: found a '%s' in cold state\n",
                        KBUILD_MODNAME, si2168_ops.info.name);
index 2a343e8..53f7f06 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/firmware.h>
 #include <linux/i2c-mux.h>
 
-#define SI2168_FIRMWARE "dvb-demod-si2168-01.fw"
+#define SI2168_FIRMWARE "dvb-demod-si2168-02.fw"
 
 /* state struct */
 struct si2168 {
index 522fe00..9619be5 100644 (file)
@@ -668,6 +668,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
        struct dtv_frontend_properties *c = &fe->dtv_property_cache;
        int ret, i;
        u8 mode, rolloff, pilot, inversion, div;
+       fe_modulation_t modulation;
 
        dev_dbg(&priv->i2c->dev,
                        "%s: delivery_system=%d modulation=%d frequency=%d symbol_rate=%d inversion=%d pilot=%d rolloff=%d\n",
@@ -702,10 +703,13 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        switch (c->delivery_system) {
        case SYS_DVBS:
+               modulation = QPSK;
                rolloff = 0;
                pilot = 2;
                break;
        case SYS_DVBS2:
+               modulation = c->modulation;
+
                switch (c->rolloff) {
                case ROLLOFF_20:
                        rolloff = 2;
@@ -750,7 +754,7 @@ static int tda10071_set_frontend(struct dvb_frontend *fe)
 
        for (i = 0, mode = 0xff; i < ARRAY_SIZE(TDA10071_MODCOD); i++) {
                if (c->delivery_system == TDA10071_MODCOD[i].delivery_system &&
-                       c->modulation == TDA10071_MODCOD[i].modulation &&
+                       modulation == TDA10071_MODCOD[i].modulation &&
                        c->fec_inner == TDA10071_MODCOD[i].fec) {
                        mode = TDA10071_MODCOD[i].val;
                        dev_dbg(&priv->i2c->dev, "%s: mode found=%02x\n",
@@ -834,10 +838,10 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
 
        switch ((buf[1] >> 0) & 0x01) {
        case 0:
-               c->inversion = INVERSION_OFF;
+               c->inversion = INVERSION_ON;
                break;
        case 1:
-               c->inversion = INVERSION_ON;
+               c->inversion = INVERSION_OFF;
                break;
        }
 
@@ -856,7 +860,7 @@ static int tda10071_get_frontend(struct dvb_frontend *fe)
        if (ret)
                goto error;
 
-       c->symbol_rate = (buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0);
+       c->symbol_rate = ((buf[0] << 16) | (buf[1] << 8) | (buf[2] << 0)) * 1000;
 
        return ret;
 error:
index 4baf14b..4204861 100644 (file)
@@ -55,6 +55,7 @@ static struct tda10071_modcod {
        { SYS_DVBS2, QPSK,  FEC_8_9,  0x0a },
        { SYS_DVBS2, QPSK,  FEC_9_10, 0x0b },
        /* 8PSK */
+       { SYS_DVBS2, PSK_8, FEC_AUTO, 0x00 },
        { SYS_DVBS2, PSK_8, FEC_3_5,  0x0c },
        { SYS_DVBS2, PSK_8, FEC_2_3,  0x0d },
        { SYS_DVBS2, PSK_8, FEC_3_4,  0x0e },
index e65c760..0006d6b 100644 (file)
@@ -179,7 +179,7 @@ static const struct v4l2_file_operations ts_fops =
        .read     = vb2_fop_read,
        .poll     = vb2_fop_poll,
        .mmap     = vb2_fop_mmap,
-       .ioctl    = video_ioctl2,
+       .unlocked_ioctl = video_ioctl2,
 };
 
 static const struct v4l2_ioctl_ops ts_ioctl_ops = {
index a7ed164..1e4ec69 100644 (file)
@@ -269,6 +269,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 5bb085b..b431b58 100644 (file)
@@ -233,6 +233,7 @@ err:
                list_del(&buf->list);
                vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
        }
+       spin_unlock_irqrestore(&common->irqlock, flags);
 
        return ret;
 }
index 271a752..fa4cc7b 100644 (file)
@@ -57,7 +57,7 @@ static int si2157_cmd_execute(struct si2157 *s, struct si2157_cmd *cmd)
                        jiffies_to_msecs(jiffies) -
                        (jiffies_to_msecs(timeout) - TIMEOUT));
 
-       if (!(buf[0] >> 7) & 0x01) {
+       if (!((buf[0] >> 7) & 0x01)) {
                ret = -ETIMEDOUT;
                goto err_mutex_unlock;
        } else {
index 021e4d3..7b9b75f 100644 (file)
@@ -704,15 +704,41 @@ static int af9035_read_config(struct dvb_usb_device *d)
                if (ret < 0)
                        goto err;
 
-               if (tmp == 0x00)
-                       dev_dbg(&d->udev->dev,
-                                       "%s: [%d]tuner not set, using default\n",
-                                       __func__, i);
-               else
+               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
+                               __func__, i, tmp);
+
+               /* tuner sanity check */
+               if (state->chip_type == 0x9135) {
+                       if (state->chip_version == 0x02) {
+                               /* IT9135 BX (v2) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_60:
+                               case AF9033_TUNER_IT9135_61:
+                               case AF9033_TUNER_IT9135_62:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       } else {
+                               /* IT9135 AX (v1) */
+                               switch (tmp) {
+                               case AF9033_TUNER_IT9135_38:
+                               case AF9033_TUNER_IT9135_51:
+                               case AF9033_TUNER_IT9135_52:
+                                       state->af9033_config[i].tuner = tmp;
+                                       break;
+                               }
+                       }
+               } else {
+                       /* AF9035 */
                        state->af9033_config[i].tuner = tmp;
+               }
 
-               dev_dbg(&d->udev->dev, "%s: [%d]tuner=%02x\n",
-                               __func__, i, state->af9033_config[i].tuner);
+               if (state->af9033_config[i].tuner != tmp) {
+                       dev_info(&d->udev->dev,
+                                       "%s: [%d] overriding tuner from %02x to %02x\n",
+                                       KBUILD_MODNAME, i, tmp,
+                                       state->af9033_config[i].tuner);
+               }
 
                switch (state->af9033_config[i].tuner) {
                case AF9033_TUNER_TUA9001:
index 2fd1c5e..339adce 100644 (file)
@@ -928,6 +928,7 @@ static const struct usb_device_id device_table[] = {
        {USB_DEVICE(0x093a, 0x2620)},
        {USB_DEVICE(0x093a, 0x2621)},
        {USB_DEVICE(0x093a, 0x2622), .driver_info = FL_VFLIP},
+       {USB_DEVICE(0x093a, 0x2623), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2624), .driver_info = FL_VFLIP},
        {USB_DEVICE(0x093a, 0x2625)},
        {USB_DEVICE(0x093a, 0x2626)},
index 0500c41..6bce01a 100644 (file)
@@ -82,7 +82,7 @@ static void hdpvr_read_bulk_callback(struct urb *urb)
 }
 
 /*=========================================================================*/
-/* bufffer bits */
+/* buffer bits */
 
 /* function expects dev->io_mutex to be hold by caller */
 int hdpvr_cancel_queue(struct hdpvr_device *dev)
@@ -926,7 +926,7 @@ static int hdpvr_s_ctrl(struct v4l2_ctrl *ctrl)
        case V4L2_CID_MPEG_AUDIO_ENCODING:
                if (dev->flags & HDPVR_FLAG_AC3_CAP) {
                        opt->audio_codec = ctrl->val;
-                       return hdpvr_set_audio(dev, opt->audio_input,
+                       return hdpvr_set_audio(dev, opt->audio_input + 1,
                                              opt->audio_codec);
                }
                return 0;
@@ -1198,7 +1198,7 @@ int hdpvr_register_videodev(struct hdpvr_device *dev, struct device *parent,
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_AUDIO_ENCODING,
                ac3 ? V4L2_MPEG_AUDIO_ENCODING_AC3 : V4L2_MPEG_AUDIO_ENCODING_AAC,
-               0x7, V4L2_MPEG_AUDIO_ENCODING_AAC);
+               0x7, ac3 ? dev->options.audio_codec : V4L2_MPEG_AUDIO_ENCODING_AAC);
        v4l2_ctrl_new_std_menu(hdl, &hdpvr_ctrl_ops,
                V4L2_CID_MPEG_VIDEO_ENCODING,
                V4L2_MPEG_VIDEO_ENCODING_MPEG_4_AVC, 0x3,
index 4ae54ca..ce1c9f5 100644 (file)
@@ -610,10 +610,10 @@ struct v4l2_fract v4l2_calc_aspect_ratio(u8 hor_landscape, u8 vert_portrait)
                aspect.denominator = 9;
        } else if (ratio == 34) {
                aspect.numerator = 4;
-               aspect.numerator = 3;
+               aspect.denominator = 3;
        } else if (ratio == 68) {
                aspect.numerator = 15;
-               aspect.numerator = 9;
+               aspect.denominator = 9;
        } else {
                aspect.numerator = hor_landscape + 99;
                aspect.denominator = 100;
index 2a635b6..c880ba6 100644 (file)
@@ -601,6 +601,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
        pcr->slots[RTSX_MS_CARD].card_event = NULL;
        msh = host->msh;
        host->eject = true;
+       cancel_work_sync(&host->handle_req);
 
        mutex_lock(&host->host_mutex);
        if (host->req) {
index ee8204c..6cc4b6a 100644 (file)
@@ -760,6 +760,7 @@ config MFD_SYSCON
 config MFD_DAVINCI_VOICECODEC
        tristate
        select MFD_CORE
+       select REGMAP_MMIO
 
 config MFD_TI_AM335X_TSCADC
        tristate "TI ADC / Touch Screen chip support"
@@ -1225,7 +1226,7 @@ config MFD_WM8994
          functionality of the device other drivers must be enabled.
 
 config MFD_STW481X
-       bool "Support for ST Microelectronics STw481x"
+       tristate "Support for ST Microelectronics STw481x"
        depends on I2C && ARCH_NOMADIK
        select REGMAP_I2C
        select MFD_CORE
@@ -1248,7 +1249,7 @@ config MCP_SA11X0
 
 # Chip drivers
 config MCP_UCB1200
-       bool "Support for UCB1200 / UCB1300"
+       tristate "Support for UCB1200 / UCB1300"
        depends on MCP_SA11X0
        select MCP
 
index a8ee4a3..cf2e6a1 100644 (file)
@@ -591,7 +591,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
                num_irqs = AB8500_NR_IRQS;
 
        /* If ->irq_base is zero this will give a linear mapping */
-       ab8500->domain = irq_domain_add_simple(NULL,
+       ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node,
                        num_irqs, 0,
                        &ab8500_irq_ops, ab8500);
 
index a43d0c4..ee94023 100644 (file)
@@ -54,7 +54,7 @@ config AD525X_DPOT_SPI
 config ATMEL_PWM
        tristate "Atmel AT32/AT91 PWM support"
        depends on HAVE_CLK
-       depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45
+       depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
        help
          This option enables device driver support for the PWM channels
          on certain Atmel processors.  Pulse Width Modulation is used for
index 3fac67a..557f978 100644 (file)
@@ -544,7 +544,8 @@ xpnet_init(void)
         * use ether_setup() to init the majority of our device
         * structure and then override the necessary pieces.
         */
-       xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, ether_setup);
+       xpnet_device = alloc_netdev(0, XPNET_DEVICE_NAME, NET_NAME_UNKNOWN,
+                                   ether_setup);
        if (xpnet_device == NULL) {
                kfree(xpnet_broadcast_partitions);
                return -ENOMEM;
index 73068e5..3250fc1 100644 (file)
@@ -199,7 +199,7 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
        func = kzalloc(sizeof(*func) + sizeof(*func->template) * num,
                        GFP_KERNEL);
        if (!func)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        func->syscfg = syscfg;
        func->num_templates = num;
@@ -231,10 +231,14 @@ static struct regmap *vexpress_syscfg_regmap_init(struct device *dev,
        func->regmap = regmap_init(dev, NULL, func,
                        &vexpress_syscfg_regmap_config);
 
-       if (IS_ERR(func->regmap))
+       if (IS_ERR(func->regmap)) {
+               void *err = func->regmap;
+
                kfree(func);
-       else
-               list_add(&func->list, &syscfg->funcs);
+               return err;
+       }
+
+       list_add(&func->list, &syscfg->funcs);
 
        return func->regmap;
 }
index 2421835..1916174 100644 (file)
@@ -17,7 +17,8 @@
  * along with this program; if not, write to the Free Software
  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  *
- * Maintained by: Dmitry Torokhov <dtor@vmware.com>
+ * Maintained by:      Xavier Deguillard <xdeguillard@vmware.com>
+ *                     Philip Moltmann <moltmann@vmware.com>
  */
 
 /*
index e4ec355..a7543ba 100644 (file)
 /* Atmel chips */
 #define AT49BV640D     0x02de
 #define AT49BV640DT    0x02db
+/* Sharp chips */
+#define LH28F640BFHE_PTTL90    0x00b0
+#define LH28F640BFHE_PBTL90    0x00b1
+#define LH28F640BFHE_PTTL70A   0x00b2
+#define LH28F640BFHE_PBTL70A   0x00b3
 
 static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
 static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
                (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
 };
 
+static int is_LH28F640BF(struct cfi_private *cfi)
+{
+       /* Sharp LH28F640BF Family */
+       if (cfi->mfr == CFI_MFR_SHARP && (
+           cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
+           cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
+               return 1;
+       return 0;
+}
+
+static void fixup_LH28F640BF(struct mtd_info *mtd)
+{
+       struct map_info *map = mtd->priv;
+       struct cfi_private *cfi = map->fldrv_priv;
+       struct cfi_pri_intelext *extp = cfi->cmdset_priv;
+
+       /* Reset the Partition Configuration Register on LH28F640BF
+        * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
+       if (is_LH28F640BF(cfi)) {
+               printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
+               map_write(map, CMD(0x60), 0);
+               map_write(map, CMD(0x04), 0);
+
+               /* We have set one single partition thus
+                * Simultaneous Operations are not allowed */
+               printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
+               extp->FeatureSupport &= ~512;
+       }
+}
+
 static void fixup_use_point(struct mtd_info *mtd)
 {
        struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
        { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
        { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
        { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
+       { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
+       { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
        { 0, 0, NULL }
 };
 
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
        initial_adr = adr;
        cmd_adr = adr & ~(wbufsize-1);
 
+       /* Sharp LH28F640BF chips need the first address for the
+        * Page Buffer Program command. See Table 5 of
+        * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
+       if (is_LH28F640BF(cfi))
+               cmd_adr = adr;
+
        /* Let's determine this according to the interleave only once */
        write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
 
index 7df8694..b4f61c7 100644 (file)
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
                                        ELM_SYNDROME_FRAGMENT_1 + offset);
                        regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
                                        ELM_SYNDROME_FRAGMENT_0 + offset);
+                       break;
                default:
                        return -EINVAL;
                }
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
                                        regs->elm_syndrome_fragment_1[i]);
                        elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
                                        regs->elm_syndrome_fragment_0[i]);
+                       break;
                default:
                        return -EINVAL;
                }
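
The omap_elm fix in miniature: without a break, the successful case fell straight through into "default: return -EINVAL", so a correct save/restore reported failure:

    #include <stdio.h>

    static int context_save(int bch_type)
    {
        switch (bch_type) {
        case 8:
            /* save the syndrome fragment registers ... */
            break;          /* previously missing: fell into default */
        default:
            return -22;     /* -EINVAL */
        }
        return 0;
    }

    int main(void)
    {
        printf("save(8) = %d\n", context_save(8));  /* 0, not -22 */
        return 0;
    }
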
index 41167e9..4f3e80c 100644 (file)
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
                ecc->layout->oobavail += ecc->layout->oobfree[i].length;
        mtd->oobavail = ecc->layout->oobavail;
 
-       /* ECC sanity check: warn noisily if it's too weak */
-       WARN_ON(!nand_ecc_strength_good(mtd));
+       /* ECC sanity check: warn if it's too weak */
+       if (!nand_ecc_strength_good(mtd))
+               pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
+                       mtd->name);
 
        /*
         * Set the number of read / write steps for one page depending on ECC
index b04e7d0..0431b46 100644 (file)
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
                parent = *p;
                av = rb_entry(parent, struct ubi_ainf_volume, rb);
 
-               if (vol_id < av->vol_id)
+               if (vol_id > av->vol_id)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
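
Essence of the add_vol fix above: the insertion walk must order nodes with exactly the comparison the lookup walk uses, or lookups descend into the wrong subtree (the second hunk below is a separate slip, testing ret where err holds the I/O result). A toy tree ordered the same way, larger keys to the left:

    #include <stdio.h>

    struct node { int key; struct node *l, *r; };

    static struct node *insert(struct node *root, struct node *n)
    {
        struct node **p = &root;

        /* same sense as find() below: bigger keys go left */
        while (*p)
            p = n->key > (*p)->key ? &(*p)->l : &(*p)->r;
        *p = n;
        return root;
    }

    static struct node *find(struct node *root, int key)
    {
        while (root && root->key != key)
            root = key > root->key ? root->l : root->r;
        return root;
    }

    int main(void)
    {
        struct node a = {1}, b = {2}, c = {3};
        struct node *root = NULL;

        root = insert(root, &a);
        root = insert(root, &b);
        root = insert(root, &c);
        printf("%s\n", find(root, 3) ? "found" : "lost");   /* found */
        return 0;
    }
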
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
                                pnum, err);
                        ret = err > 0 ? UBI_BAD_FASTMAP : err;
                        goto out;
-               } else if (ret == UBI_IO_BITFLIPS)
+               } else if (err == UBI_IO_BITFLIPS)
                        scrub = 1;
 
                /*
index 89402c3..c6f6f69 100644 (file)
@@ -148,6 +148,7 @@ config VXLAN
        tristate "Virtual eXtensible Local Area Network (VXLAN)"
        depends on INET
        select NET_IP_TUNNEL
+       select NET_UDP_TUNNEL
        ---help---
          This allows one to create vxlan virtual interfaces that provide
          Layer 2 Networks over Layer 3 Networks. VXLAN is often used
index 3fef8a8..fa49d45 100644 (file)
@@ -67,7 +67,6 @@ obj-$(CONFIG_USB_PEGASUS)       += usb/
 obj-$(CONFIG_USB_RTL8150)       += usb/
 obj-$(CONFIG_USB_HSO)          += usb/
 obj-$(CONFIG_USB_USBNET)        += usb/
-obj-$(CONFIG_USB_ZD1201)        += usb/
 obj-$(CONFIG_USB_IPHETH)        += usb/
 obj-$(CONFIG_USB_CDC_PHONET)   += usb/
 
index a956053..3b790de 100644 (file)
@@ -346,7 +346,8 @@ struct net_device *alloc_arcdev(const char *name)
        struct net_device *dev;
 
        dev = alloc_netdev(sizeof(struct arcnet_local),
-                          name && *name ? name : "arc%d", arcdev_setup);
+                          name && *name ? name : "arc%d", NET_NAME_UNKNOWN,
+                          arcdev_setup);
        if(dev) {
                struct arcnet_local *lp = netdev_priv(dev);
                spin_lock_init(&lp->lock);
index 0dfeaf5..ee2c73a 100644 (file)
@@ -20,8 +20,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/skbuff.h>
 #include <linux/if_ether.h>
 #include <linux/netdevice.h>
@@ -301,8 +299,8 @@ static u16 __get_link_speed(struct port *port)
                }
        }
 
-       pr_debug("Port %d Received link speed %d update from adapter\n",
-                port->actor_port_number, speed);
+       netdev_dbg(slave->bond->dev, "Port %d Received link speed %d update from adapter\n",
+                  port->actor_port_number, speed);
        return speed;
 }
 
@@ -329,14 +327,14 @@ static u8 __get_duplex(struct port *port)
                switch (slave->duplex) {
                case DUPLEX_FULL:
                        retval = 0x1;
-                       pr_debug("Port %d Received status full duplex update from adapter\n",
-                                port->actor_port_number);
+                       netdev_dbg(slave->bond->dev, "Port %d Received status full duplex update from adapter\n",
+                                  port->actor_port_number);
                        break;
                case DUPLEX_HALF:
                default:
                        retval = 0x0;
-                       pr_debug("Port %d Received status NOT full duplex update from adapter\n",
-                                port->actor_port_number);
+                       netdev_dbg(slave->bond->dev, "Port %d Received status NOT full duplex update from adapter\n",
+                                  port->actor_port_number);
                        break;
                }
        }
@@ -1079,9 +1077,8 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
                        /* detect loopback situation */
                        if (MAC_ADDRESS_EQUAL(&(lacpdu->actor_system),
                                              &(port->actor_system))) {
-                               pr_err("%s: An illegal loopback occurred on adapter (%s)\n"
+                               netdev_err(port->slave->bond->dev, "An illegal loopback occurred on adapter (%s)\n"
                                       "Check the configuration to verify that all adapters are connected to 802.3ad compliant switch ports\n",
-                                      port->slave->bond->dev->name,
                                       port->slave->dev->name);
                                return;
                        }
@@ -1269,9 +1266,9 @@ static void ad_port_selection_logic(struct port *port)
                                port->next_port_in_aggregator = NULL;
                                port->actor_port_aggregator_identifier = 0;
 
-                               pr_debug("Port %d left LAG %d\n",
-                                        port->actor_port_number,
-                                        temp_aggregator->aggregator_identifier);
+                               netdev_dbg(bond->dev, "Port %d left LAG %d\n",
+                                          port->actor_port_number,
+                                          temp_aggregator->aggregator_identifier);
                                /* if the aggregator is empty, clear its
                                 * parameters, and set it ready to be attached
                                 */
@@ -1284,11 +1281,11 @@ static void ad_port_selection_logic(struct port *port)
                        /* meaning: the port was related to an aggregator
                         * but was not on the aggregator port list
                         */
-                       pr_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
-                                           port->slave->bond->dev->name,
-                                           port->actor_port_number,
-                                           port->slave->dev->name,
-                                           port->aggregator->aggregator_identifier);
+                       net_warn_ratelimited("%s: Warning: Port %d (on %s) was related to aggregator %d but was not on its port list\n",
+                                            port->slave->bond->dev->name,
+                                            port->actor_port_number,
+                                            port->slave->dev->name,
+                                            port->aggregator->aggregator_identifier);
                }
        }
        /* search on all aggregators for a suitable aggregator for this port */
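
Where the ratelimited variants are used, the series likewise switches
pr_warn_ratelimited() to net_warn_ratelimited(). The practical
difference is the ratelimit state: the pr_* variants keep a static
per-callsite struct ratelimit_state, while the net_* variants share
the global net_ratelimit() budget used across the network stack.
Roughly, as a sketch of the include/linux/net.h definition:

	#define net_warn_ratelimited(fmt, ...)			\
	do {							\
		if (net_ratelimit())				\
			pr_warn(fmt, ##__VA_ARGS__);		\
	} while (0)
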
@@ -1318,9 +1315,9 @@ static void ad_port_selection_logic(struct port *port)
                        port->next_port_in_aggregator = aggregator->lag_ports;
                        port->aggregator->num_of_ports++;
                        aggregator->lag_ports = port;
-                       pr_debug("Port %d joined LAG %d(existing LAG)\n",
-                                port->actor_port_number,
-                                port->aggregator->aggregator_identifier);
+                       netdev_dbg(bond->dev, "Port %d joined LAG %d(existing LAG)\n",
+                                  port->actor_port_number,
+                                  port->aggregator->aggregator_identifier);
 
                        /* mark this port as selected */
                        port->sm_vars |= AD_PORT_SELECTED;
@@ -1363,12 +1360,11 @@ static void ad_port_selection_logic(struct port *port)
                        /* mark this port as selected */
                        port->sm_vars |= AD_PORT_SELECTED;
 
-                       pr_debug("Port %d joined LAG %d(new LAG)\n",
-                                port->actor_port_number,
-                                port->aggregator->aggregator_identifier);
+                       netdev_dbg(bond->dev, "Port %d joined LAG %d(new LAG)\n",
+                                  port->actor_port_number,
+                                  port->aggregator->aggregator_identifier);
                } else {
-                       pr_err("%s: Port %d (on %s) did not find a suitable aggregator\n",
-                              port->slave->bond->dev->name,
+                       netdev_err(bond->dev, "Port %d (on %s) did not find a suitable aggregator\n",
                               port->actor_port_number, port->slave->dev->name);
                }
        }
@@ -1445,9 +1441,9 @@ static struct aggregator *ad_agg_selection_test(struct aggregator *best,
                break;
 
        default:
-               pr_warn_ratelimited("%s: Impossible agg select mode %d\n",
-                                   curr->slave->bond->dev->name,
-                                   __get_agg_selection_mode(curr->lag_ports));
+               net_warn_ratelimited("%s: Impossible agg select mode %d\n",
+                                    curr->slave->bond->dev->name,
+                                    __get_agg_selection_mode(curr->lag_ports));
                break;
        }
 
@@ -1539,40 +1535,40 @@ static void ad_agg_selection_logic(struct aggregator *agg)
 
        /* if there is new best aggregator, activate it */
        if (best) {
-               pr_debug("best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
-                        best->aggregator_identifier, best->num_of_ports,
-                        best->actor_oper_aggregator_key,
-                        best->partner_oper_aggregator_key,
-                        best->is_individual, best->is_active);
-               pr_debug("best ports %p slave %p %s\n",
-                        best->lag_ports, best->slave,
-                        best->slave ? best->slave->dev->name : "NULL");
+               netdev_dbg(bond->dev, "best Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+                          best->aggregator_identifier, best->num_of_ports,
+                          best->actor_oper_aggregator_key,
+                          best->partner_oper_aggregator_key,
+                          best->is_individual, best->is_active);
+               netdev_dbg(bond->dev, "best ports %p slave %p %s\n",
+                          best->lag_ports, best->slave,
+                          best->slave ? best->slave->dev->name : "NULL");
 
                bond_for_each_slave_rcu(bond, slave, iter) {
                        agg = &(SLAVE_AD_INFO(slave)->aggregator);
 
-                       pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
-                                agg->aggregator_identifier, agg->num_of_ports,
-                                agg->actor_oper_aggregator_key,
-                                agg->partner_oper_aggregator_key,
-                                agg->is_individual, agg->is_active);
+                       netdev_dbg(bond->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+                                  agg->aggregator_identifier, agg->num_of_ports,
+                                  agg->actor_oper_aggregator_key,
+                                  agg->partner_oper_aggregator_key,
+                                  agg->is_individual, agg->is_active);
                }
 
                /* check if any partner replies */
                if (best->is_individual) {
-                       pr_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
-                                           best->slave ?
-                                           best->slave->bond->dev->name : "NULL");
+                       net_warn_ratelimited("%s: Warning: No 802.3ad response from the link partner for any adapters in the bond\n",
+                                            best->slave ?
+                                            best->slave->bond->dev->name : "NULL");
                }
 
                best->is_active = 1;
-               pr_debug("LAG %d chosen as the active LAG\n",
-                        best->aggregator_identifier);
-               pr_debug("Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
-                        best->aggregator_identifier, best->num_of_ports,
-                        best->actor_oper_aggregator_key,
-                        best->partner_oper_aggregator_key,
-                        best->is_individual, best->is_active);
+               netdev_dbg(bond->dev, "LAG %d chosen as the active LAG\n",
+                          best->aggregator_identifier);
+               netdev_dbg(bond->dev, "Agg=%d; P=%d; a k=%d; p k=%d; Ind=%d; Act=%d\n",
+                          best->aggregator_identifier, best->num_of_ports,
+                          best->actor_oper_aggregator_key,
+                          best->partner_oper_aggregator_key,
+                          best->is_individual, best->is_active);
 
                /* disable the ports that were related to the former
                 * active_aggregator
@@ -1908,13 +1904,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("Warning: %s: Trying to unbind an uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(bond->dev, "Trying to unbind an uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
-       pr_debug("Unbinding Link Aggregation Group %d\n",
-                aggregator->aggregator_identifier);
+       netdev_dbg(bond->dev, "Unbinding Link Aggregation Group %d\n",
+                  aggregator->aggregator_identifier);
 
        /* Tell the partner that this port is not suitable for aggregation */
        port->actor_oper_port_state &= ~AD_STATE_AGGREGATION;
@@ -1949,14 +1945,13 @@ void bond_3ad_unbind_slave(struct slave *slave)
                         * new aggregator
                         */
                        if ((new_aggregator) && ((!new_aggregator->lag_ports) || ((new_aggregator->lag_ports == port) && !new_aggregator->lag_ports->next_port_in_aggregator))) {
-                               pr_debug("Some port(s) related to LAG %d - replacing with LAG %d\n",
-                                        aggregator->aggregator_identifier,
-                                        new_aggregator->aggregator_identifier);
+                               netdev_dbg(bond->dev, "Some port(s) related to LAG %d - replacing with LAG %d\n",
+                                          aggregator->aggregator_identifier,
+                                          new_aggregator->aggregator_identifier);
 
                                if ((new_aggregator->lag_ports == port) &&
                                    new_aggregator->is_active) {
-                                       pr_info("%s: Removing an active aggregator\n",
-                                               aggregator->slave->bond->dev->name);
+                                       netdev_info(bond->dev, "Removing an active aggregator\n");
                                         select_new_active_agg = 1;
                                }
 
@@ -1986,8 +1981,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                if (select_new_active_agg)
                                        ad_agg_selection_logic(__get_first_agg(port));
                        } else {
-                               pr_warn("%s: Warning: unbinding aggregator, and could not find a new aggregator for its ports\n",
-                                       slave->bond->dev->name);
+                               netdev_warn(bond->dev, "unbinding aggregator, and could not find a new aggregator for its ports\n");
                        }
                } else {
                        /* in case that the only port related to this
@@ -1996,8 +1990,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                        select_new_active_agg = aggregator->is_active;
                        ad_clear_agg(aggregator);
                        if (select_new_active_agg) {
-                               pr_info("%s: Removing an active aggregator\n",
-                                       slave->bond->dev->name);
+                               netdev_info(bond->dev, "Removing an active aggregator\n");
                                /* select new active aggregator */
                                temp_aggregator = __get_first_agg(port);
                                if (temp_aggregator)
@@ -2006,7 +1999,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                }
        }
 
-       pr_debug("Unbinding port %d\n", port->actor_port_number);
+       netdev_dbg(bond->dev, "Unbinding port %d\n", port->actor_port_number);
 
        /* find the aggregator that this port is connected to */
        bond_for_each_slave(bond, slave_iter, iter) {
@@ -2029,8 +2022,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
                                        select_new_active_agg = temp_aggregator->is_active;
                                        ad_clear_agg(temp_aggregator);
                                        if (select_new_active_agg) {
-                                               pr_info("%s: Removing an active aggregator\n",
-                                                       slave->bond->dev->name);
+                                               netdev_info(bond->dev, "Removing an active aggregator\n");
                                                /* select new active aggregator */
                                                ad_agg_selection_logic(__get_first_agg(port));
                                        }
@@ -2081,8 +2073,8 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
                /* select the active aggregator for the bond */
                if (port) {
                        if (!port->slave) {
-                               pr_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
-                                                   bond->dev->name);
+                               net_warn_ratelimited("%s: Warning: bond's first port is uninitialized\n",
+                                                    bond->dev->name);
                                goto re_arm;
                        }
 
@@ -2096,7 +2088,7 @@ void bond_3ad_state_machine_handler(struct work_struct *work)
        bond_for_each_slave_rcu(bond, slave, iter) {
                port = &(SLAVE_AD_INFO(slave)->port);
                if (!port->slave) {
-                       pr_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
+                       net_warn_ratelimited("%s: Warning: Found an uninitialized port\n",
                                            bond->dev->name);
                        goto re_arm;
                }
@@ -2158,16 +2150,16 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
                port = &(SLAVE_AD_INFO(slave)->port);
 
                if (!port->slave) {
-                       pr_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
-                                           slave->dev->name, slave->bond->dev->name);
+                       net_warn_ratelimited("%s: Warning: port of slave %s is uninitialized\n",
+                                            slave->dev->name, slave->bond->dev->name);
                        return ret;
                }
 
                switch (lacpdu->subtype) {
                case AD_TYPE_LACPDU:
                        ret = RX_HANDLER_CONSUMED;
-                       pr_debug("Received LACPDU on port %d\n",
-                                port->actor_port_number);
+                       netdev_dbg(slave->bond->dev, "Received LACPDU on port %d\n",
+                                  port->actor_port_number);
                        /* Protect against concurrent state machines */
                        __get_state_machine_lock(port);
                        ad_rx_machine(lacpdu, port);
@@ -2182,20 +2174,20 @@ static int bond_3ad_rx_indication(struct lacpdu *lacpdu, struct slave *slave,
 
                        switch (((struct bond_marker *)lacpdu)->tlv_type) {
                        case AD_MARKER_INFORMATION_SUBTYPE:
-                               pr_debug("Received Marker Information on port %d\n",
-                                        port->actor_port_number);
+                               netdev_dbg(slave->bond->dev, "Received Marker Information on port %d\n",
+                                          port->actor_port_number);
                                ad_marker_info_received((struct bond_marker *)lacpdu, port);
                                break;
 
                        case AD_MARKER_RESPONSE_SUBTYPE:
-                               pr_debug("Received Marker Response on port %d\n",
-                                        port->actor_port_number);
+                               netdev_dbg(slave->bond->dev, "Received Marker Response on port %d\n",
+                                          port->actor_port_number);
                                ad_marker_response_received((struct bond_marker *)lacpdu, port);
                                break;
 
                        default:
-                               pr_debug("Received an unknown Marker subtype on slot %d\n",
-                                        port->actor_port_number);
+                               netdev_dbg(slave->bond->dev, "Received an unknown Marker subtype on slot %d\n",
+                                          port->actor_port_number);
                        }
                }
        }
@@ -2216,8 +2208,8 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("Warning: %s: speed changed for uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(slave->bond->dev, "speed changed for uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
@@ -2226,7 +2218,7 @@ void bond_3ad_adapter_speed_changed(struct slave *slave)
        port->actor_admin_port_key &= ~AD_SPEED_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                (__get_link_speed(port) << 1);
-       pr_debug("Port %d changed speed\n", port->actor_port_number);
+       netdev_dbg(slave->bond->dev, "Port %d changed speed\n", port->actor_port_number);
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2249,8 +2241,8 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("%s: Warning: duplex changed for uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(slave->bond->dev, "duplex changed for uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
@@ -2259,7 +2251,7 @@ void bond_3ad_adapter_duplex_changed(struct slave *slave)
        port->actor_admin_port_key &= ~AD_DUPLEX_KEY_BITS;
        port->actor_oper_port_key = port->actor_admin_port_key |=
                __get_duplex(port);
-       pr_debug("Port %d changed duplex\n", port->actor_port_number);
+       netdev_dbg(slave->bond->dev, "Port %d changed duplex\n", port->actor_port_number);
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2283,8 +2275,8 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
 
        /* if slave is null, the whole port is not initialized */
        if (!port->slave) {
-               pr_warn("Warning: %s: link status changed for uninitialized port on %s\n",
-                       slave->bond->dev->name, slave->dev->name);
+               netdev_warn(slave->bond->dev, "link status changed for uninitialized port on %s\n",
+                           slave->dev->name);
                return;
        }
 
@@ -2311,9 +2303,9 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
                port->actor_oper_port_key = (port->actor_admin_port_key &=
                                             ~AD_SPEED_KEY_BITS);
        }
-       pr_debug("Port %d changed link status to %s\n",
-                port->actor_port_number,
-                link == BOND_LINK_UP ? "UP" : "DOWN");
+       netdev_dbg(slave->bond->dev, "Port %d changed link status to %s\n",
+                  port->actor_port_number,
+                  link == BOND_LINK_UP ? "UP" : "DOWN");
        /* there is no need to reselect a new aggregator, just signal the
         * state machines to reinitialize
         */
@@ -2427,8 +2419,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        int agg_id;
 
        if (__bond_3ad_get_active_agg_info(bond, &ad_info)) {
-               pr_debug("%s: Error: __bond_3ad_get_active_agg_info failed\n",
-                        dev->name);
+               netdev_dbg(dev, "__bond_3ad_get_active_agg_info failed\n");
                goto err_free;
        }
 
@@ -2436,7 +2427,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        agg_id = ad_info.aggregator_id;
 
        if (slaves_in_agg == 0) {
-               pr_debug("%s: Error: active aggregator is empty\n", dev->name);
+               netdev_dbg(dev, "active aggregator is empty\n");
                goto err_free;
        }
 
@@ -2462,8 +2453,8 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
        }
 
        if (slave_agg_no >= 0) {
-               pr_err("%s: Error: Couldn't find a slave to tx on for aggregator ID %d\n",
-                      dev->name, agg_id);
+               netdev_err(dev, "Couldn't find a slave to tx on for aggregator ID %d\n",
+                          agg_id);
                goto err_free;
        }
 
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 76c0dad..95dd1f5 100644
@@ -19,8 +19,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/skbuff.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
@@ -202,6 +200,7 @@ static int tlb_initialize(struct bonding *bond)
 static void tlb_deinitialize(struct bonding *bond)
 {
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct tlb_up_slave *arr;
 
        _lock_tx_hashtbl_bh(bond);
 
@@ -209,6 +208,10 @@ static void tlb_deinitialize(struct bonding *bond)
        bond_info->tx_hashtbl = NULL;
 
        _unlock_tx_hashtbl_bh(bond);
+
+       arr = rtnl_dereference(bond_info->slave_arr);
+       if (arr)
+               kfree_rcu(arr, rcu);
 }
 
 static long long compute_gap(struct slave *slave)
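
tlb_deinitialize() also has to release the slave array added later in
this patch. kfree_rcu() defers the actual kfree() until an RCU grace
period has elapsed, so a transmitter that already picked the array up
via rcu_dereference() can finish with it safely; the second argument
names the struct rcu_head member inside the object. The full idiom, as
a sketch for a writer holding RTNL (the hunk above can skip the
unpublish step because the whole bond is being torn down):

	struct tlb_up_slave *arr = rtnl_dereference(bond_info->slave_arr);

	RCU_INIT_POINTER(bond_info->slave_arr, NULL);	/* unpublish */
	if (arr)
		kfree_rcu(arr, rcu);	/* "rcu" is the rcu_head field */
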
@@ -369,7 +372,7 @@ static int rlb_arp_recv(const struct sk_buff *skb, struct bonding *bond,
        if (arp->op_code == htons(ARPOP_REPLY)) {
                /* update rx hash table for this ARP */
                rlb_update_entry_from_arp(bond, arp);
-               pr_debug("Server received an ARP Reply from client\n");
+               netdev_dbg(bond->dev, "Server received an ARP Reply from client\n");
        }
 out:
        return RX_HANDLER_ANOTHER;
@@ -448,11 +451,13 @@ static struct slave *__rlb_next_rx_slave(struct bonding *bond)
  */
 static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 {
-       if (!bond->curr_active_slave)
+       struct slave *curr_active = bond_deref_active_protected(bond);
+
+       if (!curr_active)
                return;
 
        if (!bond->alb_info.primary_is_promisc) {
-               if (!dev_set_promiscuity(bond->curr_active_slave->dev, 1))
+               if (!dev_set_promiscuity(curr_active->dev, 1))
                        bond->alb_info.primary_is_promisc = 1;
                else
                        bond->alb_info.primary_is_promisc = 0;
@@ -460,7 +465,7 @@ static void rlb_teach_disabled_mac_on_primary(struct bonding *bond, u8 addr[])
 
        bond->alb_info.rlb_promisc_timeout_counter = 0;
 
-       alb_send_learning_packets(bond->curr_active_slave, addr, true);
+       alb_send_learning_packets(curr_active, addr, true);
 }
 
 /* slave being removed should not be active at this point
@@ -509,7 +514,7 @@ static void rlb_clear_slave(struct bonding *bond, struct slave *slave)
 
        write_lock_bh(&bond->curr_slave_lock);
 
-       if (slave != bond->curr_active_slave)
+       if (slave != bond_deref_active_protected(bond))
                rlb_teach_disabled_mac_on_primary(bond, slave->dev->dev_addr);
 
        write_unlock_bh(&bond->curr_slave_lock);
@@ -533,8 +538,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
                                 client_info->slave->dev->dev_addr,
                                 client_info->mac_dst);
                if (!skb) {
-                       pr_err("%s: Error: failed to create an ARP packet\n",
-                              client_info->slave->bond->dev->name);
+                       netdev_err(client_info->slave->bond->dev,
+                                  "failed to create an ARP packet\n");
                        continue;
                }
 
@@ -543,8 +548,8 @@ static void rlb_update_client(struct rlb_client_info *client_info)
                if (client_info->vlan_id) {
                        skb = vlan_put_tag(skb, htons(ETH_P_8021Q), client_info->vlan_id);
                        if (!skb) {
-                               pr_err("%s: Error: failed to insert VLAN tag\n",
-                                      client_info->slave->bond->dev->name);
+                               netdev_err(client_info->slave->bond->dev,
+                                          "failed to insert VLAN tag\n");
                                continue;
                        }
                }
@@ -628,8 +633,7 @@ static void rlb_req_update_subnet_clients(struct bonding *bond, __be32 src_ip)
                client_info = &(bond_info->rx_hashtbl[hash_index]);
 
                if (!client_info->slave) {
-                       pr_err("%s: Error: found a client with no channel in the client's hash table\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "found a client with no channel in the client's hash table\n");
                        continue;
                }
                /*update all clients using this src_ip, that are not assigned
@@ -684,7 +688,7 @@ static struct slave *rlb_choose_channel(struct sk_buff *skb, struct bonding *bon
                         * move the old client to primary (curr_active_slave) so
                         * that the new client can be assigned to this entry.
                         */
-                       if (bond->curr_active_slave &&
+                       if (curr_active_slave &&
                            client_info->slave != curr_active_slave) {
                                client_info->slave = curr_active_slave;
                                rlb_update_client(client_info);
@@ -765,7 +769,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                tx_slave = rlb_choose_channel(skb, bond);
                if (tx_slave)
                        ether_addr_copy(arp->mac_src, tx_slave->dev->dev_addr);
-               pr_debug("Server sent ARP Reply packet\n");
+               netdev_dbg(bond->dev, "Server sent ARP Reply packet\n");
        } else if (arp->op_code == htons(ARPOP_REQUEST)) {
                /* Create an entry in the rx_hashtbl for this client as a
                 * place holder.
@@ -785,7 +789,7 @@ static struct slave *rlb_arp_xmit(struct sk_buff *skb, struct bonding *bond)
                 * updated with their assigned mac.
                 */
                rlb_req_update_subnet_clients(bond, arp->ip_src);
-               pr_debug("Server sent ARP Request packet\n");
+               netdev_dbg(bond->dev, "Server sent ARP Request packet\n");
        }
 
        return tx_slave;
@@ -1024,8 +1028,7 @@ static void alb_send_lp_vid(struct slave *slave, u8 mac_addr[],
        if (vid) {
                skb = vlan_put_tag(skb, vlan_proto, vid);
                if (!skb) {
-                       pr_err("%s: Error: failed to insert VLAN tag\n",
-                              slave->bond->dev->name);
+                       netdev_err(slave->bond->dev, "failed to insert VLAN tag\n");
                        return;
                }
        }
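
A subtlety behind these error paths: vlan_put_tag() consumes the skb
when it fails, which is why the callers above only log and bail out
without freeing anything themselves. As a sketch:

	skb = vlan_put_tag(skb, htons(ETH_P_8021Q), vid);
	if (!skb)
		return;		/* skb already freed, no kfree_skb() here */
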
@@ -1039,7 +1042,7 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
        struct bonding *bond = bond_get_bond_by_slave(slave);
        struct net_device *upper;
        struct list_head *iter;
-       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
+       struct bond_vlan_tag *tags;
 
        /* send untagged */
        alb_send_lp_vid(slave, mac_addr, 0, 0);
@@ -1067,10 +1070,12 @@ static void alb_send_learning_packets(struct slave *slave, u8 mac_addr[],
                 * when strict_match is turned off.
                 */
                if (netif_is_macvlan(upper) && !strict_match) {
-                       memset(tags, 0, sizeof(tags));
-                       bond_verify_device_path(bond->dev, upper, tags);
+                       tags = bond_verify_device_path(bond->dev, upper, 0);
+                       if (IS_ERR_OR_NULL(tags))
+                               BUG();
                        alb_send_lp_vid(slave, upper->dev_addr,
                                        tags[0].vlan_proto, tags[0].vlan_id);
+                       kfree(tags);
                }
        }
        rcu_read_unlock();
@@ -1091,9 +1096,8 @@ static int alb_set_slave_mac_addr(struct slave *slave, u8 addr[])
        memcpy(s_addr.sa_data, addr, dev->addr_len);
        s_addr.sa_family = dev->type;
        if (dev_set_mac_address(dev, &s_addr)) {
-               pr_err("%s: Error: dev_set_mac_address of dev %s failed!\n"
-                      "ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
-                      slave->bond->dev->name, dev->name);
+               netdev_err(slave->bond->dev, "dev_set_mac_address of dev %s failed! ALB mode requires that the base driver support setting the hw address also when the network device's interface is open\n",
+                          dev->name);
                return -EOPNOTSUPP;
        }
        return 0;
@@ -1221,7 +1225,7 @@ static void alb_change_hw_addr_on_detach(struct bonding *bond, struct slave *sla
  */
 static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slave *slave)
 {
-       struct slave *has_bond_addr = bond->curr_active_slave;
+       struct slave *has_bond_addr = rcu_access_pointer(bond->curr_active_slave);
        struct slave *tmp_slave1, *free_mac_slave = NULL;
        struct list_head *iter;
 
@@ -1267,13 +1271,12 @@ static int alb_handle_addr_collision_on_attach(struct bonding *bond, struct slav
        if (free_mac_slave) {
                alb_set_slave_mac_addr(slave, free_mac_slave->perm_hwaddr);
 
-               pr_warn("%s: Warning: the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
-                       bond->dev->name, slave->dev->name,
-                       free_mac_slave->dev->name);
+               netdev_warn(bond->dev, "the hw address of slave %s is in use by the bond; giving it the hw address of %s\n",
+                           slave->dev->name, free_mac_slave->dev->name);
 
        } else if (has_bond_addr) {
-               pr_err("%s: Error: the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
-                      bond->dev->name, slave->dev->name);
+               netdev_err(bond->dev, "the hw address of slave %s is in use by the bond; couldn't find a slave with a free hw address to give it (this should not have happened)\n",
+                          slave->dev->name);
                return -EFAULT;
        }
 
@@ -1406,9 +1409,39 @@ out:
        return NETDEV_TX_OK;
 }
 
+static int bond_tlb_update_slave_arr(struct bonding *bond,
+                                    struct slave *skipslave)
+{
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
+       struct slave *tx_slave;
+       struct list_head *iter;
+       struct tlb_up_slave *new_arr, *old_arr;
+
+       new_arr = kzalloc(offsetof(struct tlb_up_slave, arr[bond->slave_cnt]),
+                         GFP_ATOMIC);
+       if (!new_arr)
+               return -ENOMEM;
+
+       bond_for_each_slave(bond, tx_slave, iter) {
+               if (!bond_slave_can_tx(tx_slave))
+                       continue;
+               if (skipslave == tx_slave)
+                       continue;
+               new_arr->arr[new_arr->count++] = tx_slave;
+       }
+
+       old_arr = rtnl_dereference(bond_info->slave_arr);
+       rcu_assign_pointer(bond_info->slave_arr, new_arr);
+       if (old_arr)
+               kfree_rcu(old_arr, rcu);
+
+       return 0;
+}
+
 int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
+       struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
        struct ethhdr *eth_data;
        struct slave *tx_slave = NULL;
        u32 hash_index;
@@ -1429,12 +1462,12 @@ int bond_tlb_xmit(struct sk_buff *skb, struct net_device *bond_dev)
                                                              hash_index & 0xFF,
                                                              skb->len);
                        } else {
-                               struct list_head *iter;
-                               int idx = hash_index % bond->slave_cnt;
+                               struct tlb_up_slave *slaves;
 
-                               bond_for_each_slave_rcu(bond, tx_slave, iter)
-                                       if (--idx < 0)
-                                               break;
+                               slaves = rcu_dereference(bond_info->slave_arr);
+                               if (slaves && slaves->count)
+                                       tx_slave = slaves->arr[hash_index %
+                                                              slaves->count];
                        }
                        break;
                }
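
The reader side above replaces a per-packet walk of the slave list
with an index into the RCU-published array. The lookup relies on the
transmit path running under rcu_read_lock(); rcu_dereference() is only
legal inside such a read-side section. The pattern in isolation, as a
sketch (the bond_xmit_hash() call stands in for however the flow hash
was computed):

	struct tlb_up_slave *slaves;
	struct slave *tx_slave = NULL;
	u32 hash_index = bond_xmit_hash(bond, skb);

	rcu_read_lock();
	slaves = rcu_dereference(bond_info->slave_arr);
	if (slaves && slaves->count)
		tx_slave = slaves->arr[hash_index % slaves->count];
	rcu_read_unlock();
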
@@ -1575,7 +1608,7 @@ void bond_alb_monitor(struct work_struct *work)
                         * use mac of the slave device.
                         * In RLB mode, we always use strict matches.
                         */
-                       strict_match = (slave != bond->curr_active_slave ||
+                       strict_match = (slave != rcu_access_pointer(bond->curr_active_slave) ||
                                        bond_info->rlb_enabled);
                        alb_send_learning_packets(slave, slave->dev->dev_addr,
                                                  strict_match);
@@ -1593,7 +1626,7 @@ void bond_alb_monitor(struct work_struct *work)
 
                bond_for_each_slave_rcu(bond, slave, iter) {
                        tlb_clear_slave(bond, slave, 1);
-                       if (slave == bond->curr_active_slave) {
+                       if (slave == rcu_access_pointer(bond->curr_active_slave)) {
                                SLAVE_TLB_INFO(slave).load =
                                        bond_info->unbalanced_load /
                                                BOND_TLB_REBALANCE_INTERVAL;
@@ -1625,7 +1658,8 @@ void bond_alb_monitor(struct work_struct *work)
                         * because a slave was disabled then
                         * it can now leave promiscuous mode.
                         */
-                       dev_set_promiscuity(bond->curr_active_slave->dev, -1);
+                       dev_set_promiscuity(rtnl_dereference(bond->curr_active_slave)->dev,
+                                           -1);
                        bond_info->primary_is_promisc = 0;
 
                        rtnl_unlock();
@@ -1698,6 +1732,11 @@ void bond_alb_deinit_slave(struct bonding *bond, struct slave *slave)
                bond->alb_info.rx_slave = NULL;
                rlb_clear_slave(bond, slave);
        }
+
+       if (bond_is_nondyn_tlb(bond))
+               if (bond_tlb_update_slave_arr(bond, slave))
+                       pr_err("Failed to build slave-array for TLB mode.\n");
+
 }
 
 /* Caller must hold bond lock for read */
@@ -1721,6 +1760,11 @@ void bond_alb_handle_link_change(struct bonding *bond, struct slave *slave, char
                         */
                }
        }
+
+       if (bond_is_nondyn_tlb(bond)) {
+               if (bond_tlb_update_slave_arr(bond, NULL))
+                       pr_err("Failed to build slave-array for TLB mode.\n");
+       }
 }
 
 /**
@@ -1742,17 +1786,21 @@ void bond_alb_handle_active_change(struct bonding *bond, struct slave *new_slave
        __acquires(&bond->curr_slave_lock)
 {
        struct slave *swap_slave;
+       struct slave *curr_active;
 
-       if (bond->curr_active_slave == new_slave)
+       curr_active = rcu_dereference_protected(bond->curr_active_slave,
+                                               !new_slave ||
+                                               lockdep_is_held(&bond->curr_slave_lock));
+       if (curr_active == new_slave)
                return;
 
-       if (bond->curr_active_slave && bond->alb_info.primary_is_promisc) {
-               dev_set_promiscuity(bond->curr_active_slave->dev, -1);
+       if (curr_active && bond->alb_info.primary_is_promisc) {
+               dev_set_promiscuity(curr_active->dev, -1);
                bond->alb_info.primary_is_promisc = 0;
                bond->alb_info.rlb_promisc_timeout_counter = 0;
        }
 
-       swap_slave = bond->curr_active_slave;
+       swap_slave = curr_active;
        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
        if (!new_slave || !bond_has_slaves(bond))
@@ -1818,6 +1866,7 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
 {
        struct bonding *bond = netdev_priv(bond_dev);
        struct sockaddr *sa = addr;
+       struct slave *curr_active;
        struct slave *swap_slave;
        int res;
 
@@ -1834,23 +1883,24 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
         * Otherwise we'll need to pass the new address to it and handle
         * duplications.
         */
-       if (!bond->curr_active_slave)
+       curr_active = rtnl_dereference(bond->curr_active_slave);
+       if (!curr_active)
                return 0;
 
        swap_slave = bond_slave_has_mac(bond, bond_dev->dev_addr);
 
        if (swap_slave) {
-               alb_swap_mac_addr(swap_slave, bond->curr_active_slave);
-               alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
+               alb_swap_mac_addr(swap_slave, curr_active);
+               alb_fasten_mac_swap(bond, swap_slave, curr_active);
        } else {
-               alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr);
+               alb_set_slave_mac_addr(curr_active, bond_dev->dev_addr);
 
                read_lock(&bond->lock);
-               alb_send_learning_packets(bond->curr_active_slave,
+               alb_send_learning_packets(curr_active,
                                          bond_dev->dev_addr, false);
                if (bond->alb_info.rlb_enabled) {
                        /* inform clients mac address has changed */
-                       rlb_req_update_slave_clients(bond, bond->curr_active_slave);
+                       rlb_req_update_slave_clients(bond, curr_active);
                }
                read_unlock(&bond->lock);
        }
diff --git a/drivers/net/bonding/bond_alb.h b/drivers/net/bonding/bond_alb.h
index 5fc76c0..aaeac61 100644
@@ -139,12 +139,20 @@ struct tlb_slave_info {
                         */
 };
 
+struct tlb_up_slave {
+       unsigned int    count;
+       struct rcu_head rcu;
+       struct slave    *arr[0];
+};
+
 struct alb_bond_info {
        struct tlb_client_info  *tx_hashtbl; /* Dynamically allocated */
        spinlock_t              tx_hashtbl_lock;
        u32                     unbalanced_load;
        int                     tx_rebalance_counter;
        int                     lp_counter;
+       /* -------- non-dynamic tlb mode only ---------*/
+       struct tlb_up_slave __rcu *slave_arr;     /* Up slaves */
        /* -------- rlb parameters -------- */
        int rlb_enabled;
        struct rlb_client_info  *rx_hashtbl;    /* Receive hash table */
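
struct tlb_up_slave has the usual shape of an RCU-replaceable array: a
count, an embedded rcu_head so a stale copy can be retired with
kfree_rcu(), and a zero-length arr[0] so header and slots come from a
single allocation sized with offsetof(). Sketch of the matching
allocation, mirroring bond_tlb_update_slave_arr() above:

	struct tlb_up_slave *s;
	unsigned int n = bond->slave_cnt;	/* worst-case capacity */

	s = kzalloc(offsetof(struct tlb_up_slave, arr[n]), GFP_ATOMIC);
	if (!s)
		return -ENOMEM;
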
diff --git a/drivers/net/bonding/bond_debugfs.c b/drivers/net/bonding/bond_debugfs.c
index 658e761..280971b 100644
@@ -69,8 +69,7 @@ void bond_debug_register(struct bonding *bond)
                debugfs_create_dir(bond->dev->name, bonding_debug_root);
 
        if (!bond->debug_dir) {
-               pr_warn("%s: Warning: failed to register to debugfs\n",
-                       bond->dev->name);
+               netdev_warn(bond->dev, "failed to register to debugfs\n");
                return;
        }
 
@@ -98,8 +97,7 @@ void bond_debug_reregister(struct bonding *bond)
        if (d) {
                bond->debug_dir = d;
        } else {
-               pr_warn("%s: Warning: failed to reregister, so just unregister old one\n",
-                       bond->dev->name);
+               netdev_warn(bond->dev, "failed to reregister, so just unregister old one\n");
                bond_debug_unregister(bond);
        }
 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 04f35f9..f0f5eab 100644
@@ -31,8 +31,6 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/types.h>
@@ -498,11 +496,10 @@ static int bond_set_promiscuity(struct bonding *bond, int inc)
        int err = 0;
 
        if (bond_uses_primary(bond)) {
-               /* write lock already acquired */
-               if (bond->curr_active_slave) {
-                       err = dev_set_promiscuity(bond->curr_active_slave->dev,
-                                                 inc);
-               }
+               struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
+
+               if (curr_active)
+                       err = dev_set_promiscuity(curr_active->dev, inc);
        } else {
                struct slave *slave;
 
@@ -524,11 +521,10 @@ static int bond_set_allmulti(struct bonding *bond, int inc)
        int err = 0;
 
        if (bond_uses_primary(bond)) {
-               /* write lock already acquired */
-               if (bond->curr_active_slave) {
-                       err = dev_set_allmulti(bond->curr_active_slave->dev,
-                                              inc);
-               }
+               struct slave *curr_active = rtnl_dereference(bond->curr_active_slave);
+
+               if (curr_active)
+                       err = dev_set_allmulti(curr_active->dev, inc);
        } else {
                struct slave *slave;
 
@@ -629,8 +625,8 @@ static void bond_hw_addr_swap(struct bonding *bond, struct slave *new_active,
 static void bond_set_dev_addr(struct net_device *bond_dev,
                              struct net_device *slave_dev)
 {
-       pr_debug("bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
-                bond_dev, slave_dev, slave_dev->addr_len);
+       netdev_dbg(bond_dev, "bond_dev=%p slave_dev=%p slave_dev->addr_len=%d\n",
+                  bond_dev, slave_dev, slave_dev->addr_len);
        memcpy(bond_dev->dev_addr, slave_dev->dev_addr, slave_dev->addr_len);
        bond_dev->addr_assign_type = NET_ADDR_STOLEN;
        call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
@@ -684,8 +680,8 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 
                rv = dev_set_mac_address(new_active->dev, &saddr);
                if (rv) {
-                       pr_err("%s: Error %d setting MAC of slave %s\n",
-                              bond->dev->name, -rv, new_active->dev->name);
+                       netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
+                                  -rv, new_active->dev->name);
                        goto out;
                }
 
@@ -697,14 +693,14 @@ static void bond_do_fail_over_mac(struct bonding *bond,
 
                rv = dev_set_mac_address(old_active->dev, &saddr);
                if (rv)
-                       pr_err("%s: Error %d setting MAC of slave %s\n",
-                              bond->dev->name, -rv, new_active->dev->name);
+                       netdev_err(bond->dev, "Error %d setting MAC of slave %s\n",
+                                  -rv, new_active->dev->name);
 out:
                write_lock_bh(&bond->curr_slave_lock);
                break;
        default:
-               pr_err("%s: bond_do_fail_over_mac impossible: bad policy %d\n",
-                      bond->dev->name, bond->params.fail_over_mac);
+               netdev_err(bond->dev, "bond_do_fail_over_mac impossible: bad policy %d\n",
+                          bond->params.fail_over_mac);
                break;
        }
 
@@ -713,7 +709,7 @@ out:
 static bool bond_should_change_active(struct bonding *bond)
 {
        struct slave *prim = bond->primary_slave;
-       struct slave *curr = bond->curr_active_slave;
+       struct slave *curr = bond_deref_active_protected(bond);
 
        if (!prim || !curr || curr->link != BOND_LINK_UP)
                return true;
@@ -765,8 +761,8 @@ static bool bond_should_notify_peers(struct bonding *bond)
        slave = rcu_dereference(bond->curr_active_slave);
        rcu_read_unlock();
 
-       pr_debug("bond_should_notify_peers: bond %s slave %s\n",
-                bond->dev->name, slave ? slave->dev->name : "NULL");
+       netdev_dbg(bond->dev, "bond_should_notify_peers: slave %s\n",
+                  slave ? slave->dev->name : "NULL");
 
        if (!slave || !bond->send_peer_notif ||
            test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
@@ -792,7 +788,11 @@ static bool bond_should_notify_peers(struct bonding *bond)
  */
 void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 {
-       struct slave *old_active = bond->curr_active_slave;
+       struct slave *old_active;
+
+       old_active = rcu_dereference_protected(bond->curr_active_slave,
+                                              !new_active ||
+                                              lockdep_is_held(&bond->curr_slave_lock));
 
        if (old_active == new_active)
                return;
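
With curr_active_slave now __rcu-annotated, each access site picks the
accessor that matches its locking context: rcu_dereference() for
readers inside rcu_read_lock(), rtnl_dereference() for writers that
hold RTNL, rcu_access_pointer() when only the pointer value is tested
and never dereferenced, and rcu_dereference_protected() with an
explicit lockdep expression, as in the hunk above where either no new
slave is being set or curr_slave_lock is held. A compact sketch:

	/* reader, inside rcu_read_lock()/rcu_read_unlock() */
	slave = rcu_dereference(bond->curr_active_slave);

	/* writer guaranteed to hold RTNL */
	slave = rtnl_dereference(bond->curr_active_slave);

	/* pointer-value test only, never dereferenced */
	if (rcu_access_pointer(bond->curr_active_slave) == new_active)
		return;

	/* writer with a custom proof for lockdep */
	slave = rcu_dereference_protected(bond->curr_active_slave,
			lockdep_is_held(&bond->curr_slave_lock));
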
@@ -802,9 +802,9 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
 
                if (new_active->link == BOND_LINK_BACK) {
                        if (bond_uses_primary(bond)) {
-                               pr_info("%s: making interface %s the new active one %d ms earlier\n",
-                                       bond->dev->name, new_active->dev->name,
-                                       (bond->params.updelay - new_active->delay) * bond->params.miimon);
+                               netdev_info(bond->dev, "making interface %s the new active one %d ms earlier\n",
+                                           new_active->dev->name,
+                                           (bond->params.updelay - new_active->delay) * bond->params.miimon);
                        }
 
                        new_active->delay = 0;
@@ -817,8 +817,8 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
                                bond_alb_handle_link_change(bond, new_active, BOND_LINK_UP);
                } else {
                        if (bond_uses_primary(bond)) {
-                               pr_info("%s: making interface %s the new active one\n",
-                                       bond->dev->name, new_active->dev->name);
+                               netdev_info(bond->dev, "making interface %s the new active one\n",
+                                           new_active->dev->name);
                        }
                }
        }
@@ -900,18 +900,16 @@ void bond_select_active_slave(struct bonding *bond)
        int rv;
 
        best_slave = bond_find_best_slave(bond);
-       if (best_slave != bond->curr_active_slave) {
+       if (best_slave != bond_deref_active_protected(bond)) {
                bond_change_active_slave(bond, best_slave);
                rv = bond_set_carrier(bond);
                if (!rv)
                        return;
 
                if (netif_carrier_ok(bond->dev)) {
-                       pr_info("%s: first active interface up!\n",
-                               bond->dev->name);
+                       netdev_info(bond->dev, "first active interface up!\n");
                } else {
-                       pr_info("%s: now running without any active interface!\n",
-                               bond->dev->name);
+                       netdev_info(bond->dev, "now running without any active interface!\n");
                }
        }
 }
@@ -1001,12 +999,6 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
        netdev_features_t mask;
        struct slave *slave;
 
-       if (!bond_has_slaves(bond)) {
-               /* Disable adding VLANs to empty bond. But why? --mq */
-               features |= NETIF_F_VLAN_CHALLENGED;
-               return features;
-       }
-
        mask = features;
        features &= ~NETIF_F_ONE_FOR_ALL;
        features |= NETIF_F_ALL_FOR_ALL;
@@ -1025,10 +1017,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
                                 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
                                 NETIF_F_HIGHDMA | NETIF_F_LRO)
 
+#define BOND_ENC_FEATURES      (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
+                                NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
+
 static void bond_compute_features(struct bonding *bond)
 {
        unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
        netdev_features_t vlan_features = BOND_VLAN_FEATURES;
+       netdev_features_t enc_features  = BOND_ENC_FEATURES;
        struct net_device *bond_dev = bond->dev;
        struct list_head *iter;
        struct slave *slave;
@@ -1044,6 +1040,9 @@ static void bond_compute_features(struct bonding *bond)
                vlan_features = netdev_increment_features(vlan_features,
                        slave->dev->vlan_features, BOND_VLAN_FEATURES);
 
+               enc_features = netdev_increment_features(enc_features,
+                                                        slave->dev->hw_enc_features,
+                                                        BOND_ENC_FEATURES);
                dst_release_flag &= slave->dev->priv_flags;
                if (slave->dev->hard_header_len > max_hard_header_len)
                        max_hard_header_len = slave->dev->hard_header_len;
@@ -1054,6 +1053,7 @@ static void bond_compute_features(struct bonding *bond)
 
 done:
        bond_dev->vlan_features = vlan_features;
+       bond_dev->hw_enc_features = enc_features;
        bond_dev->hard_header_len = max_hard_header_len;
        bond_dev->gso_max_segs = gso_max_segs;
        netif_set_gso_max_size(bond_dev, gso_max_size);
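
bond_compute_features() now folds each slave's hw_enc_features (the
offloads usable on encapsulated traffic, e.g. VXLAN) the same way it
already folds vlan_features, then publishes the result on the bond
device so tunnels stacked on the bond can keep checksum/TSO offloads.
netdev_increment_features(all, one, mask) performs the per-slave
merge. Condensed sketch of the loop:

	netdev_features_t enc_features = BOND_ENC_FEATURES;
	struct list_head *iter;
	struct slave *slave;

	bond_for_each_slave(bond, slave, iter)
		enc_features = netdev_increment_features(enc_features,
					slave->dev->hw_enc_features,
					BOND_ENC_FEATURES);
	bond_dev->hw_enc_features = enc_features;
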
@@ -1206,36 +1206,38 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        if (!bond->params.use_carrier &&
            slave_dev->ethtool_ops->get_link == NULL &&
            slave_ops->ndo_do_ioctl == NULL) {
-               pr_warn("%s: Warning: no link monitoring support for %s\n",
-                       bond_dev->name, slave_dev->name);
+               netdev_warn(bond_dev, "no link monitoring support for %s\n",
+                           slave_dev->name);
        }
 
        /* already enslaved */
        if (slave_dev->flags & IFF_SLAVE) {
-               pr_debug("Error: Device was already enslaved\n");
+               netdev_dbg(bond_dev, "Error: Device was already enslaved\n");
                return -EBUSY;
        }
 
        if (bond_dev == slave_dev) {
-               pr_err("%s: cannot enslave bond to itself.\n", bond_dev->name);
+               netdev_err(bond_dev, "cannot enslave bond to itself.\n");
                return -EPERM;
        }
 
        /* vlan challenged mutual exclusion */
        /* no need to lock since we're protected by rtnl_lock */
        if (slave_dev->features & NETIF_F_VLAN_CHALLENGED) {
-               pr_debug("%s: NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
+               netdev_dbg(bond_dev, "%s is NETIF_F_VLAN_CHALLENGED\n",
+                          slave_dev->name);
                if (vlan_uses_dev(bond_dev)) {
-                       pr_err("%s: Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
-                              bond_dev->name, slave_dev->name, bond_dev->name);
+                       netdev_err(bond_dev, "Error: cannot enslave VLAN challenged slave %s on VLAN enabled bond %s\n",
+                                  slave_dev->name, bond_dev->name);
                        return -EPERM;
                } else {
-                       pr_warn("%s: Warning: enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
-                               bond_dev->name, slave_dev->name,
-                               slave_dev->name, bond_dev->name);
+                       netdev_warn(bond_dev, "enslaved VLAN challenged slave %s. Adding VLANs will be blocked as long as %s is part of bond %s\n",
+                                   slave_dev->name, slave_dev->name,
+                                   bond_dev->name);
                }
        } else {
-               pr_debug("%s: ! NETIF_F_VLAN_CHALLENGED\n", slave_dev->name);
+               netdev_dbg(bond_dev, "%s is !NETIF_F_VLAN_CHALLENGED\n",
+                          slave_dev->name);
        }
 
        /*
@@ -1245,8 +1247,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         * enslaving it; the old ifenslave will not.
         */
        if ((slave_dev->flags & IFF_UP)) {
-               pr_err("%s is up - this may be due to an out of date ifenslave\n",
-                      slave_dev->name);
+               netdev_err(bond_dev, "%s is up - this may be due to an out of date ifenslave\n",
+                          slave_dev->name);
                res = -EPERM;
                goto err_undo_flags;
        }
@@ -1260,16 +1262,14 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
         */
        if (!bond_has_slaves(bond)) {
                if (bond_dev->type != slave_dev->type) {
-                       pr_debug("%s: change device type from %d to %d\n",
-                                bond_dev->name,
-                                bond_dev->type, slave_dev->type);
+                       netdev_dbg(bond_dev, "change device type from %d to %d\n",
+                                  bond_dev->type, slave_dev->type);
 
                        res = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE,
                                                       bond_dev);
                        res = notifier_to_errno(res);
                        if (res) {
-                               pr_err("%s: refused to change device type\n",
-                                      bond_dev->name);
+                               netdev_err(bond_dev, "refused to change device type\n");
                                res = -EBUSY;
                                goto err_undo_flags;
                        }
@@ -1289,26 +1289,24 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                                                 bond_dev);
                }
        } else if (bond_dev->type != slave_dev->type) {
-               pr_err("%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
-                      slave_dev->name, slave_dev->type, bond_dev->type);
+               netdev_err(bond_dev, "%s ether type (%d) is different from other slaves (%d), can not enslave it\n",
+                          slave_dev->name, slave_dev->type, bond_dev->type);
                res = -EINVAL;
                goto err_undo_flags;
        }
 
        if (slave_ops->ndo_set_mac_address == NULL) {
-               if (!bond_has_slaves(bond)) {
-                       pr_warn("%s: Warning: The first slave device specified does not support setting the MAC address\n",
-                               bond_dev->name);
-                       if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP) {
+               netdev_warn(bond_dev, "The slave device specified does not support setting the MAC address\n");
+               if (BOND_MODE(bond) == BOND_MODE_ACTIVEBACKUP &&
+                   bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
+                       if (!bond_has_slaves(bond)) {
                                bond->params.fail_over_mac = BOND_FOM_ACTIVE;
-                               pr_warn("%s: Setting fail_over_mac to active for active-backup mode\n",
-                                       bond_dev->name);
+                               netdev_warn(bond_dev, "Setting fail_over_mac to active for active-backup mode\n");
+                       } else {
+                               netdev_err(bond_dev, "The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n");
+                               res = -EOPNOTSUPP;
+                               goto err_undo_flags;
                        }
-               } else if (bond->params.fail_over_mac != BOND_FOM_ACTIVE) {
-                       pr_err("%s: Error: The slave device specified does not support setting the MAC address, but fail_over_mac is not set to active\n",
-                              bond_dev->name);
-                       res = -EOPNOTSUPP;
-                       goto err_undo_flags;
                }
        }
 
@@ -1338,7 +1336,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        new_slave->original_mtu = slave_dev->mtu;
        res = dev_set_mtu(slave_dev, bond->dev->mtu);
        if (res) {
-               pr_debug("Error %d calling dev_set_mtu\n", res);
+               netdev_dbg(bond_dev, "Error %d calling dev_set_mtu\n", res);
                goto err_free;
        }
 
@@ -1359,7 +1357,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                addr.sa_family = slave_dev->type;
                res = dev_set_mac_address(slave_dev, &addr);
                if (res) {
-                       pr_debug("Error %d calling set_mac_address\n", res);
+                       netdev_dbg(bond_dev, "Error %d calling set_mac_address\n", res);
                        goto err_restore_mtu;
                }
        }
@@ -1367,7 +1365,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        /* open the slave since the application closed it */
        res = dev_open(slave_dev);
        if (res) {
-               pr_debug("Opening slave %s failed\n", slave_dev->name);
+               netdev_dbg(bond_dev, "Opening slave %s failed\n", slave_dev->name);
                goto err_restore_mac;
        }
 
@@ -1417,8 +1415,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        res = vlan_vids_add_by_dev(slave_dev, bond_dev);
        if (res) {
-               pr_err("%s: Error: Couldn't add bond vlan ids to %s\n",
-                      bond_dev->name, slave_dev->name);
+               netdev_err(bond_dev, "Couldn't add bond vlan ids to %s\n",
+                          slave_dev->name);
                goto err_close;
        }
 
@@ -1447,12 +1445,12 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                         * supported); thus, we don't need to change
                         * the messages for netif_carrier.
                         */
-                       pr_warn("%s: Warning: MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
-                               bond_dev->name, slave_dev->name);
+                       netdev_warn(bond_dev, "MII and ETHTOOL support not available for interface %s, and arp_interval/arp_ip_target module parameters not specified, thus bonding will not detect link failures! see bonding.txt for details\n",
+                                   slave_dev->name);
                } else if (link_reporting == -1) {
                        /* unable get link status using mii/ethtool */
-                       pr_warn("%s: Warning: can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
-                               bond_dev->name, slave_dev->name);
+                       netdev_warn(bond_dev, "can't get link status from interface %s; the network driver associated with this interface does not support MII or ETHTOOL link status reporting, thus miimon has no effect on this interface\n",
+                                   slave_dev->name);
                }
        }
 
@@ -1477,9 +1475,9 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
 
        if (new_slave->link != BOND_LINK_DOWN)
                new_slave->last_link_up = jiffies;
-       pr_debug("Initial state of slave_dev is BOND_LINK_%s\n",
-                new_slave->link == BOND_LINK_DOWN ? "DOWN" :
-                (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
+       netdev_dbg(bond_dev, "Initial state of slave_dev is BOND_LINK_%s\n",
+                  new_slave->link == BOND_LINK_DOWN ? "DOWN" :
+                  (new_slave->link == BOND_LINK_UP ? "UP" : "BACK"));
 
        if (bond_uses_primary(bond) && bond->params.primary[0]) {
                /* if there is a primary slave, remember it */
@@ -1520,7 +1518,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                bond_set_slave_inactive_flags(new_slave, BOND_SLAVE_NOTIFY_NOW);
                break;
        default:
-               pr_debug("This slave is always active in trunk mode\n");
+               netdev_dbg(bond_dev, "This slave is always active in trunk mode\n");
 
                /* always active in trunk mode */
                bond_set_active_slave(new_slave);
@@ -1529,7 +1527,8 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                 * anyway (it holds no special properties of the bond device),
                 * so we can change it without calling change_active_interface()
                 */
-               if (!bond->curr_active_slave && new_slave->link == BOND_LINK_UP)
+               if (!rcu_access_pointer(bond->curr_active_slave) &&
+                   new_slave->link == BOND_LINK_UP)
                        rcu_assign_pointer(bond->curr_active_slave, new_slave);
 
                break;
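
Several hunks above and below switch curr_active_slave accesses to the proper RCU accessors: rcu_access_pointer() where only the pointer value is tested (no dereference, so no read-side lock is required), rcu_assign_pointer() to publish a new value to readers, and rcu_dereference()/rtnl_dereference() where the slave itself is used under rcu_read_lock() or RTNL. A rough sketch of the distinction, using a stand-in struct rather than the bonding code:

    #include <linux/types.h>
    #include <linux/rcupdate.h>

    struct item {
            int value;
    };

    static struct item __rcu *active;       /* stand-in for bond->curr_active_slave */

    static bool have_active(void)
    {
            /* value-only test: rcu_access_pointer() needs no rcu_read_lock() */
            return rcu_access_pointer(active) != NULL;
    }

    static int active_value(void)
    {
            struct item *it;
            int v = 0;

            rcu_read_lock();
            it = rcu_dereference(active);   /* dereference only inside the lock */
            if (it)
                    v = it->value;
            rcu_read_unlock();
            return v;
    }

    static void publish(struct item *it)
    {
            rcu_assign_pointer(active, it); /* pairs with readers' rcu_dereference() */
    }
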
@@ -1539,8 +1538,7 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        slave_dev->npinfo = bond->dev->npinfo;
        if (slave_dev->npinfo) {
                if (slave_enable_netpoll(new_slave)) {
-                       pr_info("Error, %s: master_dev is using netpoll, but new slave device does not support netpoll\n",
-                               bond_dev->name);
+                       netdev_info(bond_dev, "master_dev is using netpoll, but new slave device does not support netpoll\n");
                        res = -EBUSY;
                        goto err_detach;
                }
@@ -1550,19 +1548,19 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
        res = netdev_rx_handler_register(slave_dev, bond_handle_frame,
                                         new_slave);
        if (res) {
-               pr_debug("Error %d calling netdev_rx_handler_register\n", res);
+               netdev_dbg(bond_dev, "Error %d calling netdev_rx_handler_register\n", res);
                goto err_detach;
        }
 
        res = bond_master_upper_dev_link(bond_dev, slave_dev, new_slave);
        if (res) {
-               pr_debug("Error %d calling bond_master_upper_dev_link\n", res);
+               netdev_dbg(bond_dev, "Error %d calling bond_master_upper_dev_link\n", res);
                goto err_unregister;
        }
 
        res = bond_sysfs_slave_add(new_slave);
        if (res) {
-               pr_debug("Error %d calling bond_sysfs_slave_add\n", res);
+               netdev_dbg(bond_dev, "Error %d calling bond_sysfs_slave_add\n", res);
                goto err_upper_unlink;
        }
 
@@ -1578,10 +1576,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
                unblock_netpoll_tx();
        }
 
-       pr_info("%s: Enslaving %s as %s interface with %s link\n",
-               bond_dev->name, slave_dev->name,
-               bond_is_active_slave(new_slave) ? "an active" : "a backup",
-               new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
+       netdev_info(bond_dev, "Enslaving %s as %s interface with %s link\n",
+                   slave_dev->name,
+                   bond_is_active_slave(new_slave) ? "an active" : "a backup",
+                   new_slave->link != BOND_LINK_DOWN ? "an up" : "a down");
 
        /* enslave is successful */
        return 0;
@@ -1600,7 +1598,7 @@ err_detach:
        vlan_vids_del_by_dev(slave_dev, bond_dev);
        if (bond->primary_slave == new_slave)
                bond->primary_slave = NULL;
-       if (bond->curr_active_slave == new_slave) {
+       if (rcu_access_pointer(bond->curr_active_slave) == new_slave) {
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
                bond_change_active_slave(bond, NULL);
@@ -1666,8 +1664,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        /* slave is not a slave or master is not master of this slave */
        if (!(slave_dev->flags & IFF_SLAVE) ||
            !netdev_has_upper_dev(slave_dev, bond_dev)) {
-               pr_err("%s: Error: cannot release %s\n",
-                      bond_dev->name, slave_dev->name);
+               netdev_err(bond_dev, "cannot release %s\n",
+                          slave_dev->name);
                return -EINVAL;
        }
 
@@ -1676,8 +1674,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        slave = bond_get_slave_by_dev(bond, slave_dev);
        if (!slave) {
                /* not a slave of this bond */
-               pr_info("%s: %s not enslaved\n",
-                       bond_dev->name, slave_dev->name);
+               netdev_info(bond_dev, "%s not enslaved\n",
+                           slave_dev->name);
                unblock_netpoll_tx();
                return -EINVAL;
        }
@@ -1697,23 +1695,21 @@ static int __bond_release_one(struct net_device *bond_dev,
 
        write_unlock_bh(&bond->lock);
 
-       pr_info("%s: Releasing %s interface %s\n",
-               bond_dev->name,
-               bond_is_active_slave(slave) ? "active" : "backup",
-               slave_dev->name);
+       netdev_info(bond_dev, "Releasing %s interface %s\n",
+                   bond_is_active_slave(slave) ? "active" : "backup",
+                   slave_dev->name);
 
-       oldcurrent = bond->curr_active_slave;
+       oldcurrent = rcu_access_pointer(bond->curr_active_slave);
 
-       bond->current_arp_slave = NULL;
+       RCU_INIT_POINTER(bond->current_arp_slave, NULL);
 
        if (!all && (!bond->params.fail_over_mac ||
                     BOND_MODE(bond) != BOND_MODE_ACTIVEBACKUP)) {
                if (ether_addr_equal_64bits(bond_dev->dev_addr, slave->perm_hwaddr) &&
                    bond_has_slaves(bond))
-                       pr_warn("%s: Warning: the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
-                               bond_dev->name, slave_dev->name,
-                               slave->perm_hwaddr,
-                               bond_dev->name, slave_dev->name);
+                       netdev_warn(bond_dev, "the permanent HWaddr of %s - %pM - is still in use by %s - set the HWaddr of %s to a different address to avoid conflicts\n",
+                                   slave_dev->name, slave->perm_hwaddr,
+                                   bond_dev->name, slave_dev->name);
        }
 
        if (bond->primary_slave == slave)
@@ -1752,13 +1748,6 @@ static int __bond_release_one(struct net_device *bond_dev,
        if (!bond_has_slaves(bond)) {
                bond_set_carrier(bond);
                eth_hw_addr_random(bond_dev);
-
-               if (vlan_uses_dev(bond_dev)) {
-                       pr_warn("%s: Warning: clearing HW address of %s while it still has VLANs\n",
-                               bond_dev->name, bond_dev->name);
-                       pr_warn("%s: When re-adding slaves, make sure the bond's HW address matches its VLANs\n",
-                               bond_dev->name);
-               }
        }
 
        unblock_netpoll_tx();
@@ -1773,8 +1762,8 @@ static int __bond_release_one(struct net_device *bond_dev,
        bond_compute_features(bond);
        if (!(bond_dev->features & NETIF_F_VLAN_CHALLENGED) &&
            (old_features & NETIF_F_VLAN_CHALLENGED))
-               pr_info("%s: last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
-                       bond_dev->name, slave_dev->name, bond_dev->name);
+               netdev_info(bond_dev, "last VLAN challenged slave %s left bond %s - VLAN blocking is removed\n",
+                           slave_dev->name, bond_dev->name);
 
        /* must do this from outside any spinlocks */
        vlan_vids_del_by_dev(slave_dev, bond_dev);
@@ -1841,8 +1830,8 @@ static int  bond_release_and_destroy(struct net_device *bond_dev,
        ret = bond_release(bond_dev, slave_dev);
        if (ret == 0 && !bond_has_slaves(bond)) {
                bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
-               pr_info("%s: Destroying bond %s\n",
-                       bond_dev->name, bond_dev->name);
+               netdev_info(bond_dev, "Destroying bond %s\n",
+                           bond_dev->name);
                unregister_netdevice(bond_dev);
        }
        return ret;
@@ -1883,7 +1872,7 @@ static int bond_slave_info_query(struct net_device *bond_dev, struct ifslave *in
 
 /*-------------------------------- Monitoring -------------------------------*/
 
-
+/* called with rcu_read_lock() */
 static int bond_miimon_inspect(struct bonding *bond)
 {
        int link_state, commit = 0;
@@ -1891,7 +1880,7 @@ static int bond_miimon_inspect(struct bonding *bond)
        struct slave *slave;
        bool ignore_updelay;
 
-       ignore_updelay = !bond->curr_active_slave ? true : false;
+       ignore_updelay = !rcu_dereference(bond->curr_active_slave);
 
        bond_for_each_slave_rcu(bond, slave, iter) {
                slave->new_link = BOND_LINK_NOCHANGE;
@@ -1906,14 +1895,13 @@ static int bond_miimon_inspect(struct bonding *bond)
                        slave->link = BOND_LINK_FAIL;
                        slave->delay = bond->params.downdelay;
                        if (slave->delay) {
-                               pr_info("%s: link status down for %sinterface %s, disabling it in %d ms\n",
-                                       bond->dev->name,
-                                       (BOND_MODE(bond) ==
-                                        BOND_MODE_ACTIVEBACKUP) ?
-                                       (bond_is_active_slave(slave) ?
-                                        "active " : "backup ") : "",
-                                       slave->dev->name,
-                                       bond->params.downdelay * bond->params.miimon);
+                               netdev_info(bond->dev, "link status down for %sinterface %s, disabling it in %d ms\n",
+                                           (BOND_MODE(bond) ==
+                                            BOND_MODE_ACTIVEBACKUP) ?
+                                            (bond_is_active_slave(slave) ?
+                                             "active " : "backup ") : "",
+                                           slave->dev->name,
+                                           bond->params.downdelay * bond->params.miimon);
                        }
                        /*FALLTHRU*/
                case BOND_LINK_FAIL:
@@ -1923,11 +1911,10 @@ static int bond_miimon_inspect(struct bonding *bond)
                                 */
                                slave->link = BOND_LINK_UP;
                                slave->last_link_up = jiffies;
-                               pr_info("%s: link status up again after %d ms for interface %s\n",
-                                       bond->dev->name,
-                                       (bond->params.downdelay - slave->delay) *
-                                       bond->params.miimon,
-                                       slave->dev->name);
+                               netdev_info(bond->dev, "link status up again after %d ms for interface %s\n",
+                                           (bond->params.downdelay - slave->delay) *
+                                           bond->params.miimon,
+                                           slave->dev->name);
                                continue;
                        }
 
@@ -1948,21 +1935,20 @@ static int bond_miimon_inspect(struct bonding *bond)
                        slave->delay = bond->params.updelay;
 
                        if (slave->delay) {
-                               pr_info("%s: link status up for interface %s, enabling it in %d ms\n",
-                                       bond->dev->name, slave->dev->name,
-                                       ignore_updelay ? 0 :
-                                       bond->params.updelay *
-                                       bond->params.miimon);
+                               netdev_info(bond->dev, "link status up for interface %s, enabling it in %d ms\n",
+                                           slave->dev->name,
+                                           ignore_updelay ? 0 :
+                                           bond->params.updelay *
+                                           bond->params.miimon);
                        }
                        /*FALLTHRU*/
                case BOND_LINK_BACK:
                        if (!link_state) {
                                slave->link = BOND_LINK_DOWN;
-                               pr_info("%s: link status down again after %d ms for interface %s\n",
-                                       bond->dev->name,
-                                       (bond->params.updelay - slave->delay) *
-                                       bond->params.miimon,
-                                       slave->dev->name);
+                               netdev_info(bond->dev, "link status down again after %d ms for interface %s\n",
+                                           (bond->params.updelay - slave->delay) *
+                                           bond->params.miimon,
+                                           slave->dev->name);
 
                                continue;
                        }
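
bond_miimon_inspect() implements a small per-slave debounce: a slave in BOND_LINK_UP that loses carrier moves to BOND_LINK_FAIL and only reaches BOND_LINK_DOWN after downdelay miimon ticks, with the symmetric BOND_LINK_BACK/updelay path on recovery. A loose, runnable userspace sketch of the down-debounce half (names and simplifications are illustrative, not the kernel code):

    #include <stdio.h>

    enum link_state { LINK_UP, LINK_FAIL, LINK_DOWN };

    /* One monitoring tick: UP -> FAIL arms a countdown of 'downdelay' ticks;
     * FAIL -> UP if carrier returns in time, FAIL -> DOWN once it expires.
     */
    static enum link_state miimon_tick(enum link_state st, int carrier,
                                       int *delay, int downdelay)
    {
            switch (st) {
            case LINK_UP:
                    if (carrier)
                            break;
                    st = LINK_FAIL;
                    *delay = downdelay;
                    /* fall through, like the kernel's FALLTHRU */
            case LINK_FAIL:
                    if (carrier)
                            return LINK_UP;
                    if ((*delay)-- == 0)
                            st = LINK_DOWN;
                    break;
            case LINK_DOWN:
                    break;
            }
            return st;
    }

    int main(void)
    {
            int carrier[] = { 1, 0, 0, 0 };         /* carrier sample per tick */
            enum link_state st = LINK_UP;
            int delay = 0;

            for (int i = 0; i < 4; i++) {
                    st = miimon_tick(st, carrier[i], &delay, 2);
                    printf("tick %d: carrier=%d state=%d\n", i, carrier[i], st);
            }
            return 0;   /* ends in LINK_DOWN after downdelay carrier-less ticks */
    }
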
@@ -2010,10 +1996,10 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_set_backup_slave(slave);
                        }
 
-                       pr_info("%s: link status definitely up for interface %s, %u Mbps %s duplex\n",
-                               bond->dev->name, slave->dev->name,
-                               slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
-                               slave->duplex ? "full" : "half");
+                       netdev_info(bond->dev, "link status definitely up for interface %s, %u Mbps %s duplex\n",
+                                   slave->dev->name,
+                                   slave->speed == SPEED_UNKNOWN ? 0 : slave->speed,
+                                   slave->duplex ? "full" : "half");
 
                        /* notify ad that the link status has changed */
                        if (BOND_MODE(bond) == BOND_MODE_8023AD)
@@ -2040,8 +2026,8 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
 
-                       pr_info("%s: link status definitely down for interface %s, disabling it\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
+                                   slave->dev->name);
 
                        if (BOND_MODE(bond) == BOND_MODE_8023AD)
                                bond_3ad_handle_link_change(slave,
@@ -2051,15 +2037,14 @@ static void bond_miimon_commit(struct bonding *bond)
                                bond_alb_handle_link_change(bond, slave,
                                                            BOND_LINK_DOWN);
 
-                       if (slave == bond->curr_active_slave)
+                       if (slave == rcu_access_pointer(bond->curr_active_slave))
                                goto do_failover;
 
                        continue;
 
                default:
-                       pr_err("%s: invalid new link %d on slave %s\n",
-                              bond->dev->name, slave->new_link,
-                              slave->dev->name);
+                       netdev_err(bond->dev, "invalid new link %d on slave %s\n",
+                                  slave->new_link, slave->dev->name);
                        slave->new_link = BOND_LINK_NOCHANGE;
 
                        continue;
@@ -2160,10 +2145,10 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                          struct bond_vlan_tag *tags)
 {
        struct sk_buff *skb;
-       int i;
+       struct bond_vlan_tag *outer_tag = tags;
 
-       pr_debug("arp %d on slave %s: dst %pI4 src %pI4\n",
-                arp_op, slave_dev->name, &dest_ip, &src_ip);
+       netdev_dbg(slave_dev, "arp %d on slave %s: dst %pI4 src %pI4\n",
+                  arp_op, slave_dev->name, &dest_ip, &src_ip);
 
        skb = arp_create(arp_op, ETH_P_ARP, dest_ip, slave_dev, src_ip,
                         NULL, slave_dev->dev_addr, NULL);
@@ -2173,30 +2158,42 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
                return;
        }
 
+       if (!tags || tags->vlan_proto == VLAN_N_VID)
+               goto xmit;
+
+       tags++;
+
        /* Go through all the tags backwards and add them to the packet */
-       for (i = BOND_MAX_VLAN_ENCAP - 1; i > 0; i--) {
-               if (!tags[i].vlan_id)
+       while (tags->vlan_proto != VLAN_N_VID) {
+               if (!tags->vlan_id) {
+                       tags++;
                        continue;
+               }
 
-               pr_debug("inner tag: proto %X vid %X\n",
-                        ntohs(tags[i].vlan_proto), tags[i].vlan_id);
-               skb = __vlan_put_tag(skb, tags[i].vlan_proto,
-                                    tags[i].vlan_id);
+               netdev_dbg(slave_dev, "inner tag: proto %X vid %X\n",
+                          ntohs(tags->vlan_proto), tags->vlan_id);
+               skb = __vlan_put_tag(skb, tags->vlan_proto,
+                                    tags->vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert inner VLAN tag\n");
                        return;
                }
+
+               tags++;
        }
        /* Set the outer tag */
-       if (tags[0].vlan_id) {
-               pr_debug("outer tag: proto %X vid %X\n",
-                        ntohs(tags[0].vlan_proto), tags[0].vlan_id);
-               skb = vlan_put_tag(skb, tags[0].vlan_proto, tags[0].vlan_id);
+       if (outer_tag->vlan_id) {
+               netdev_dbg(slave_dev, "outer tag: proto %X vid %X\n",
+                          ntohs(outer_tag->vlan_proto), outer_tag->vlan_id);
+               skb = vlan_put_tag(skb, outer_tag->vlan_proto,
+                                  outer_tag->vlan_id);
                if (!skb) {
                        net_err_ratelimited("failed to insert outer VLAN tag\n");
                        return;
                }
        }
+
+xmit:
        arp_xmit(skb);
 }
 
@@ -2206,46 +2203,50 @@ static void bond_arp_send(struct net_device *slave_dev, int arp_op,
  * When the path is validated, collect any vlan information in the
  * path.
  */
-bool bond_verify_device_path(struct net_device *start_dev,
-                            struct net_device *end_dev,
-                            struct bond_vlan_tag *tags)
+struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
+                                             struct net_device *end_dev,
+                                             int level)
 {
+       struct bond_vlan_tag *tags;
        struct net_device *upper;
        struct list_head  *iter;
-       int  idx;
 
-       if (start_dev == end_dev)
-               return true;
+       if (start_dev == end_dev) {
+               tags = kzalloc(sizeof(*tags) * (level + 1), GFP_ATOMIC);
+               if (!tags)
+                       return ERR_PTR(-ENOMEM);
+               tags[level].vlan_proto = VLAN_N_VID;
+               return tags;
+       }
 
        netdev_for_each_upper_dev_rcu(start_dev, upper, iter) {
-               if (bond_verify_device_path(upper, end_dev, tags)) {
-                       if (is_vlan_dev(upper)) {
-                               idx = vlan_get_encap_level(upper);
-                               if (idx >= BOND_MAX_VLAN_ENCAP)
-                                       return false;
-
-                               tags[idx].vlan_proto =
-                                                   vlan_dev_vlan_proto(upper);
-                               tags[idx].vlan_id = vlan_dev_vlan_id(upper);
-                       }
-                       return true;
+               tags = bond_verify_device_path(upper, end_dev, level + 1);
+               if (IS_ERR_OR_NULL(tags)) {
+                       if (IS_ERR(tags))
+                               return tags;
+                       continue;
+               }
+               if (is_vlan_dev(upper)) {
+                       tags[level].vlan_proto = vlan_dev_vlan_proto(upper);
+                       tags[level].vlan_id = vlan_dev_vlan_id(upper);
                }
+
+               return tags;
        }
 
-       return false;
+       return NULL;
 }
 
 static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
 {
        struct rtable *rt;
-       struct bond_vlan_tag tags[BOND_MAX_VLAN_ENCAP];
+       struct bond_vlan_tag *tags;
        __be32 *targets = bond->params.arp_targets, addr;
        int i;
-       bool ret;
 
        for (i = 0; i < BOND_MAX_ARP_TARGETS && targets[i]; i++) {
-               pr_debug("basa: target %pI4\n", &targets[i]);
-               memset(tags, 0, sizeof(tags));
+               netdev_dbg(bond->dev, "basa: target %pI4\n", &targets[i]);
+               tags = NULL;
 
                /* Find out through which dev should the packet go */
                rt = ip_route_output(dev_net(bond->dev), targets[i], 0,
@@ -2268,16 +2269,15 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
                        goto found;
 
                rcu_read_lock();
-               ret = bond_verify_device_path(bond->dev, rt->dst.dev, tags);
+               tags = bond_verify_device_path(bond->dev, rt->dst.dev, 0);
                rcu_read_unlock();
 
-               if (ret)
+               if (!IS_ERR_OR_NULL(tags))
                        goto found;
 
                /* Not our device - skip */
-               pr_debug("%s: no path to arp_ip_target %pI4 via rt.dev %s\n",
-                        bond->dev->name, &targets[i],
-                        rt->dst.dev ? rt->dst.dev->name : "NULL");
+               netdev_dbg(bond->dev, "no path to arp_ip_target %pI4 via rt.dev %s\n",
+                          &targets[i], rt->dst.dev ? rt->dst.dev->name : "NULL");
 
                ip_rt_put(rt);
                continue;
@@ -2287,6 +2287,7 @@ found:
                ip_rt_put(rt);
                bond_arp_send(slave->dev, ARPOP_REQUEST, targets[i],
                              addr, tags);
+               kfree(tags);
        }
 }
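
bond_verify_device_path() now builds the tag list dynamically: on reaching end_dev it kzalloc()s level + 1 entries and marks the last one with vlan_proto == VLAN_N_VID (4096, one past the valid VLAN ID range) as a terminator, and each recursion level fills in its own slot on the way back up, with ERR_PTR(-ENOMEM)/IS_ERR_OR_NULL() distinguishing allocation failure from "no path". bond_arp_send() then walks the list up to the sentinel instead of iterating a fixed BOND_MAX_VLAN_ENCAP array. A standalone sketch of that walk (the tag values are made up):

    #include <stdio.h>

    #define VLAN_N_VID 4096         /* one past the valid VID range; terminator */

    struct bond_vlan_tag {
            unsigned short vlan_proto;
            unsigned short vlan_id;
    };

    int main(void)
    {
            /* tags[0] is the outer tag; the list ends at vlan_proto == VLAN_N_VID */
            struct bond_vlan_tag tags[] = {
                    { 0x8100, 100 },        /* outer 802.1Q tag */
                    { 0x8100, 200 },        /* one nested inner tag */
                    { VLAN_N_VID, 0 },      /* terminator */
            };
            const struct bond_vlan_tag *t = &tags[1];       /* inner tags first */

            while (t->vlan_proto != VLAN_N_VID) {
                    if (t->vlan_id)
                            printf("inner tag: proto %x vid %u\n",
                                   (unsigned)t->vlan_proto, (unsigned)t->vlan_id);
                    t++;
            }
            if (tags[0].vlan_id)
                    printf("outer tag: proto %x vid %u\n",
                           (unsigned)tags[0].vlan_proto, (unsigned)tags[0].vlan_id);
            return 0;
    }
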
 
@@ -2295,13 +2296,15 @@ static void bond_validate_arp(struct bonding *bond, struct slave *slave, __be32
        int i;
 
        if (!sip || !bond_has_this_ip(bond, tip)) {
-               pr_debug("bva: sip %pI4 tip %pI4 not found\n", &sip, &tip);
+               netdev_dbg(bond->dev, "bva: sip %pI4 tip %pI4 not found\n",
+                          &sip, &tip);
                return;
        }
 
        i = bond_get_targets_ip(bond->params.arp_targets, sip);
        if (i == -1) {
-               pr_debug("bva: sip %pI4 not found in targets\n", &sip);
+               netdev_dbg(bond->dev, "bva: sip %pI4 not found in targets\n",
+                          &sip);
                return;
        }
        slave->last_rx = jiffies;
@@ -2328,8 +2331,8 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
 
        alen = arp_hdr_len(bond->dev);
 
-       pr_debug("bond_arp_rcv: bond %s skb->dev %s\n",
-                bond->dev->name, skb->dev->name);
+       netdev_dbg(bond->dev, "bond_arp_rcv: skb->dev %s\n",
+                  skb->dev->name);
 
        if (alen > skb_headlen(skb)) {
                arp = kmalloc(alen, GFP_ATOMIC);
@@ -2353,10 +2356,10 @@ int bond_arp_rcv(const struct sk_buff *skb, struct bonding *bond,
        arp_ptr += 4 + bond->dev->addr_len;
        memcpy(&tip, arp_ptr, 4);
 
-       pr_debug("bond_arp_rcv: %s %s/%d av %d sv %d sip %pI4 tip %pI4\n",
-                bond->dev->name, slave->dev->name, bond_slave_state(slave),
-                bond->params.arp_validate, slave_do_arp_validate(bond, slave),
-                &sip, &tip);
+       netdev_dbg(bond->dev, "bond_arp_rcv: %s/%d av %d sv %d sip %pI4 tip %pI4\n",
+                  slave->dev->name, bond_slave_state(slave),
+                  bond->params.arp_validate, slave_do_arp_validate(bond, slave),
+                  &sip, &tip);
 
        curr_active_slave = rcu_dereference(bond->curr_active_slave);
 
@@ -2421,7 +2424,7 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
 
        rcu_read_lock();
 
-       oldcurrent = ACCESS_ONCE(bond->curr_active_slave);
+       oldcurrent = rcu_dereference(bond->curr_active_slave);
        /* see if any of the previous devices are up now (i.e. they have
         * xmt and rcv traffic). the curr_active_slave does not come into
         * the picture unless it is null. also, slave->last_link_up is not
@@ -2446,14 +2449,12 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                 * is closed.
                                 */
                                if (!oldcurrent) {
-                                       pr_info("%s: link status definitely up for interface %s\n",
-                                               bond->dev->name,
-                                               slave->dev->name);
+                                       netdev_info(bond->dev, "link status definitely up for interface %s\n",
+                                                   slave->dev->name);
                                        do_failover = 1;
                                } else {
-                                       pr_info("%s: interface %s is now up\n",
-                                               bond->dev->name,
-                                               slave->dev->name);
+                                       netdev_info(bond->dev, "interface %s is now up\n",
+                                                   slave->dev->name);
                                }
                        }
                } else {
@@ -2472,8 +2473,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
                                if (slave->link_failure_count < UINT_MAX)
                                        slave->link_failure_count++;
 
-                               pr_info("%s: interface %s is now down\n",
-                                       bond->dev->name, slave->dev->name);
+                               netdev_info(bond->dev, "interface %s is now down\n",
+                                           slave->dev->name);
 
                                if (slave == oldcurrent)
                                        do_failover = 1;
@@ -2569,7 +2570,7 @@ static int bond_ab_arp_inspect(struct bonding *bond)
                 * before being taken out
                 */
                if (!bond_is_active_slave(slave) &&
-                   !bond->current_arp_slave &&
+                   !rcu_access_pointer(bond->current_arp_slave) &&
                    !bond_time_in_interval(bond, last_rx, 3)) {
                        slave->new_link = BOND_LINK_DOWN;
                        commit++;
@@ -2612,21 +2613,24 @@ static void bond_ab_arp_commit(struct bonding *bond)
 
                case BOND_LINK_UP:
                        trans_start = dev_trans_start(slave->dev);
-                       if (bond->curr_active_slave != slave ||
-                           (!bond->curr_active_slave &&
+                       if (rtnl_dereference(bond->curr_active_slave) != slave ||
+                           (!rtnl_dereference(bond->curr_active_slave) &&
                             bond_time_in_interval(bond, trans_start, 1))) {
+                               struct slave *current_arp_slave;
+
+                               current_arp_slave = rtnl_dereference(bond->current_arp_slave);
                                slave->link = BOND_LINK_UP;
-                               if (bond->current_arp_slave) {
+                               if (current_arp_slave) {
                                        bond_set_slave_inactive_flags(
-                                               bond->current_arp_slave,
+                                               current_arp_slave,
                                                BOND_SLAVE_NOTIFY_NOW);
-                                       bond->current_arp_slave = NULL;
+                                       RCU_INIT_POINTER(bond->current_arp_slave, NULL);
                                }
 
-                               pr_info("%s: link status definitely up for interface %s\n",
-                                       bond->dev->name, slave->dev->name);
+                               netdev_info(bond->dev, "link status definitely up for interface %s\n",
+                                           slave->dev->name);
 
-                               if (!bond->curr_active_slave ||
+                               if (!rtnl_dereference(bond->curr_active_slave) ||
                                    (slave == bond->primary_slave))
                                        goto do_failover;
 
@@ -2642,20 +2646,19 @@ static void bond_ab_arp_commit(struct bonding *bond)
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_NOW);
 
-                       pr_info("%s: link status definitely down for interface %s, disabling it\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "link status definitely down for interface %s, disabling it\n",
+                                   slave->dev->name);
 
-                       if (slave == bond->curr_active_slave) {
-                               bond->current_arp_slave = NULL;
+                       if (slave == rtnl_dereference(bond->curr_active_slave)) {
+                               RCU_INIT_POINTER(bond->current_arp_slave, NULL);
                                goto do_failover;
                        }
 
                        continue;
 
                default:
-                       pr_err("%s: impossible: new_link %d on slave %s\n",
-                              bond->dev->name, slave->new_link,
-                              slave->dev->name);
+                       netdev_err(bond->dev, "impossible: new_link %d on slave %s\n",
+                                  slave->new_link, slave->dev->name);
                        continue;
                }
 
@@ -2686,9 +2689,9 @@ static bool bond_ab_arp_probe(struct bonding *bond)
        bool should_notify_rtnl = BOND_SLAVE_NOTIFY_LATER;
 
        if (curr_arp_slave && curr_active_slave)
-               pr_info("PROBE: c_arp %s && cas %s BAD\n",
-                       curr_arp_slave->dev->name,
-                       curr_active_slave->dev->name);
+               netdev_info(bond->dev, "PROBE: c_arp %s && cas %s BAD\n",
+                           curr_arp_slave->dev->name,
+                           curr_active_slave->dev->name);
 
        if (curr_active_slave) {
                bond_arp_send_all(bond, curr_active_slave);
@@ -2729,8 +2732,8 @@ static bool bond_ab_arp_probe(struct bonding *bond)
                        bond_set_slave_inactive_flags(slave,
                                                      BOND_SLAVE_NOTIFY_LATER);
 
-                       pr_info("%s: backup interface %s is now down\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "backup interface %s is now down\n",
+                                   slave->dev->name);
                }
                if (slave == curr_arp_slave)
                        found = true;
@@ -2926,9 +2929,8 @@ static int bond_slave_netdev_event(unsigned long event,
                        break;
                }
 
-               pr_info("%s: Primary slave changed to %s, reselecting active slave\n",
-                       bond->dev->name,
-                       bond->primary_slave ? slave_dev->name : "none");
+               netdev_info(bond->dev, "Primary slave changed to %s, reselecting active slave\n",
+                           bond->primary_slave ? slave_dev->name : "none");
 
                block_netpoll_tx();
                write_lock_bh(&bond->curr_slave_lock);
@@ -2963,19 +2965,18 @@ static int bond_netdev_event(struct notifier_block *this,
 {
        struct net_device *event_dev = netdev_notifier_info_to_dev(ptr);
 
-       pr_debug("event_dev: %s, event: %lx\n",
-                event_dev ? event_dev->name : "None", event);
+       netdev_dbg(event_dev, "event: %lx\n", event);
 
        if (!(event_dev->priv_flags & IFF_BONDING))
                return NOTIFY_DONE;
 
        if (event_dev->flags & IFF_MASTER) {
-               pr_debug("IFF_MASTER\n");
+               netdev_dbg(event_dev, "IFF_MASTER\n");
                return bond_master_netdev_event(event, event_dev);
        }
 
        if (event_dev->flags & IFF_SLAVE) {
-               pr_debug("IFF_SLAVE\n");
+               netdev_dbg(event_dev, "IFF_SLAVE\n");
                return bond_slave_netdev_event(event, event_dev);
        }
 
@@ -2991,11 +2992,11 @@ static struct notifier_block bond_netdev_notifier = {
 /* L2 hash helper */
 static inline u32 bond_eth_hash(struct sk_buff *skb)
 {
-       struct ethhdr *data = (struct ethhdr *)skb->data;
-
-       if (skb_headlen(skb) >= offsetof(struct ethhdr, h_proto))
-               return data->h_dest[5] ^ data->h_source[5];
+       struct ethhdr *ep, hdr_tmp;
 
+       ep = skb_header_pointer(skb, 0, sizeof(hdr_tmp), &hdr_tmp);
+       if (ep)
+               return ep->h_dest[5] ^ ep->h_source[5] ^ ep->h_proto;
        return 0;
 }
 
@@ -3102,8 +3103,8 @@ static int bond_open(struct net_device *bond_dev)
        if (bond_has_slaves(bond)) {
                read_lock(&bond->curr_slave_lock);
                bond_for_each_slave(bond, slave, iter) {
-                       if (bond_uses_primary(bond)
-                               && (slave != bond->curr_active_slave)) {
+                       if (bond_uses_primary(bond) &&
+                           slave != rcu_access_pointer(bond->curr_active_slave)) {
                                bond_set_slave_inactive_flags(slave,
                                                              BOND_SLAVE_NOTIFY_NOW);
                        } else {
@@ -3217,7 +3218,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
        struct net *net;
        int res = 0;
 
-       pr_debug("bond_ioctl: master=%s, cmd=%d\n", bond_dev->name, cmd);
+       netdev_dbg(bond_dev, "bond_ioctl: cmd=%d\n", cmd);
 
        switch (cmd) {
        case SIOCGMIIPHY:
@@ -3287,12 +3288,12 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
 
        slave_dev = __dev_get_by_name(net, ifr->ifr_slave);
 
-       pr_debug("slave_dev=%p:\n", slave_dev);
+       netdev_dbg(bond_dev, "slave_dev=%p:\n", slave_dev);
 
        if (!slave_dev)
                return -ENODEV;
 
-       pr_debug("slave_dev->name=%s:\n", slave_dev->name);
+       netdev_dbg(bond_dev, "slave_dev->name=%s:\n", slave_dev->name);
        switch (cmd) {
        case BOND_ENSLAVE_OLD:
        case SIOCBONDENSLAVE:
@@ -3419,8 +3420,7 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
        struct list_head *iter;
        int res = 0;
 
-       pr_debug("bond=%p, name=%s, new_mtu=%d\n",
-                bond, bond_dev ? bond_dev->name : "None", new_mtu);
+       netdev_dbg(bond_dev, "bond=%p, new_mtu=%d\n", bond, new_mtu);
 
        /* Can't hold bond->lock with bh disabled here since
         * some base drivers panic. On the other hand we can't
@@ -3438,8 +3438,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
         */
 
        bond_for_each_slave(bond, slave, iter) {
-               pr_debug("s %p c_m %p\n",
-                        slave, slave->dev->netdev_ops->ndo_change_mtu);
+               netdev_dbg(bond_dev, "s %p c_m %p\n",
+                          slave, slave->dev->netdev_ops->ndo_change_mtu);
 
                res = dev_set_mtu(slave->dev, new_mtu);
 
@@ -3452,7 +3452,8 @@ static int bond_change_mtu(struct net_device *bond_dev, int new_mtu)
                         * means changing their mtu from timer context, which
                         * is probably not a good idea.
                         */
-                       pr_debug("err %d %s\n", res, slave->dev->name);
+                       netdev_dbg(bond_dev, "err %d %s\n", res,
+                                  slave->dev->name);
                        goto unwind;
                }
        }
@@ -3471,8 +3472,8 @@ unwind:
 
                tmp_res = dev_set_mtu(rollback_slave->dev, bond_dev->mtu);
                if (tmp_res) {
-                       pr_debug("unwind err %d dev %s\n",
-                                tmp_res, rollback_slave->dev->name);
+                       netdev_dbg(bond_dev, "unwind err %d dev %s\n",
+                                  tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3498,8 +3499,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
                return bond_alb_set_mac_address(bond_dev, addr);
 
 
-       pr_debug("bond=%p, name=%s\n",
-                bond, bond_dev ? bond_dev->name : "None");
+       netdev_dbg(bond_dev, "bond=%p\n", bond);
 
        /* If fail_over_mac is enabled, do nothing and return success.
         * Returning an error causes ifenslave to fail.
@@ -3527,7 +3527,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
         */
 
        bond_for_each_slave(bond, slave, iter) {
-               pr_debug("slave %p %s\n", slave, slave->dev->name);
+               netdev_dbg(bond_dev, "slave %p %s\n", slave, slave->dev->name);
                res = dev_set_mac_address(slave->dev, addr);
                if (res) {
                        /* TODO: consider downing the slave
@@ -3536,7 +3536,7 @@ static int bond_set_mac_address(struct net_device *bond_dev, void *addr)
                         * breakage anyway until ARP finish
                         * updating, so...
                         */
-                       pr_debug("err %d %s\n", res, slave->dev->name);
+                       netdev_dbg(bond_dev, "err %d %s\n", res, slave->dev->name);
                        goto unwind;
                }
        }
@@ -3558,8 +3558,8 @@ unwind:
 
                tmp_res = dev_set_mac_address(rollback_slave->dev, &tmp_sa);
                if (tmp_res) {
-                       pr_debug("unwind err %d dev %s\n",
-                                tmp_res, rollback_slave->dev->name);
+                       netdev_dbg(bond_dev, "unwind err %d dev %s\n",
+                                  tmp_res, rollback_slave->dev->name);
                }
        }
 
@@ -3806,8 +3806,7 @@ static netdev_tx_t __bond_start_xmit(struct sk_buff *skb, struct net_device *dev
                return bond_tlb_xmit(skb, dev);
        default:
                /* Should never happen, mode already checked */
-               pr_err("%s: Error: Unknown bonding mode %d\n",
-                      dev->name, BOND_MODE(bond));
+               netdev_err(dev, "Unknown bonding mode %d\n", BOND_MODE(bond));
                WARN_ON_ONCE(1);
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
@@ -3948,13 +3947,6 @@ void bond_setup(struct net_device *bond_dev)
        bond_dev->priv_flags |= IFF_BONDING | IFF_UNICAST_FLT;
        bond_dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
 
-       /* At first, we block adding VLANs. That's the only way to
-        * prevent problems that occur when adding VLANs over an
-        * empty bond. The block will be removed once non-challenged
-        * slaves are enslaved.
-        */
-       bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
-
        /* don't acquire bond device's netif_tx_lock when
         * transmitting */
        bond_dev->features |= NETIF_F_LLTX;
@@ -3975,6 +3967,7 @@ void bond_setup(struct net_device *bond_dev)
                                NETIF_F_HW_VLAN_CTAG_FILTER;
 
        bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
+       bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
        bond_dev->features |= bond_dev->hw_features;
 }
 
@@ -3993,7 +3986,7 @@ static void bond_uninit(struct net_device *bond_dev)
        /* Release the bonded slaves */
        bond_for_each_slave(bond, slave, iter)
                __bond_release_one(bond_dev, slave->dev, true);
-       pr_info("%s: Released all slaves\n", bond_dev->name);
+       netdev_info(bond_dev, "Released all slaves\n");
 
        list_del(&bond->bond_list);
 
@@ -4059,7 +4052,7 @@ static int bond_check_params(struct bond_params *params)
        }
 
        if (ad_select) {
-               bond_opt_initstr(&newval, lacp_rate);
+               bond_opt_initstr(&newval, ad_select);
                valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
                                        &newval);
                if (!valptr) {
@@ -4382,7 +4375,7 @@ static int bond_init(struct net_device *bond_dev)
        struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
        struct alb_bond_info *bond_info = &(BOND_ALB_INFO(bond));
 
-       pr_debug("Begin bond_init for %s\n", bond_dev->name);
+       netdev_dbg(bond_dev, "Begin bond_init\n");
 
        /*
         * Initialize locks that may be required during
@@ -4431,7 +4424,7 @@ int bond_create(struct net *net, const char *name)
        rtnl_lock();
 
        bond_dev = alloc_netdev_mq(sizeof(struct bonding),
-                                  name ? name : "bond%d",
+                                  name ? name : "bond%d", NET_NAME_UNKNOWN,
                                   bond_setup, tx_queues);
        if (!bond_dev) {
                pr_err("%s: eek! can't alloc netdev!\n", name);
@@ -4472,7 +4465,6 @@ static void __net_exit bond_net_exit(struct net *net)
        LIST_HEAD(list);
 
        bond_destroy_sysfs(bn);
-       bond_destroy_proc_dir(bn);
 
        /* Kill off any bonds created after unregistering bond rtnl ops */
        rtnl_lock();
@@ -4480,6 +4472,8 @@ static void __net_exit bond_net_exit(struct net *net)
                unregister_netdevice_queue(bond->dev, &list);
        unregister_netdevice_many(&list);
        rtnl_unlock();
+
+       bond_destroy_proc_dir(bn);
 }
 
 static struct pernet_operations bond_net_ops = {
index 5ab3c18..d163e11 100644 (file)
@@ -9,8 +9,6 @@
  * (at your option) any later version.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/netdevice.h>
@@ -181,8 +179,7 @@ static int bond_changelink(struct net_device *bond_dev,
                int arp_interval = nla_get_u32(data[IFLA_BOND_ARP_INTERVAL]);
 
                if (arp_interval && miimon) {
-                       pr_err("%s: ARP monitoring cannot be used with MII monitoring\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "ARP monitoring cannot be used with MII monitoring\n");
                        return -EINVAL;
                }
 
@@ -207,8 +204,7 @@ static int bond_changelink(struct net_device *bond_dev,
                        i++;
                }
                if (i == 0 && bond->params.arp_interval)
-                       pr_warn("%s: Removing last arp target with arp_interval on\n",
-                               bond->dev->name);
+                       netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
                if (err)
                        return err;
        }
@@ -216,8 +212,7 @@ static int bond_changelink(struct net_device *bond_dev,
                int arp_validate = nla_get_u32(data[IFLA_BOND_ARP_VALIDATE]);
 
                if (arp_validate && miimon) {
-                       pr_err("%s: ARP validating cannot be used with MII monitoring\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "ARP validating cannot be used with MII monitoring\n");
                        return -EINVAL;
                }
 
@@ -398,20 +393,31 @@ static size_t bond_get_size(const struct net_device *bond_dev)
                0;
 }
 
+static int bond_option_active_slave_get_ifindex(struct bonding *bond)
+{
+       const struct net_device *slave;
+       int ifindex;
+
+       rcu_read_lock();
+       slave = bond_option_active_slave_get_rcu(bond);
+       ifindex = slave ? slave->ifindex : 0;
+       rcu_read_unlock();
+       return ifindex;
+}
+
 static int bond_fill_info(struct sk_buff *skb,
                          const struct net_device *bond_dev)
 {
        struct bonding *bond = netdev_priv(bond_dev);
-       struct net_device *slave_dev = bond_option_active_slave_get(bond);
-       struct nlattr *targets;
        unsigned int packets_per_slave;
-       int i, targets_added;
+       int ifindex, i, targets_added;
+       struct nlattr *targets;
 
        if (nla_put_u8(skb, IFLA_BOND_MODE, BOND_MODE(bond)))
                goto nla_put_failure;
 
-       if (slave_dev &&
-           nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, slave_dev->ifindex))
+       ifindex = bond_option_active_slave_get_ifindex(bond);
+       if (ifindex && nla_put_u32(skb, IFLA_BOND_ACTIVE_SLAVE, ifindex))
                goto nla_put_failure;
 
        if (nla_put_u32(skb, IFLA_BOND_MIIMON, bond->params.miimon))
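
bond_fill_info() previously took a bare pointer from bond_option_active_slave_get(); the new bond_option_active_slave_get_ifindex() helper instead copies the scalar ifindex out while still inside rcu_read_lock(), so no RCU-protected pointer escapes the read-side critical section. The same pattern in isolation (stand-in names, not the bonding code):

    #include <linux/rcupdate.h>
    #include <linux/netdevice.h>

    static struct net_device __rcu *active_dev;     /* stand-in pointer */

    static int active_dev_ifindex(void)
    {
            const struct net_device *dev;
            int ifindex;

            rcu_read_lock();
            dev = rcu_dereference(active_dev);
            ifindex = dev ? dev->ifindex : 0;       /* copy the scalar out */
            rcu_read_unlock();

            return ifindex;         /* safe after unlock; 'dev' would not be */
    }
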
index 540e016..dc73463 100644 (file)
@@ -9,8 +9,6 @@
  * (at your option) any later version.
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
 #include <linux/errno.h>
 #include <linux/if.h>
 #include <linux/netdevice.h>
@@ -544,9 +542,8 @@ static void bond_opt_dep_print(struct bonding *bond,
        params = &bond->params;
        modeval = bond_opt_get_val(BOND_OPT_MODE, params->mode);
        if (test_bit(params->mode, &opt->unsuppmodes))
-               pr_err("%s: option %s: mode dependency failed, not supported in mode %s(%llu)\n",
-                      bond->dev->name, opt->name,
-                      modeval->string, modeval->value);
+               netdev_err(bond->dev, "option %s: mode dependency failed, not supported in mode %s(%llu)\n",
+                          opt->name, modeval->string, modeval->value);
 }
 
 static void bond_opt_error_interpret(struct bonding *bond,
@@ -564,31 +561,30 @@ static void bond_opt_error_interpret(struct bonding *bond,
                                p = strchr(val->string, '\n');
                                if (p)
                                        *p = '\0';
-                               pr_err("%s: option %s: invalid value (%s)\n",
-                                      bond->dev->name, opt->name, val->string);
+                               netdev_err(bond->dev, "option %s: invalid value (%s)\n",
+                                          opt->name, val->string);
                        } else {
-                               pr_err("%s: option %s: invalid value (%llu)\n",
-                                      bond->dev->name, opt->name, val->value);
+                               netdev_err(bond->dev, "option %s: invalid value (%llu)\n",
+                                          opt->name, val->value);
                        }
                }
                minval = bond_opt_get_flags(opt, BOND_VALFLAG_MIN);
                maxval = bond_opt_get_flags(opt, BOND_VALFLAG_MAX);
                if (!maxval)
                        break;
-               pr_err("%s: option %s: allowed values %llu - %llu\n",
-                      bond->dev->name, opt->name, minval ? minval->value : 0,
-                      maxval->value);
+               netdev_err(bond->dev, "option %s: allowed values %llu - %llu\n",
+                          opt->name, minval ? minval->value : 0, maxval->value);
                break;
        case -EACCES:
                bond_opt_dep_print(bond, opt);
                break;
        case -ENOTEMPTY:
-               pr_err("%s: option %s: unable to set because the bond device has slaves\n",
-                      bond->dev->name, opt->name);
+               netdev_err(bond->dev, "option %s: unable to set because the bond device has slaves\n",
+                          opt->name);
                break;
        case -EBUSY:
-               pr_err("%s: option %s: unable to set because the bond device is up\n",
-                      bond->dev->name, opt->name);
+               netdev_err(bond->dev, "option %s: unable to set because the bond device is up\n",
+                          opt->name);
                break;
        default:
                break;
@@ -671,17 +667,18 @@ const struct bond_option *bond_opt_get(unsigned int option)
        return &bond_opts[option];
 }
 
-int bond_option_mode_set(struct bonding *bond, const struct bond_opt_value *newval)
+static int bond_option_mode_set(struct bonding *bond,
+                               const struct bond_opt_value *newval)
 {
        if (!bond_mode_uses_arp(newval->value) && bond->params.arp_interval) {
-               pr_info("%s: %s mode is incompatible with arp monitoring, start mii monitoring\n",
-                       bond->dev->name, newval->string);
+               netdev_info(bond->dev, "%s mode is incompatible with arp monitoring, start mii monitoring\n",
+                           newval->string);
                /* disable arp monitoring */
                bond->params.arp_interval = 0;
                /* set miimon to default value */
                bond->params.miimon = BOND_DEFAULT_MIIMON;
-               pr_info("%s: Setting MII monitoring interval to %d\n",
-                       bond->dev->name, bond->params.miimon);
+               netdev_info(bond->dev, "Setting MII monitoring interval to %d\n",
+                           bond->params.miimon);
        }
 
        /* don't cache arp_validate between modes */
@@ -704,11 +701,6 @@ struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond)
        return __bond_option_active_slave_get(bond, slave);
 }
 
-struct net_device *bond_option_active_slave_get(struct bonding *bond)
-{
-       return __bond_option_active_slave_get(bond, bond->curr_active_slave);
-}
-
 static int bond_option_active_slave_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
@@ -727,14 +719,14 @@ static int bond_option_active_slave_set(struct bonding *bond,
 
        if (slave_dev) {
                if (!netif_is_bond_slave(slave_dev)) {
-                       pr_err("Device %s is not bonding slave\n",
-                              slave_dev->name);
+                       netdev_err(bond->dev, "Device %s is not bonding slave\n",
+                                  slave_dev->name);
                        return -EINVAL;
                }
 
                if (bond->dev != netdev_master_upper_dev_get(slave_dev)) {
-                       pr_err("%s: Device %s is not our slave\n",
-                              bond->dev->name, slave_dev->name);
+                       netdev_err(bond->dev, "Device %s is not our slave\n",
+                                  slave_dev->name);
                        return -EINVAL;
                }
        }
@@ -744,29 +736,29 @@ static int bond_option_active_slave_set(struct bonding *bond,
 
        /* check to see if we are clearing active */
        if (!slave_dev) {
-               pr_info("%s: Clearing current active slave\n", bond->dev->name);
+               netdev_info(bond->dev, "Clearing current active slave\n");
                RCU_INIT_POINTER(bond->curr_active_slave, NULL);
                bond_select_active_slave(bond);
        } else {
-               struct slave *old_active = bond->curr_active_slave;
+               struct slave *old_active = bond_deref_active_protected(bond);
                struct slave *new_active = bond_slave_get_rtnl(slave_dev);
 
                BUG_ON(!new_active);
 
                if (new_active == old_active) {
                        /* do nothing */
-                       pr_info("%s: %s is already the current active slave\n",
-                               bond->dev->name, new_active->dev->name);
+                       netdev_info(bond->dev, "%s is already the current active slave\n",
+                                   new_active->dev->name);
                } else {
                        if (old_active && (new_active->link == BOND_LINK_UP) &&
                            bond_slave_is_up(new_active)) {
-                               pr_info("%s: Setting %s as active slave\n",
-                                       bond->dev->name, new_active->dev->name);
+                               netdev_info(bond->dev, "Setting %s as active slave\n",
+                                           new_active->dev->name);
                                bond_change_active_slave(bond, new_active);
                        } else {
-                               pr_err("%s: Could not set %s as active slave; either %s is down or the link is down\n",
-                                      bond->dev->name, new_active->dev->name,
-                                      new_active->dev->name);
+                               netdev_err(bond->dev, "Could not set %s as active slave; either %s is down or the link is down\n",
+                                          new_active->dev->name,
+                                          new_active->dev->name);
                                ret = -EINVAL;
                        }
                }
@@ -785,20 +777,17 @@ static int bond_option_active_slave_set(struct bonding *bond,
 static int bond_option_miimon_set(struct bonding *bond,
                                  const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting MII monitoring interval to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting MII monitoring interval to %llu\n",
+                   newval->value);
        bond->params.miimon = newval->value;
        if (bond->params.updelay)
-               pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
-                       bond->dev->name,
+               netdev_info(bond->dev, "Note: Updating updelay (to %d) since it is a multiple of the miimon value\n",
                        bond->params.updelay * bond->params.miimon);
        if (bond->params.downdelay)
-               pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
-                       bond->dev->name,
-                       bond->params.downdelay * bond->params.miimon);
+               netdev_info(bond->dev, "Note: Updating downdelay (to %d) since it is a multiple of the miimon value\n",
+                           bond->params.downdelay * bond->params.miimon);
        if (newval->value && bond->params.arp_interval) {
-               pr_info("%s: MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n",
-                       bond->dev->name);
+               netdev_info(bond->dev, "MII monitoring cannot be used with ARP monitoring - disabling ARP monitoring...\n");
                bond->params.arp_interval = 0;
                if (bond->params.arp_validate)
                        bond->params.arp_validate = BOND_ARP_VALIDATE_NONE;
@@ -830,20 +819,18 @@ static int bond_option_updelay_set(struct bonding *bond,
        int value = newval->value;
 
        if (!bond->params.miimon) {
-               pr_err("%s: Unable to set up delay as MII monitoring is disabled\n",
-                      bond->dev->name);
+               netdev_err(bond->dev, "Unable to set up delay as MII monitoring is disabled\n");
                return -EPERM;
        }
        if ((value % bond->params.miimon) != 0) {
-               pr_warn("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
-                       bond->dev->name, value,
-                       bond->params.miimon,
-                       (value / bond->params.miimon) *
-                       bond->params.miimon);
+               netdev_warn(bond->dev, "up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n",
+                           value, bond->params.miimon,
+                           (value / bond->params.miimon) *
+                           bond->params.miimon);
        }
        bond->params.updelay = value / bond->params.miimon;
-       pr_info("%s: Setting up delay to %d\n",
-               bond->dev->name, bond->params.updelay * bond->params.miimon);
+       netdev_info(bond->dev, "Setting up delay to %d\n",
+                   bond->params.updelay * bond->params.miimon);
 
        return 0;
 }
@@ -854,20 +841,18 @@ static int bond_option_downdelay_set(struct bonding *bond,
        int value = newval->value;
 
        if (!bond->params.miimon) {
-               pr_err("%s: Unable to set down delay as MII monitoring is disabled\n",
-                      bond->dev->name);
+               netdev_err(bond->dev, "Unable to set down delay as MII monitoring is disabled\n");
                return -EPERM;
        }
        if ((value % bond->params.miimon) != 0) {
-               pr_warn("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
-                       bond->dev->name, value,
-                       bond->params.miimon,
-                       (value / bond->params.miimon) *
-                       bond->params.miimon);
+               netdev_warn(bond->dev, "down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n",
+                           value, bond->params.miimon,
+                           (value / bond->params.miimon) *
+                           bond->params.miimon);
        }
        bond->params.downdelay = value / bond->params.miimon;
-       pr_info("%s: Setting down delay to %d\n",
-               bond->dev->name, bond->params.downdelay * bond->params.miimon);
+       netdev_info(bond->dev, "Setting down delay to %d\n",
+                   bond->params.downdelay * bond->params.miimon);
 
        return 0;
 }
@@ -875,8 +860,8 @@ static int bond_option_downdelay_set(struct bonding *bond,
 static int bond_option_use_carrier_set(struct bonding *bond,
                                       const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting use_carrier to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting use_carrier to %llu\n",
+                   newval->value);
        bond->params.use_carrier = newval->value;
 
        return 0;
@@ -889,18 +874,16 @@ static int bond_option_use_carrier_set(struct bonding *bond,
 static int bond_option_arp_interval_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting ARP monitoring interval to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting ARP monitoring interval to %llu\n",
+                   newval->value);
        bond->params.arp_interval = newval->value;
        if (newval->value) {
                if (bond->params.miimon) {
-                       pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring\n",
-                               bond->dev->name, bond->dev->name);
+                       netdev_info(bond->dev, "ARP monitoring cannot be used with MII monitoring. Disabling MII monitoring\n");
                        bond->params.miimon = 0;
                }
                if (!bond->params.arp_targets[0])
-                       pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified\n",
-                               bond->dev->name);
+                       netdev_info(bond->dev, "ARP monitoring has been set up, but no ARP targets have been specified\n");
        }
        if (bond->dev->flags & IFF_UP) {
                /* If the interface is up, we may need to fire off
@@ -944,24 +927,24 @@ static int _bond_option_arp_ip_target_add(struct bonding *bond, __be32 target)
        int ind;
 
        if (!bond_is_ip_target_ok(target)) {
-               pr_err("%s: invalid ARP target %pI4 specified for addition\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "invalid ARP target %pI4 specified for addition\n",
+                          &target);
                return -EINVAL;
        }
 
        if (bond_get_targets_ip(targets, target) != -1) { /* dup */
-               pr_err("%s: ARP target %pI4 is already present\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "ARP target %pI4 is already present\n",
+                          &target);
                return -EINVAL;
        }
 
        ind = bond_get_targets_ip(targets, 0); /* first free slot */
        if (ind == -1) {
-               pr_err("%s: ARP target table is full!\n", bond->dev->name);
+               netdev_err(bond->dev, "ARP target table is full!\n");
                return -EINVAL;
        }
 
-       pr_info("%s: Adding ARP target %pI4\n", bond->dev->name, &target);
+       netdev_info(bond->dev, "Adding ARP target %pI4\n", &target);
 
        _bond_options_arp_ip_target_set(bond, ind, target, jiffies);
 
@@ -989,23 +972,22 @@ static int bond_option_arp_ip_target_rem(struct bonding *bond, __be32 target)
        int ind, i;
 
        if (!bond_is_ip_target_ok(target)) {
-               pr_err("%s: invalid ARP target %pI4 specified for removal\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "invalid ARP target %pI4 specified for removal\n",
+                          &target);
                return -EINVAL;
        }
 
        ind = bond_get_targets_ip(targets, target);
        if (ind == -1) {
-               pr_err("%s: unable to remove nonexistent ARP target %pI4\n",
-                      bond->dev->name, &target);
+               netdev_err(bond->dev, "unable to remove nonexistent ARP target %pI4\n",
+                          &target);
                return -EINVAL;
        }
 
        if (ind == 0 && !targets[1] && bond->params.arp_interval)
-               pr_warn("%s: Removing last arp target with arp_interval on\n",
-                       bond->dev->name);
+               netdev_warn(bond->dev, "Removing last arp target with arp_interval on\n");
 
-       pr_info("%s: Removing ARP target %pI4\n", bond->dev->name, &target);
+       netdev_info(bond->dev, "Removing ARP target %pI4\n", &target);
 
        /* not to race with bond_arp_rcv */
        write_lock_bh(&bond->lock);
@@ -1044,8 +1026,8 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
 
        if (newval->string) {
                if (!in4_pton(newval->string+1, -1, (u8 *)&target, -1, NULL)) {
-                       pr_err("%s: invalid ARP target %pI4 specified\n",
-                              bond->dev->name, &target);
+                       netdev_err(bond->dev, "invalid ARP target %pI4 specified\n",
+                                  &target);
                        return ret;
                }
                if (newval->string[0] == '+')
@@ -1053,8 +1035,7 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
                else if (newval->string[0] == '-')
                        ret = bond_option_arp_ip_target_rem(bond, target);
                else
-                       pr_err("no command found in arp_ip_targets file for bond %s - use +<addr> or -<addr>\n",
-                              bond->dev->name);
+                       netdev_err(bond->dev, "no command found in arp_ip_targets file - use +<addr> or -<addr>\n");
        } else {
                target = newval->value;
                ret = bond_option_arp_ip_target_add(bond, target);
@@ -1066,8 +1047,8 @@ static int bond_option_arp_ip_targets_set(struct bonding *bond,
 static int bond_option_arp_validate_set(struct bonding *bond,
                                        const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting arp_validate to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting arp_validate to %s (%llu)\n",
+                   newval->string, newval->value);
 
        if (bond->dev->flags & IFF_UP) {
                if (!newval->value)
@@ -1083,8 +1064,8 @@ static int bond_option_arp_validate_set(struct bonding *bond,
 static int bond_option_arp_all_targets_set(struct bonding *bond,
                                           const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting arp_all_targets to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting arp_all_targets to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.arp_all_targets = newval->value;
 
        return 0;
@@ -1106,7 +1087,7 @@ static int bond_option_primary_set(struct bonding *bond,
                *p = '\0';
        /* check to see if we are clearing primary */
        if (!strlen(primary)) {
-               pr_info("%s: Setting primary slave to None\n", bond->dev->name);
+               netdev_info(bond->dev, "Setting primary slave to None\n");
                bond->primary_slave = NULL;
                memset(bond->params.primary, 0, sizeof(bond->params.primary));
                bond_select_active_slave(bond);
@@ -1115,8 +1096,8 @@ static int bond_option_primary_set(struct bonding *bond,
 
        bond_for_each_slave(bond, slave, iter) {
                if (strncmp(slave->dev->name, primary, IFNAMSIZ) == 0) {
-                       pr_info("%s: Setting %s as primary slave\n",
-                               bond->dev->name, slave->dev->name);
+                       netdev_info(bond->dev, "Setting %s as primary slave\n",
+                                   slave->dev->name);
                        bond->primary_slave = slave;
                        strcpy(bond->params.primary, slave->dev->name);
                        bond_select_active_slave(bond);
@@ -1125,15 +1106,15 @@ static int bond_option_primary_set(struct bonding *bond,
        }
 
        if (bond->primary_slave) {
-               pr_info("%s: Setting primary slave to None\n", bond->dev->name);
+               netdev_info(bond->dev, "Setting primary slave to None\n");
                bond->primary_slave = NULL;
                bond_select_active_slave(bond);
        }
        strncpy(bond->params.primary, primary, IFNAMSIZ);
        bond->params.primary[IFNAMSIZ - 1] = 0;
 
-       pr_info("%s: Recording %s as primary, but it has not been enslaved to %s yet\n",
-               bond->dev->name, primary, bond->dev->name);
+       netdev_info(bond->dev, "Recording %s as primary, but it has not been enslaved to %s yet\n",
+                   primary, bond->dev->name);
 
 out:
        write_unlock_bh(&bond->curr_slave_lock);
@@ -1146,8 +1127,8 @@ out:
 static int bond_option_primary_reselect_set(struct bonding *bond,
                                            const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting primary_reselect to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting primary_reselect to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.primary_reselect = newval->value;
 
        block_netpoll_tx();
@@ -1162,8 +1143,8 @@ static int bond_option_primary_reselect_set(struct bonding *bond,
 static int bond_option_fail_over_mac_set(struct bonding *bond,
                                         const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting fail_over_mac to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting fail_over_mac to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.fail_over_mac = newval->value;
 
        return 0;
@@ -1172,8 +1153,8 @@ static int bond_option_fail_over_mac_set(struct bonding *bond,
 static int bond_option_xmit_hash_policy_set(struct bonding *bond,
                                            const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting xmit hash policy to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting xmit hash policy to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.xmit_policy = newval->value;
 
        return 0;
@@ -1182,8 +1163,8 @@ static int bond_option_xmit_hash_policy_set(struct bonding *bond,
 static int bond_option_resend_igmp_set(struct bonding *bond,
                                       const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting resend_igmp to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting resend_igmp to %llu\n",
+                   newval->value);
        bond->params.resend_igmp = newval->value;
 
        return 0;
@@ -1221,8 +1202,8 @@ static int bond_option_all_slaves_active_set(struct bonding *bond,
 static int bond_option_min_links_set(struct bonding *bond,
                                     const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting min links value to %llu\n",
-               bond->dev->name, newval->value);
+       netdev_info(bond->dev, "Setting min links value to %llu\n",
+                   newval->value);
        bond->params.min_links = newval->value;
 
        return 0;
@@ -1257,8 +1238,8 @@ static int bond_option_pps_set(struct bonding *bond,
 static int bond_option_lacp_rate_set(struct bonding *bond,
                                     const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting LACP rate to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting LACP rate to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.lacp_fast = newval->value;
        bond_3ad_update_lacp_rate(bond);
 
@@ -1268,8 +1249,8 @@ static int bond_option_lacp_rate_set(struct bonding *bond,
 static int bond_option_ad_select_set(struct bonding *bond,
                                     const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting ad_select to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting ad_select to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.ad_select = newval->value;
 
        return 0;
@@ -1330,7 +1311,7 @@ out:
        return ret;
 
 err_no_cmd:
-       pr_info("invalid input for queue_id set for %s\n", bond->dev->name);
+       netdev_info(bond->dev, "invalid input for queue_id set\n");
        ret = -EPERM;
        goto out;
 
@@ -1352,20 +1333,20 @@ static int bond_option_slaves_set(struct bonding *bond,
 
        dev = __dev_get_by_name(dev_net(bond->dev), ifname);
        if (!dev) {
-               pr_info("%s: interface %s does not exist!\n",
-                       bond->dev->name, ifname);
+               netdev_info(bond->dev, "interface %s does not exist!\n",
+                           ifname);
                ret = -ENODEV;
                goto out;
        }
 
        switch (command[0]) {
        case '+':
-               pr_info("%s: Adding slave %s\n", bond->dev->name, dev->name);
+               netdev_info(bond->dev, "Adding slave %s\n", dev->name);
                ret = bond_enslave(bond->dev, dev);
                break;
 
        case '-':
-               pr_info("%s: Removing slave %s\n", bond->dev->name, dev->name);
+               netdev_info(bond->dev, "Removing slave %s\n", dev->name);
                ret = bond_release(bond->dev, dev);
                break;
 
@@ -1377,8 +1358,7 @@ out:
        return ret;
 
 err_no_cmd:
-       pr_err("no command found in slaves file for bond %s - use +ifname or -ifname\n",
-              bond->dev->name);
+       netdev_err(bond->dev, "no command found in slaves file - use +ifname or -ifname\n");
        ret = -EPERM;
        goto out;
 }
@@ -1386,8 +1366,8 @@ err_no_cmd:
 static int bond_option_tlb_dynamic_lb_set(struct bonding *bond,
                                          const struct bond_opt_value *newval)
 {
-       pr_info("%s: Setting dynamic-lb to %s (%llu)\n",
-               bond->dev->name, newval->string, newval->value);
+       netdev_info(bond->dev, "Setting dynamic-lb to %s (%llu)\n",
+                   newval->string, newval->value);
        bond->params.tlb_dynamic_lb = newval->value;
 
        return 0;
index b215b47..de62c03 100644 (file)
@@ -252,8 +252,8 @@ void bond_create_proc_entry(struct bonding *bond)
                                                    S_IRUGO, bn->proc_dir,
                                                    &bond_info_fops, bond);
                if (bond->proc_entry == NULL)
-                       pr_warn("Warning: Cannot create /proc/net/%s/%s\n",
-                               DRV_NAME, bond_dev->name);
+                       netdev_warn(bond_dev, "Cannot create /proc/net/%s/%s\n",
+                                   DRV_NAME, bond_dev->name);
                else
                        memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
        }
index daed52f..98db8ed 100644 (file)
@@ -492,8 +492,9 @@ static ssize_t bonding_show_mii_status(struct device *d,
                                       char *buf)
 {
        struct bonding *bond = to_bond(d);
+       bool active = !!rcu_access_pointer(bond->curr_active_slave);
 
-       return sprintf(buf, "%s\n", bond->curr_active_slave ? "up" : "down");
+       return sprintf(buf, "%s\n", active ? "up" : "down");
 }
 static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL);
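
The sysfs read above only needs to know whether an active slave exists, so rcu_access_pointer() is enough: it may be used outside an RCU read-side critical section as long as the returned pointer is never dereferenced. A hedged sketch of that distinction (the demo types are hypothetical):

    #include <linux/rcupdate.h>

    struct demo_slave;

    struct demo {
            struct demo_slave __rcu *active;
    };

    static bool demo_has_active(struct demo *d)
    {
            /* Existence test only: no dereference, so no rcu_read_lock() */
            return rcu_access_pointer(d->active) != NULL;
    }
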
 
index 0b4d9cd..aace510 100644 (file)
@@ -36,7 +36,6 @@
 
 #define bond_version DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"
 
-#define BOND_MAX_VLAN_ENCAP    2
 #define BOND_MAX_ARP_TARGETS   16
 
 #define BOND_DEFAULT_MIIMON    100
@@ -194,8 +193,8 @@ struct slave {
  */
 struct bonding {
        struct   net_device *dev; /* first - useful for panic debug */
-       struct   slave *curr_active_slave;
-       struct   slave *current_arp_slave;
+       struct   slave __rcu *curr_active_slave;
+       struct   slave __rcu *current_arp_slave;
        struct   slave *primary_slave;
        bool     force_primary;
        s32      slave_cnt; /* never change this value outside the attach/detach wrappers */
@@ -232,6 +231,10 @@ struct bonding {
 #define bond_slave_get_rtnl(dev) \
        ((struct slave *) rtnl_dereference(dev->rx_handler_data))
 
+#define bond_deref_active_protected(bond)                                 \
+       rcu_dereference_protected(bond->curr_active_slave,                 \
+                                 lockdep_is_held(&bond->curr_slave_lock))
+
 struct bond_vlan_tag {
        __be16          vlan_proto;
        unsigned short  vlan_id;
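
bond_deref_active_protected() is the writer-side counterpart: rcu_dereference_protected() skips the read-side machinery but lets lockdep verify that curr_slave_lock really is held at every call site. The general shape, sketched with a hypothetical lock/pointer pair:

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>

    struct demo_slave;

    struct demo {
            spinlock_t lock;
            struct demo_slave __rcu *active;
    };

    static struct demo_slave *demo_active_locked(struct demo *d)
    {
            /* Caller must hold d->lock; lockdep checks the condition */
            return rcu_dereference_protected(d->active,
                                             lockdep_is_held(&d->lock));
    }
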
@@ -265,6 +268,12 @@ static inline bool bond_is_lb(const struct bonding *bond)
               BOND_MODE(bond) == BOND_MODE_ALB;
 }
 
+static inline bool bond_is_nondyn_tlb(const struct bonding *bond)
+{
+       return (BOND_MODE(bond) == BOND_MODE_TLB)  &&
+              (bond->params.tlb_dynamic_lb == 0);
+}
+
 static inline bool bond_mode_uses_arp(int mode)
 {
        return mode != BOND_MODE_8023AD && mode != BOND_MODE_TLB &&
@@ -514,11 +523,10 @@ unsigned int bond_get_num_tx_queues(void);
 int bond_netlink_init(void);
 void bond_netlink_fini(void);
 struct net_device *bond_option_active_slave_get_rcu(struct bonding *bond);
-struct net_device *bond_option_active_slave_get(struct bonding *bond);
 const char *bond_slave_link_status(s8 link);
-bool bond_verify_device_path(struct net_device *start_dev,
-                            struct net_device *end_dev,
-                            struct bond_vlan_tag *tags);
+struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
+                                             struct net_device *end_dev,
+                                             int level);
 
 #ifdef CONFIG_PROC_FS
 void bond_create_proc_entry(struct bonding *bond);
index fc73865..27bbc56 100644 (file)
@@ -349,7 +349,8 @@ static int ldisc_open(struct tty_struct *tty)
        result = snprintf(name, sizeof(name), "cf%s", tty->name);
        if (result >= IFNAMSIZ)
                return -EINVAL;
-       dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+       dev = alloc_netdev(sizeof(*ser), name, NET_NAME_UNKNOWN,
+                          caifdev_setup);
        if (!dev)
                return -ENOMEM;
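
Every alloc_netdev() hunk in this pull is the same mechanical update: the allocator gained a name-assignment-type argument between the name template and the setup callback, and NET_NAME_UNKNOWN keeps the old behaviour for drivers that don't report how the name was chosen. A minimal sketch (the demo names are hypothetical):

    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static void demo_setup(struct net_device *dev)
    {
            ether_setup(dev);
    }

    static struct net_device *demo_alloc(void)
    {
            /* sizeof_priv, name template, name_assign_type, setup cb */
            return alloc_netdev(0, "demo%d", NET_NAME_UNKNOWN, demo_setup);
    }
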
 
index ff54c0e..72ea9ff 100644 (file)
@@ -730,8 +730,8 @@ int cfspi_spi_probe(struct platform_device *pdev)
        int res;
        dev = (struct cfspi_dev *)pdev->dev.platform_data;
 
-       ndev = alloc_netdev(sizeof(struct cfspi),
-                       "cfspi%d", cfspi_setup);
+       ndev = alloc_netdev(sizeof(struct cfspi), "cfspi%d",
+                           NET_NAME_UNKNOWN, cfspi_setup);
        if (!dev)
                return -ENODEV;
 
index 9856086..a5fefb9 100644 (file)
@@ -661,7 +661,7 @@ static int cfv_probe(struct virtio_device *vdev)
        int err = -EINVAL;
 
        netdev = alloc_netdev(sizeof(struct cfv_info), cfv_netdev_name,
-                             cfv_netdev_setup);
+                             NET_NAME_UNKNOWN, cfv_netdev_setup);
        if (!netdev)
                return -ENOMEM;
 
index 824108c..5dede6e 100644 (file)
@@ -208,40 +208,31 @@ static int c_can_plat_probe(struct platform_device *pdev)
        }
 
        /* get the appropriate clk */
-       clk = clk_get(&pdev->dev, NULL);
+       clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(clk)) {
-               dev_err(&pdev->dev, "no clock defined\n");
-               ret = -ENODEV;
+               ret = PTR_ERR(clk);
                goto exit;
        }
 
        /* get the platform data */
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        irq = platform_get_irq(pdev, 0);
-       if (!mem || irq <= 0) {
+       if (irq <= 0) {
                ret = -ENODEV;
-               goto exit_free_clk;
-       }
-
-       if (!request_mem_region(mem->start, resource_size(mem),
-                               KBUILD_MODNAME)) {
-               dev_err(&pdev->dev, "resource unavailable\n");
-               ret = -ENODEV;
-               goto exit_free_clk;
+               goto exit;
        }
 
-       addr = ioremap(mem->start, resource_size(mem));
-       if (!addr) {
-               dev_err(&pdev->dev, "failed to map can port\n");
-               ret = -ENOMEM;
-               goto exit_release_mem;
+       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       addr = devm_ioremap_resource(&pdev->dev, mem);
+       if (IS_ERR(addr)) {
+               ret = PTR_ERR(addr);
+               goto exit;
        }
 
        /* allocate the c_can device */
        dev = alloc_c_can_dev();
        if (!dev) {
                ret = -ENOMEM;
-               goto exit_iounmap;
+               goto exit;
        }
 
        priv = netdev_priv(dev);
@@ -287,7 +278,8 @@ static int c_can_plat_probe(struct platform_device *pdev)
                        break;
                }
 
-               priv->raminit_ctrlreg = devm_ioremap_resource(&pdev->dev, res);
+               priv->raminit_ctrlreg = devm_ioremap(&pdev->dev, res->start,
+                                                    resource_size(res));
                if (IS_ERR(priv->raminit_ctrlreg) || priv->instance < 0)
                        dev_info(&pdev->dev, "control memory is not used for raminit\n");
                else
@@ -321,12 +313,6 @@ static int c_can_plat_probe(struct platform_device *pdev)
 
 exit_free_device:
        free_c_can_dev(dev);
-exit_iounmap:
-       iounmap(addr);
-exit_release_mem:
-       release_mem_region(mem->start, resource_size(mem));
-exit_free_clk:
-       clk_put(clk);
 exit:
        dev_err(&pdev->dev, "probe failed\n");
 
@@ -336,18 +322,10 @@ exit:
 static int c_can_plat_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
-       struct c_can_priv *priv = netdev_priv(dev);
-       struct resource *mem;
 
        unregister_c_can_dev(dev);
 
        free_c_can_dev(dev);
-       iounmap(priv->base);
-
-       mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       release_mem_region(mem->start, resource_size(mem));
-
-       clk_put(priv->priv);
 
        return 0;
 }
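
The c_can_platform conversion is the standard devres pattern: devm_clk_get() and devm_ioremap_resource() bind the clock and MMIO mapping to the device lifetime, so the iounmap()/release_mem_region()/clk_put() unwind code can simply be deleted from both the error path and remove(). A condensed sketch of the probe side (demo_probe is hypothetical):

    #include <linux/clk.h>
    #include <linux/err.h>
    #include <linux/io.h>
    #include <linux/platform_device.h>

    static int demo_probe(struct platform_device *pdev)
    {
            struct resource *mem;
            void __iomem *base;
            struct clk *clk;

            clk = devm_clk_get(&pdev->dev, NULL);
            if (IS_ERR(clk))
                    return PTR_ERR(clk);

            mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            base = devm_ioremap_resource(&pdev->dev, mem);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            /* No explicit unwind: devres releases the clock and the
             * mapping on probe failure or device removal.
             */
            return 0;
    }
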
index e318e87..9f91fcb 100644 (file)
@@ -565,7 +565,7 @@ struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
        else
                size = sizeof_priv;
 
-       dev = alloc_netdev(size, "can%d", can_setup);
+       dev = alloc_netdev(size, "can%d", NET_NAME_UNKNOWN, can_setup);
        if (!dev)
                return NULL;
 
index f31499a..d169215 100644 (file)
@@ -141,6 +141,7 @@ static void set_normal_mode(struct net_device *dev)
 {
        struct sja1000_priv *priv = netdev_priv(dev);
        unsigned char status = priv->read_reg(priv, SJA1000_MOD);
+       u8 mod_reg_val = 0x00;
        int i;
 
        for (i = 0; i < 100; i++) {
@@ -158,9 +159,10 @@ static void set_normal_mode(struct net_device *dev)
 
                /* set chip to normal mode */
                if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
-                       priv->write_reg(priv, SJA1000_MOD, MOD_LOM);
-               else
-                       priv->write_reg(priv, SJA1000_MOD, 0x00);
+                       mod_reg_val |= MOD_LOM;
+               if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
+                       mod_reg_val |= MOD_STM;
+               priv->write_reg(priv, SJA1000_MOD, mod_reg_val);
 
                udelay(10);
 
@@ -278,6 +280,7 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
        uint8_t dlc;
        canid_t id;
        uint8_t dreg;
+       u8 cmd_reg_val = 0x00;
        int i;
 
        if (can_dropped_invalid_skb(dev, skb))
@@ -312,9 +315,14 @@ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb,
        can_put_echo_skb(skb, dev, 0);
 
        if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
-               sja1000_write_cmdreg(priv, CMD_TR | CMD_AT);
+               cmd_reg_val |= CMD_AT;
+
+       if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
+               cmd_reg_val |= CMD_SRR;
        else
-               sja1000_write_cmdreg(priv, CMD_TR);
+               cmd_reg_val |= CMD_TR;
+
+       sja1000_write_cmdreg(priv, cmd_reg_val);
 
        return NETDEV_TX_OK;
 }
@@ -622,9 +630,12 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
        priv->can.do_set_bittiming = sja1000_set_bittiming;
        priv->can.do_set_mode = sja1000_set_mode;
        priv->can.do_get_berr_counter = sja1000_get_berr_counter;
-       priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
-               CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY |
-               CAN_CTRLMODE_ONE_SHOT;
+       priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
+                                      CAN_CTRLMODE_LISTENONLY |
+                                      CAN_CTRLMODE_3_SAMPLES |
+                                      CAN_CTRLMODE_ONE_SHOT |
+                                      CAN_CTRLMODE_BERR_REPORTING |
+                                      CAN_CTRLMODE_PRESUME_ACK;
 
        spin_lock_init(&priv->cmdreg_lock);
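
With the MOD register now built up bit by bit, adding further control modes stays a one-line change. A hedged sketch of the same read-modify-write idea (demo_mod_bits is hypothetical; MOD_LOM and MOD_STM are the driver's listen-only and self-test bits from sja1000.h):

    #include <linux/can/dev.h>

    static u8 demo_mod_bits(const struct can_priv *can)
    {
            u8 mod = 0x00;

            if (can->ctrlmode & CAN_CTRLMODE_LISTENONLY)
                    mod |= MOD_LOM;  /* listen-only: never transmit */
            if (can->ctrlmode & CAN_CTRLMODE_PRESUME_ACK)
                    mod |= MOD_STM;  /* self test: don't require an ACK */

            return mod;
    }
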
 
index dcf9196..acb5b92 100644 (file)
@@ -52,6 +52,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/kernel.h>
+#include <linux/workqueue.h>
 #include <linux/can.h>
 #include <linux/can/skb.h>
 
@@ -85,6 +86,7 @@ struct slcan {
        struct tty_struct       *tty;           /* ptr to TTY structure      */
        struct net_device       *dev;           /* easy for intr handling    */
        spinlock_t              lock;
+       struct work_struct      tx_work;        /* Flushes transmit buffer   */
 
        /* These are pointers to the malloc()ed frame buffers. */
        unsigned char           rbuff[SLC_MTU]; /* receiver buffer           */
@@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
        sl->dev->stats.tx_bytes += cf->can_dlc;
 }
 
-/*
- * Called by the driver when there's room for more data.  If we have
- * more packets to send, we send them here.
- */
-static void slcan_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slcan_transmit(struct work_struct *work)
 {
+       struct slcan *sl = container_of(work, struct slcan, tx_work);
        int actual;
-       struct slcan *sl = (struct slcan *) tty->disc_data;
 
+       spin_lock_bh(&sl->lock);
        /* First make sure we're connected. */
-       if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev))
+       if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
+               spin_unlock_bh(&sl->lock);
                return;
+       }
 
-       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
-               clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
                spin_unlock_bh(&sl->lock);
                netif_wake_queue(sl->dev);
                return;
        }
 
-       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+       actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
        spin_unlock_bh(&sl->lock);
 }
 
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slcan_write_wakeup(struct tty_struct *tty)
+{
+       struct slcan *sl = tty->disc_data;
+
+       schedule_work(&sl->tx_work);
+}
+
 /* Send a can_frame to a TTY queue. */
 static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -517,7 +529,7 @@ static struct slcan *slc_alloc(dev_t line)
                return NULL;
 
        sprintf(name, "slcan%d", i);
-       dev = alloc_netdev(sizeof(*sl), name, slc_setup);
+       dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, slc_setup);
        if (!dev)
                return NULL;
 
@@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
        sl->magic = SLCAN_MAGIC;
        sl->dev = dev;
        spin_lock_init(&sl->lock);
+       INIT_WORK(&sl->tx_work, slcan_transmit);
        slcan_devs[i] = dev;
 
        return sl;
@@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
        if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
                return;
 
+       spin_lock_bh(&sl->lock);
        tty->disc_data = NULL;
        sl->tty = NULL;
+       spin_unlock_bh(&sl->lock);
+
+       flush_work(&sl->tx_work);
 
        /* Flush network side */
        unregister_netdev(sl->dev);
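
The slcan rework mirrors the earlier slip fix: a tty write-wakeup callback can fire from contexts where calling tty->ops->write() directly risks deadlock, so the wakeup now only schedules a work item, and slcan_close() clears sl->tty under the lock and then flush_work()s so the worker cannot race with teardown. The core shape as a sketch (the demo struct is hypothetical):

    #include <linux/kernel.h>
    #include <linux/spinlock.h>
    #include <linux/workqueue.h>

    struct demo_chan {
            spinlock_t lock;
            struct work_struct tx_work;
            int xleft;              /* bytes still queued for the tty */
    };

    static void demo_transmit(struct work_struct *work)
    {
            struct demo_chan *c = container_of(work, struct demo_chan,
                                               tx_work);

            spin_lock_bh(&c->lock);
            /* ... drain c->xleft bytes into the tty here ... */
            spin_unlock_bh(&c->lock);
    }

    static void demo_write_wakeup(struct demo_chan *c)
    {
            /* Safe from atomic context; the real write runs later */
            schedule_work(&c->tx_work);
    }
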
index 29e272c..64c016a 100644 (file)
@@ -1496,7 +1496,6 @@ e100_set_config(struct net_device *dev, struct ifmap *map)
                case IF_PORT_AUI:
                        spin_unlock(&np->lock);
                        return -EOPNOTSUPP;
-                       break;
                default:
                        printk(KERN_ERR "%s: Invalid media selected", dev->name);
                        spin_unlock(&np->lock);
index 0932ffb..ff435fb 100644 (file)
@@ -164,7 +164,7 @@ static int __init dummy_init_one(void)
        struct net_device *dev_dummy;
        int err;
 
-       dev_dummy = alloc_netdev(0, "dummy%d", dummy_setup);
+       dev_dummy = alloc_netdev(0, "dummy%d", NET_NAME_UNKNOWN, dummy_setup);
        if (!dev_dummy)
                return -ENOMEM;
 
index 7a79b60..957e5c0 100644 (file)
@@ -585,7 +585,8 @@ static int __init eql_init_module(void)
 
        pr_info("%s\n", version);
 
-       dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", eql_setup);
+       dev_eql = alloc_netdev(sizeof(equalizer_t), "eql", NET_NAME_UNKNOWN,
+                              eql_setup);
        if (!dev_eql)
                return -ENOMEM;
 
index 599311f..b96e885 100644 (file)
@@ -986,7 +986,7 @@ static void ethdev_setup(struct net_device *dev)
 static struct net_device *____alloc_ei_netdev(int size)
 {
        return alloc_netdev(sizeof(struct ei_device) + size, "eth%d",
-                               ethdev_setup);
+                           NET_NAME_UNKNOWN, ethdev_setup);
 }
 
 
index 90e825e..65cf60f 100644 (file)
@@ -178,10 +178,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
                case NUBUS_DRHW_APPLE_SONIC_LC:
                case NUBUS_DRHW_SONNET:
                        return MAC8390_NONE;
-                       break;
                default:
                        return MAC8390_APPLE;
-                       break;
                }
                break;
 
@@ -189,13 +187,10 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
                switch (dev->dr_hw) {
                case NUBUS_DRHW_ASANTE_LC:
                        return MAC8390_NONE;
-                       break;
                case NUBUS_DRHW_CABLETRON:
                        return MAC8390_CABLETRON;
-                       break;
                default:
                        return MAC8390_APPLE;
-                       break;
                }
                break;
 
@@ -220,10 +215,8 @@ static enum mac8390_type __init mac8390_ident(struct nubus_dev *dev)
                switch (dev->dr_hw) {
                case NUBUS_DRHW_INTERLAN:
                        return MAC8390_INTERLAN;
-                       break;
                default:
                        return MAC8390_KINETICS;
-                       break;
                }
                break;
 
@@ -563,7 +556,6 @@ static int __init mac8390_initdev(struct net_device *dev,
                case ACCESS_UNKNOWN:
                        pr_err("Don't know how to access card memory!\n");
                        return -ENODEV;
-                       break;
 
                case ACCESS_16:
                        /* 16 bit card, register map is reversed */
index f952fff..c9cd359 100644 (file)
@@ -52,8 +52,7 @@ config BFIN_TX_DESC_NUM
 config BFIN_RX_DESC_NUM
        int "Number of receive buffer packets"
        depends on BFIN_MAC
-       range 20 100 if BFIN_MAC_USE_L1
-       range 20 800
+       range 20 64
        default "20"
        ---help---
          Set the number of buffer packets used in driver.
index 7ae74d4..afa6684 100644 (file)
@@ -1218,11 +1218,11 @@ out:
 #define RX_ERROR_MASK (RX_LONG | RX_ALIGN | RX_CRC | RX_LEN | \
        RX_FRAG | RX_ADDR | RX_DMAO | RX_PHY | RX_LATE | RX_RANGE)
 
-static void bfin_mac_rx(struct net_device *dev)
+static void bfin_mac_rx(struct bfin_mac_local *lp)
 {
+       struct net_device *dev = lp->ndev;
        struct sk_buff *skb, *new_skb;
        unsigned short len;
-       struct bfin_mac_local *lp __maybe_unused = netdev_priv(dev);
 #if defined(BFIN_MAC_CSUM_OFFLOAD)
        unsigned int i;
        unsigned char fcs[ETH_FCS_LEN + 1];
@@ -1256,7 +1256,7 @@ static void bfin_mac_rx(struct net_device *dev)
        current_rx_ptr->skb = new_skb;
        current_rx_ptr->desc_a.start_addr = (unsigned long)new_skb->data - 2;
 
-       len = (unsigned short)((current_rx_ptr->status.status_word) & RX_FRLEN);
+       len = (unsigned short)(current_rx_ptr->status.status_word & RX_FRLEN);
        /* Deduce Ethernet FCS length from Ethernet payload length */
        len -= ETH_FCS_LEN;
        skb_put(skb, len);
@@ -1294,7 +1294,8 @@ static void bfin_mac_rx(struct net_device *dev)
        }
 #endif
 
-       netif_rx(skb);
+       napi_gro_receive(&lp->napi, skb);
+
        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
 out:
@@ -1302,41 +1303,52 @@ out:
        current_rx_ptr = current_rx_ptr->next;
 }
 
+static int bfin_mac_poll(struct napi_struct *napi, int budget)
+{
+       int i = 0;
+       struct bfin_mac_local *lp = container_of(napi,
+                                                struct bfin_mac_local,
+                                                napi);
+
+       while (current_rx_ptr->status.status_word != 0 && i < budget) {
+               bfin_mac_rx(lp);
+               i++;
+       }
+
+       if (i < budget) {
+               napi_complete(napi);
+               if (test_and_clear_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags))
+                       enable_irq(IRQ_MAC_RX);
+       }
+
+       return i;
+}
+
 /* interrupt routine to handle rx and error signal */
 static irqreturn_t bfin_mac_interrupt(int irq, void *dev_id)
 {
-       struct net_device *dev = dev_id;
-       int number = 0;
-
-get_one_packet:
-       if (current_rx_ptr->status.status_word == 0) {
-               /* no more new packet received */
-               if (number == 0) {
-                       if (current_rx_ptr->next->status.status_word != 0) {
-                               current_rx_ptr = current_rx_ptr->next;
-                               goto real_rx;
-                       }
-               }
-               bfin_write_DMA1_IRQ_STATUS(bfin_read_DMA1_IRQ_STATUS() |
-                                          DMA_DONE | DMA_ERR);
-               return IRQ_HANDLED;
+       struct bfin_mac_local *lp = netdev_priv(dev_id);
+       u32 status;
+
+       status = bfin_read_DMA1_IRQ_STATUS();
+
+       bfin_write_DMA1_IRQ_STATUS(status | DMA_DONE | DMA_ERR);
+       if (status & DMA_DONE) {
+               disable_irq_nosync(IRQ_MAC_RX);
+               set_bit(BFIN_MAC_RX_IRQ_DISABLED, &lp->flags);
+               napi_schedule(&lp->napi);
        }
 
-real_rx:
-       bfin_mac_rx(dev);
-       number++;
-       goto get_one_packet;
+       return IRQ_HANDLED;
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-static void bfin_mac_poll(struct net_device *dev)
+static void bfin_mac_poll_controller(struct net_device *dev)
 {
        struct bfin_mac_local *lp = netdev_priv(dev);
 
-       disable_irq(IRQ_MAC_RX);
        bfin_mac_interrupt(IRQ_MAC_RX, dev);
        tx_reclaim_skb(lp);
-       enable_irq(IRQ_MAC_RX);
 }
 #endif                         /* CONFIG_NET_POLL_CONTROLLER */
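
The bfin_mac rework is a textbook IRQ-to-NAPI conversion: the hard interrupt only acks DMA status, masks the RX interrupt and schedules the poller, while the poll callback consumes at most budget packets and re-enables the interrupt once the ring is drained. The canonical shape, sketched with hypothetical demo_* helpers:

    #include <linux/netdevice.h>

    struct demo_priv {
            struct napi_struct napi;
            /* ... rings, registers ... */
    };

    static bool demo_rx_pending(struct demo_priv *priv);
    static void demo_rx_one(struct demo_priv *priv); /* -> napi_gro_receive() */
    static void demo_enable_rx_irq(struct demo_priv *priv);

    static int demo_poll(struct napi_struct *napi, int budget)
    {
            struct demo_priv *priv = container_of(napi, struct demo_priv,
                                                  napi);
            int done = 0;

            while (done < budget && demo_rx_pending(priv)) {
                    demo_rx_one(priv);
                    done++;
            }

            if (done < budget) {
                    /* Ring drained: leave polled mode, unmask RX IRQ */
                    napi_complete(napi);
                    demo_enable_rx_irq(priv);
            }

            return done;
    }
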
 
@@ -1428,14 +1440,13 @@ static void bfin_mac_timeout(struct net_device *dev)
                tx_list_head = tx_list_head->next;
        }
 
-       if (netif_queue_stopped(lp->ndev))
-               netif_wake_queue(lp->ndev);
+       if (netif_queue_stopped(dev))
+               netif_wake_queue(dev);
 
        bfin_mac_enable(lp->phydev);
 
        /* We can accept TX packets again */
        dev->trans_start = jiffies; /* prevent tx timeout */
-       netif_wake_queue(dev);
 }
 
 static void bfin_mac_multicast_hash(struct net_device *dev)
@@ -1562,6 +1573,7 @@ static int bfin_mac_open(struct net_device *dev)
                return ret;
        pr_debug("hardware init finished\n");
 
+       napi_enable(&lp->napi);
        netif_start_queue(dev);
        netif_carrier_on(dev);
 
@@ -1579,6 +1591,7 @@ static int bfin_mac_close(struct net_device *dev)
        pr_debug("%s: %s\n", dev->name, __func__);
 
        netif_stop_queue(dev);
+       napi_disable(&lp->napi);
        netif_carrier_off(dev);
 
        phy_stop(lp->phydev);
@@ -1604,7 +1617,7 @@ static const struct net_device_ops bfin_mac_netdev_ops = {
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_change_mtu         = eth_change_mtu,
 #ifdef CONFIG_NET_POLL_CONTROLLER
-       .ndo_poll_controller    = bfin_mac_poll,
+       .ndo_poll_controller    = bfin_mac_poll_controller,
 #endif
 };
 
@@ -1689,6 +1702,9 @@ static int bfin_mac_probe(struct platform_device *pdev)
        lp->tx_reclaim_timer.data = (unsigned long)lp;
        lp->tx_reclaim_timer.function = tx_reclaim_skb_timeout;
 
+       lp->flags = 0;
+       netif_napi_add(ndev, &lp->napi, bfin_mac_poll, CONFIG_BFIN_RX_DESC_NUM);
+
        spin_lock_init(&lp->lock);
 
        /* now, enable interrupts */
@@ -1723,6 +1739,7 @@ out_err_phc:
 out_err_reg_ndev:
        free_irq(IRQ_MAC_RX, ndev);
 out_err_request_irq:
+       netif_napi_del(&lp->napi);
 out_err_mii_probe:
        mdiobus_unregister(lp->mii_bus);
        mdiobus_free(lp->mii_bus);
@@ -1743,6 +1760,8 @@ static int bfin_mac_remove(struct platform_device *pdev)
 
        unregister_netdev(ndev);
 
+       netif_napi_del(&lp->napi);
+
        free_irq(IRQ_MAC_RX, ndev);
 
        free_netdev(ndev);
index 6dec86a..d1217db 100644 (file)
@@ -26,6 +26,7 @@
 #endif
 
 #define TX_RECLAIM_JIFFIES (HZ / 5)
+#define BFIN_MAC_RX_IRQ_DISABLED       1
 
 struct dma_descriptor {
        struct dma_descriptor *next_dma_desc;
@@ -80,6 +81,8 @@ struct bfin_mac_local {
        int irq_wake_requested;
        struct timer_list tx_reclaim_timer;
        struct net_device *ndev;
+       struct napi_struct napi;
+       unsigned long flags;
 
        /* Data for EMAC_VLAN1 regs */
        u16 vlan1_mask, vlan2_mask;
index 2846067..d81e716 100644 (file)
@@ -736,6 +736,7 @@ static int emac_open(struct net_device *dev)
 
        ret = emac_mdio_probe(dev);
        if (ret < 0) {
+               free_irq(dev->irq, dev);
                netdev_err(dev, "cannot probe MDIO bus\n");
                return ret;
        }
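
The sun4i-emac one-liner fixes a classic open()-path leak: everything acquired before a failing step must be undone in reverse order before returning. Sketched generically (the demo_* names are hypothetical):

    #include <linux/interrupt.h>
    #include <linux/netdevice.h>

    static irqreturn_t demo_interrupt(int irq, void *dev_id);
    static int demo_mdio_probe(struct net_device *dev);

    static int demo_open(struct net_device *dev)
    {
            int ret;

            ret = request_irq(dev->irq, demo_interrupt, 0, dev->name, dev);
            if (ret)
                    return ret;

            ret = demo_mdio_probe(dev);
            if (ret < 0) {
                    /* Undo the earlier request_irq() before bailing */
                    free_irq(dev->irq, dev);
                    return ret;
            }

            return 0;
    }
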
index bbaf36d..8319c99 100644 (file)
@@ -182,6 +182,9 @@ config AMD_XGBE
        depends on OF_NET
        select PHYLIB
        select AMD_XGBE_PHY
+       select BITREVERSE
+       select CRC32
+       select PTP_1588_CLOCK
        ---help---
          This driver supports the AMD 10GbE Ethernet device found on an
          AMD SoC.
@@ -189,4 +192,14 @@ config AMD_XGBE
          To compile this driver as a module, choose M here: the module
          will be called amd-xgbe.
 
+config AMD_XGBE_DCB
+       bool "Data Center Bridging (DCB) support"
+       default n
+       depends on AMD_XGBE && DCB
+       ---help---
+         Say Y here to enable Data Center Bridging (DCB) support in the
+         driver.
+
+         If unsure, say N.
+
 endif # NET_VENDOR_AMD
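
Symbols such as the new AMD_XGBE_DCB typically gate driver code at compile time. A hedged C sketch of the usual idiom (the CONFIG_ symbol is real, the demo function is not):

    #include <linux/kconfig.h>

    #if IS_ENABLED(CONFIG_AMD_XGBE_DCB)
    static void demo_dcb_init(void)
    {
            /* DCB-specific setup, compiled only when the option is set */
    }
    #else
    static inline void demo_dcb_init(void) { }
    #endif
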
index 068dc7c..841e655 100644 (file)
@@ -101,7 +101,6 @@ Revision History:
 MODULE_AUTHOR("Advanced Micro Devices, Inc.");
 MODULE_DESCRIPTION ("AMD8111 based 10/100 Ethernet Controller. Driver Version "MODULE_VERS);
 MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
 module_param_array(speed_duplex, int, NULL, 0);
 MODULE_PARM_DESC(speed_duplex, "Set device speed and duplex modes, 0: Auto Negotiate, 1: 10Mbps Half Duplex, 2: 10Mbps Full Duplex, 3: 100Mbps Half Duplex, 4: 100Mbps Full Duplex");
 module_param_array(coalesce, bool, NULL, 0);
@@ -109,17 +108,9 @@ MODULE_PARM_DESC(coalesce, "Enable or Disable interrupt coalescing, 1: Enable, 0
 module_param_array(dynamic_ipg, bool, NULL, 0);
 MODULE_PARM_DESC(dynamic_ipg, "Enable or Disable dynamic IPG, 1: Enable, 0: Disable");
 
-static DEFINE_PCI_DEVICE_TABLE(amd8111e_pci_tbl) = {
-
-       { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462,
-        PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
-       { 0, }
-
-};
-/*
-This function will read the PHY registers.
-*/
-static int amd8111e_read_phy(struct amd8111e_priv* lp, int phy_id, int reg, u32* val)
+/* This function will read the PHY registers. */
+static int amd8111e_read_phy(struct amd8111e_priv *lp,
+                            int phy_id, int reg, u32 *val)
 {
        void __iomem *mmio = lp->mmio;
        unsigned int reg_val;
@@ -146,10 +137,9 @@ err_phy_read:
 
 }
 
-/*
-This function will write into PHY registers.
-*/
-static int amd8111e_write_phy(struct amd8111e_priv* lp,int phy_id, int reg, u32 val)
+/* This function will write into PHY registers. */
+static int amd8111e_write_phy(struct amd8111e_priv *lp,
+                             int phy_id, int reg, u32 val)
 {
        unsigned int repeat = REPEAT_CNT;
        void __iomem *mmio = lp->mmio;
@@ -176,12 +166,11 @@ err_phy_write:
        return -EINVAL;
 
 }
-/*
-This is the mii register read function provided to the mii interface.
-*/
-static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
+
+/* This is the mii register read function provided to the mii interface. */
+static int amd8111e_mdio_read(struct net_device *dev, int phy_id, int reg_num)
 {
-       struct amd8111e_priv* lp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        unsigned int reg_val;
 
        amd8111e_read_phy(lp,phy_id,reg_num,&reg_val);
@@ -189,19 +178,18 @@ static int amd8111e_mdio_read(struct net_device * dev, int phy_id, int reg_num)
 
 }
 
-/*
-This is the mii register write function provided to the mii interface.
-*/
-static void amd8111e_mdio_write(struct net_device * dev, int phy_id, int reg_num, int val)
+/* This is the mii register write function provided to the mii interface. */
+static void amd8111e_mdio_write(struct net_device *dev,
+                               int phy_id, int reg_num, int val)
 {
-       struct amd8111e_priv* lp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
 
        amd8111e_write_phy(lp, phy_id, reg_num, val);
 }
 
-/*
-This function will set PHY speed. During initialization sets the original speed to 100 full.
-*/
+/* This function will set the PHY speed. During initialization it sets
+ * the speed to 100 Mbps full duplex.
+ */
 static void amd8111e_set_ext_phy(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -240,14 +228,13 @@ static void amd8111e_set_ext_phy(struct net_device *dev)
 
 }
 
-/*
-This function will unmap skb->data space and will free
-all transmit and receive skbuffs.
-*/
+/* This function will unmap skb->data space and will free
+ * all transmit and receive skbuffs.
+ */
 static int amd8111e_free_skbs(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
-       struct sk_buff* rx_skbuff;
+       struct sk_buff *rx_skbuff;
        int i;
 
        /* Freeing transmit skbs */
@@ -274,18 +261,18 @@ static int amd8111e_free_skbs(struct net_device *dev)
        return 0;
 }
 
-/*
-This will set the receive buffer length corresponding to the mtu size of networkinterface.
-*/
-static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
+/* This will set the receive buffer length corresponding
+ * to the mtu size of the network interface.
+ */
+static inline void amd8111e_set_rx_buff_len(struct net_device *dev)
 {
-       struct amd8111e_priv* lp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        unsigned int mtu = dev->mtu;
 
        if (mtu > ETH_DATA_LEN){
                /* MTU + ethernet header + FCS
-               + optional VLAN tag + skb reserve space 2 */
-
+                * + optional VLAN tag + skb reserve space 2
+                */
                lp->rx_buff_len = mtu + ETH_HLEN + 10;
                lp->options |= OPTION_JUMBO_ENABLE;
        } else{
@@ -294,8 +281,10 @@ static inline void amd8111e_set_rx_buff_len(struct net_device* dev)
        }
 }
 
-/*
-This function will free all the previously allocated buffers, determine new receive buffer length  and will allocate new receive buffers. This function also allocates and initializes both the transmitter and receive hardware descriptors.
+/* This function will free all the previously allocated buffers,
+ * determine the new receive buffer length and allocate new receive buffers.
+ * This function also allocates and initializes both the transmitter
+ * and receive hardware descriptors.
  */
 static int amd8111e_init_ring(struct net_device *dev)
 {
@@ -376,15 +365,18 @@ err_free_tx_ring:
 err_no_mem:
        return -ENOMEM;
 }
-/* This function will set the interrupt coalescing according to the input arguments */
-static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
+
+/* This function will set the interrupt coalescing according
+ * to the input arguments
+ */
+static int amd8111e_set_coalesce(struct net_device *dev, enum coal_mode cmod)
 {
        unsigned int timeout;
        unsigned int event_count;
 
        struct amd8111e_priv *lp = netdev_priv(dev);
        void __iomem *mmio = lp->mmio;
-       struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+       struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
 
 
        switch(cmod)
@@ -435,9 +427,7 @@ static int amd8111e_set_coalesce(struct net_device * dev, enum coal_mode cmod)
 
 }
 
-/*
-This function initializes the device registers  and starts the device.
-*/
+/* This function initializes the device registers and starts the device. */
 static int amd8111e_restart(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -501,8 +491,7 @@ static int amd8111e_restart(struct net_device *dev)
 
        /* Enable interrupt coalesce */
        if(lp->options & OPTION_INTR_COAL_ENABLE){
-               printk(KERN_INFO "%s: Interrupt Coalescing Enabled.\n",
-                                                               dev->name);
+               netdev_info(dev, "Interrupt Coalescing Enabled.\n");
                amd8111e_set_coalesce(dev,ENABLE_COAL);
        }
 
@@ -514,10 +503,9 @@ static int amd8111e_restart(struct net_device *dev)
        readl(mmio+CMD0);
        return 0;
 }
-/*
-This function clears necessary the device registers.
-*/
-static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
+
+/* This function clears the necessary device registers. */
+static void amd8111e_init_hw_default(struct amd8111e_priv *lp)
 {
        unsigned int reg_val;
        unsigned int logic_filter[2] ={0,};
@@ -587,7 +575,7 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
        writew(MIB_CLEAR, mmio + MIB_ADDR);
 
        /* Clear LARF */
-       amd8111e_writeq(*(u64*)logic_filter,mmio+LADRF);
+       amd8111e_writeq(*(u64 *)logic_filter, mmio + LADRF);
 
        /* SRAM_SIZE register */
        reg_val = readl(mmio + SRAM_SIZE);
@@ -605,11 +593,10 @@ static void amd8111e_init_hw_default( struct amd8111e_priv* lp)
 
 }
 
-/*
-This function disables the interrupt and clears all the pending
-interrupts in INT0
+/* This function disables the interrupt and clears all the pending
+ * interrupts in INT0
  */
-static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
+static void amd8111e_disable_interrupt(struct amd8111e_priv *lp)
 {
        u32 intr0;
 
@@ -625,10 +612,8 @@ static void amd8111e_disable_interrupt(struct amd8111e_priv* lp)
 
 }
 
-/*
-This function stops the chip.
-*/
-static void amd8111e_stop_chip(struct amd8111e_priv* lp)
+/* This function stops the chip. */
+static void amd8111e_stop_chip(struct amd8111e_priv *lp)
 {
        writel(RUN, lp->mmio + CMD0);
 
@@ -636,10 +621,8 @@ static void amd8111e_stop_chip(struct amd8111e_priv* lp)
        readl(lp->mmio + CMD0);
 }
 
-/*
-This function frees the  transmiter and receiver descriptor rings.
-*/
-static void amd8111e_free_ring(struct amd8111e_priv* lp)
+/* This function frees the transmitter and receiver descriptor rings. */
+static void amd8111e_free_ring(struct amd8111e_priv *lp)
 {
        /* Free transmit and receive descriptor rings */
        if(lp->rx_ring){
@@ -659,12 +642,13 @@ static void amd8111e_free_ring(struct amd8111e_priv* lp)
 
 }
 
-/*
-This function will free all the transmit skbs that are actually transmitted by the device. It will check the ownership of the skb before freeing the skb.
-*/
+/* This function will free all the transmit skbs that are actually
+ * transmitted by the device. It will check the ownership of the
+ * skb before freeing the skb.
+ */
 static int amd8111e_tx(struct net_device *dev)
 {
-       struct amd8111e_priv* lp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        int tx_index = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;
        int status;
        /* Complete all the transmit packet */
@@ -724,21 +708,20 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                goto rx_not_empty;
 
        do{
-               /* process receive packets until we use the quota*/
-               /* If we own the next entry, it's a new packet. Send it up. */
+               /* process receive packets until we use the quota.
+                * If we own the next entry, it's a new packet. Send it up.
+                */
                while(1) {
                        status = le16_to_cpu(lp->rx_ring[rx_index].rx_flags);
                        if (status & OWN_BIT)
                                break;
 
-                       /*
-                        * There is a tricky error noted by John Murphy,
+                       /* There is a tricky error noted by John Murphy,
                         * <murf@perftech.com> to Russ Nelson: Even with
                         * full-sized * buffers it's possible for a
                         * jabber packet to use two buffers, with only
                         * the last correctly noting the error.
                         */
-
                        if(status & ERR_BIT) {
                                /* reseting flags */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
@@ -771,7 +754,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                        new_skb = netdev_alloc_skb(dev, lp->rx_buff_len);
                        if (!new_skb) {
                                /* if allocation fail,
-                                  ignore that pkt and go to next one */
+                                * ignore that pkt and go to next one
+                                */
                                lp->rx_ring[rx_index].rx_flags &= RESET_RX_FLAGS;
                                lp->drv_rx_errors++;
                                goto err_next_pkt;
@@ -812,8 +796,8 @@ static int amd8111e_rx_poll(struct napi_struct *napi, int budget)
                        rx_index = (++lp->rx_idx) & RX_RING_DR_MOD_MASK;
                }
                /* Check the interrupt status register for more packets in the
-                  mean time. Process them since we have not used up our quota.*/
-
+                * mean time. Process them since we have not used up our quota.
+                */
                intr0 = readl(mmio + INT0);
                /*Ack receive packets */
                writel(intr0 & RINT0,mmio + INT0);
@@ -833,10 +817,8 @@ rx_not_empty:
        return num_rx_pkt;
 }
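
The comment above this hunk describes the descriptor ownership handshake the whole driver relies on: the NIC owns a descriptor while OWN_BIT is set, and software may only free the attached skb once hardware has cleared the bit. A minimal sketch of the reap loop, using the driver's field names but omitting the statistics updates and DMA unmapping the real amd8111e_tx() performs, so treat it as illustrative rather than a drop-in:

        while (lp->tx_complete_idx != lp->tx_idx) {
                int i = lp->tx_complete_idx & TX_RING_DR_MOD_MASK;

                /* descriptor still owned by the NIC: stop reaping here */
                if (le16_to_cpu(lp->tx_ring[i].tx_flags) & OWN_BIT)
                        break;

                if (lp->tx_skbuff[i]) {
                        dev_kfree_skb_irq(lp->tx_skbuff[i]);
                        lp->tx_skbuff[i] = NULL;
                }
                lp->tx_complete_idx++;
        }
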
 
-/*
-This function will indicate the link status to the kernel.
-*/
-static int amd8111e_link_change(struct net_device* dev)
+/* This function will indicate the link status to the kernel. */
+static int amd8111e_link_change(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        int status0,speed;
@@ -860,24 +842,26 @@ static int amd8111e_link_change(struct net_device* dev)
                else if(speed == PHY_SPEED_100)
                        lp->link_config.speed = SPEED_100;
 
-               printk(KERN_INFO "%s: Link is Up. Speed is %s Mbps %s Duplex\n",                        dev->name,
-                      (lp->link_config.speed == SPEED_100) ? "100": "10",
-                      (lp->link_config.duplex == DUPLEX_FULL)? "Full": "Half");
+               netdev_info(dev, "Link is Up. Speed is %s Mbps %s Duplex\n",
+                           (lp->link_config.speed == SPEED_100) ?
+                                                       "100" : "10",
+                           (lp->link_config.duplex == DUPLEX_FULL) ?
+                                                       "Full" : "Half");
+
                netif_carrier_on(dev);
        }
        else{
                lp->link_config.speed = SPEED_INVALID;
                lp->link_config.duplex = DUPLEX_INVALID;
                lp->link_config.autoneg = AUTONEG_INVALID;
-               printk(KERN_INFO "%s: Link is Down.\n",dev->name);
+               netdev_info(dev, "Link is Down.\n");
                netif_carrier_off(dev);
        }
 
        return 0;
 }
-/*
-This function reads the mib counters.
-*/
+
+/* This function reads the mib counters. */
 static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
 {
        unsigned int  status;
@@ -895,8 +879,7 @@ static int amd8111e_read_mib(void __iomem *mmio, u8 MIB_COUNTER)
        return data;
 }
 
-/*
- * This function reads the mib registers and returns the hardware statistics.
+/* This function reads the mib registers and returns the hardware statistics.
  * It updates previous internal driver statistics with new values.
  */
 static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
@@ -992,13 +975,14 @@ static struct net_device_stats *amd8111e_get_stats(struct net_device *dev)
 
        return new_stats;
 }
+
 /* This function recalculate the interrupt coalescing  mode on every interrupt
-according to the datarate and the packet rate.
-*/
+ * according to the datarate and the packet rate.
+ */
 static int amd8111e_calc_coalesce(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
-       struct amd8111e_coalesce_conf * coal_conf = &lp->coal_conf;
+       struct amd8111e_coalesce_conf *coal_conf = &lp->coal_conf;
        int tx_pkt_rate;
        int rx_pkt_rate;
        int tx_data_rate;
@@ -1126,13 +1110,14 @@ static int amd8111e_calc_coalesce(struct net_device *dev)
        return 0;
 
 }
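
The comment introducing amd8111e_calc_coalesce() gives the idea: each timer interrupt compares packet and byte counts against the previous sample and reprograms the coalescing registers when the rate crosses a level boundary. A rough one-direction sketch follows; LOW_PKT_RATE is a purely illustrative threshold, and the coal_conf field and level names are assumptions about the driver's internals rather than lines visible in this diff:

        rx_pkt_rate = coal_conf->rx_packets - coal_conf->rx_prev_packets;
        coal_conf->rx_prev_packets = coal_conf->rx_packets;

        if (rx_pkt_rate < LOW_PKT_RATE) {
                /* light traffic: interrupt per packet, lowest latency */
                coal_conf->rx_coal_type = NO_COALESCE;
                coal_conf->rx_event_count = 0;
                coal_conf->rx_timeout = 0;
        } else {
                /* heavier traffic: batch events to cut interrupt load */
                coal_conf->rx_coal_type = MEDIUM_COALESCE;
                coal_conf->rx_event_count = 4;
                coal_conf->rx_timeout = 1;
        }
        amd8111e_set_coalesce(dev, RX_INTR_COAL);
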
-/*
-This is device interrupt function. It handles transmit, receive,link change and hardware timer interrupts.
-*/
+
+/* This is the device interrupt function. It handles transmit,
+ * receive, link change and hardware timer interrupts.
+ */
 static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
 {
 
-       struct net_device * dev = (struct net_device *) dev_id;
+       struct net_device *dev = (struct net_device *)dev_id;
        struct amd8111e_priv *lp = netdev_priv(dev);
        void __iomem *mmio = lp->mmio;
        unsigned int intr0, intren0;
@@ -1168,7 +1153,7 @@ static irqreturn_t amd8111e_interrupt(int irq, void *dev_id)
                        /* Schedule a polling routine */
                        __napi_schedule(&lp->napi);
                } else if (intren0 & RINTEN0) {
-                       printk("************Driver bug! interrupt while in poll\n");
+                       netdev_dbg(dev, "************Driver bug! interrupt while in poll\n");
                        /* Fix by disable receive interrupts */
                        writel(RINTEN0, mmio + INTEN0);
                }
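
Beyond the netdev_dbg conversion, this hunk shows the standard NAPI discipline: mask the receive interrupt before scheduling the poll routine, and let the poll routine re-enable it once the quota is no longer exhausted. A generic sketch of the scheduling side (napi_schedule_prep() is the stock kernel helper; the INTEN0 write mirrors the masking shown above):

        if (napi_schedule_prep(&lp->napi)) {
                /* mask RX interrupts; amd8111e_rx_poll() re-enables them */
                writel(RINTEN0, mmio + INTEN0);
                __napi_schedule(&lp->napi);
        }
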
@@ -1205,10 +1190,11 @@ static void amd8111e_poll(struct net_device *dev)
 #endif
 
 
-/*
-This function closes the network interface and updates the statistics so that most recent statistics will be available after the interface is down.
-*/
-static int amd8111e_close(struct net_device * dev)
+/* This function closes the network interface and updates
+ * the statistics so that most recent statistics will be
+ * available after the interface is down.
+ */
+static int amd8111e_close(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        netif_stop_queue(dev);
@@ -1238,9 +1224,11 @@ static int amd8111e_close(struct net_device * dev)
        lp->opened = 0;
        return 0;
 }
-/* This function opens new interface.It requests irq for the device, initializes the device,buffers and descriptors, and starts the device.
-*/
-static int amd8111e_open(struct net_device * dev )
+
+/* This function opens a new interface. It requests irq for the device,
+ * initializes the device, buffers and descriptors, and starts the device.
+ */
+static int amd8111e_open(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
 
@@ -1264,7 +1252,7 @@ static int amd8111e_open(struct net_device * dev )
        /* Start ipg timer */
        if(lp->options & OPTION_DYN_IPG_ENABLE){
                add_timer(&lp->ipg_data.ipg_timer);
-               printk(KERN_INFO "%s: Dynamic IPG Enabled.\n",dev->name);
+               netdev_info(dev, "Dynamic IPG Enabled\n");
        }
 
        lp->opened = 1;
@@ -1275,10 +1263,11 @@ static int amd8111e_open(struct net_device * dev )
 
        return 0;
 }
-/*
-This function checks if there is any transmit  descriptors available to queue more packet.
-*/
-static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
+
+/* This function checks if there are any transmit descriptors
+ * available to queue more packets.
+ */
+static int amd8111e_tx_queue_avail(struct amd8111e_priv *lp)
 {
        int tx_index = lp->tx_idx & TX_BUFF_MOD_MASK;
        if (lp->tx_skbuff[tx_index])
@@ -1287,12 +1276,14 @@ static int amd8111e_tx_queue_avail(struct amd8111e_priv* lp )
                return 0;
 
 }
-/*
-This function will queue the transmit packets to the descriptors and will trigger the send operation. It also initializes the transmit descriptors with buffer physical address, byte count, ownership to hardware etc.
-*/
 
+/* This function will queue the transmit packets to the
+ * descriptors and will trigger the send operation. It also
+ * initializes the transmit descriptors with buffer physical address,
+ * byte count, ownership to hardware etc.
+ */
 static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
-                                      struct net_device * dev)
+                                      struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        int tx_index;
@@ -1338,9 +1329,7 @@ static netdev_tx_t amd8111e_start_xmit(struct sk_buff *skb,
        spin_unlock_irqrestore(&lp->lock, flags);
        return NETDEV_TX_OK;
 }
-/*
-This function returns all the memory mapped registers of the device.
-*/
+/* This function returns all the memory mapped registers of the device. */
 static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
 {
        void __iomem *mmio = lp->mmio;
@@ -1361,10 +1350,9 @@ static void amd8111e_read_regs(struct amd8111e_priv *lp, u32 *buf)
 }
 
 
-/*
-This function sets promiscuos mode, all-multi mode or the multicast address
-list to the device.
-*/
+/* This function sets promiscuous mode, all-multi mode or the multicast address
+ * list to the device.
+ */
 static void amd8111e_set_multicast_list(struct net_device *dev)
 {
        struct netdev_hw_addr *ha;
@@ -1383,14 +1371,14 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
                /* get all multicast packet */
                mc_filter[1] = mc_filter[0] = 0xffffffff;
                lp->options |= OPTION_MULTICAST_ENABLE;
-               amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+               amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
                return;
        }
        if (netdev_mc_empty(dev)) {
                /* get only own packets */
                mc_filter[1] = mc_filter[0] = 0;
                lp->options &= ~OPTION_MULTICAST_ENABLE;
-               amd8111e_writeq(*(u64*)mc_filter,lp->mmio + LADRF);
+               amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
                /* disable promiscuous mode */
                writel(PROM, lp->mmio + CMD2);
                return;
@@ -1402,14 +1390,15 @@ static void amd8111e_set_multicast_list(struct net_device *dev)
                bit_num = (ether_crc_le(ETH_ALEN, ha->addr) >> 26) & 0x3f;
                mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
        }
-       amd8111e_writeq(*(u64*)mc_filter,lp->mmio+ LADRF);
+       amd8111e_writeq(*(u64 *)mc_filter, lp->mmio + LADRF);
 
        /* To eliminate PCI posting bug */
        readl(lp->mmio + CMD2);
 
 }
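
The multicast hunk above is the classic 64-bit logical address filter: the top six bits of the little-endian CRC-32 of each address select one of 64 hash bits, which are then written to LADRF. A self-contained sketch of the bit computation (the helper name is hypothetical; ether_crc_le() is the same routine used in the hunk):

#include <linux/crc32.h>
#include <linux/etherdevice.h>

static void ladrf_hash_set(u32 mc_filter[2], const u8 *addr)
{
        /* bits 31..26 of the CRC pick one of 64 filter bits */
        u32 bit_num = (ether_crc_le(ETH_ALEN, addr) >> 26) & 0x3f;

        mc_filter[bit_num >> 5] |= 1 << (bit_num & 31);
}
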
 
-static void amd8111e_get_drvinfo(struct net_device* dev, struct ethtool_drvinfo *info)
+static void amd8111e_get_drvinfo(struct net_device *dev,
+                                struct ethtool_drvinfo *info)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
        struct pci_dev *pci_dev = lp->pci_dev;
@@ -1501,11 +1490,11 @@ static const struct ethtool_ops ops = {
        .set_wol = amd8111e_set_wol,
 };
 
-/*
-This function handles all the  ethtool ioctls. It gives driver info, gets/sets driver speed, gets memory mapped register values, forces auto negotiation, sets/gets WOL options for ethtool application.
-*/
-
-static int amd8111e_ioctl(struct net_device * dev , struct ifreq *ifr, int cmd)
+/* This function handles all the ethtool ioctls. It gives driver info,
+ * gets/sets driver speed, gets memory mapped register values, forces
+ * auto negotiation, sets/gets WOL options for ethtool application.
+ */
+static int amd8111e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct mii_ioctl_data *data = if_mii(ifr);
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1559,9 +1548,9 @@ static int amd8111e_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
-/*
-This function changes the mtu of the device. It restarts the device  to initialize the descriptor with new receive buffers.
-*/
+/* This function changes the mtu of the device. It restarts the device to
+ * initialize the descriptor with new receive buffers.
+ */
 static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
@@ -1572,7 +1561,8 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
 
        if (!netif_running(dev)) {
                /* new_mtu will be used
-                  when device starts netxt time */
+                * when device starts next time
+                */
                dev->mtu = new_mtu;
                return 0;
        }
@@ -1591,7 +1581,7 @@ static int amd8111e_change_mtu(struct net_device *dev, int new_mtu)
        return err;
 }
 
-static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
+static int amd8111e_enable_magicpkt(struct amd8111e_priv *lp)
 {
        writel( VAL1|MPPLBA, lp->mmio + CMD3);
        writel( VAL0|MPEN_SW, lp->mmio + CMD7);
@@ -1601,7 +1591,7 @@ static int amd8111e_enable_magicpkt(struct amd8111e_priv* lp)
        return 0;
 }
 
-static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
+static int amd8111e_enable_link_change(struct amd8111e_priv *lp)
 {
 
        /* Adapter is already stoped/suspended/interrupt-disabled */
@@ -1612,19 +1602,18 @@ static int amd8111e_enable_link_change(struct amd8111e_priv* lp)
        return 0;
 }
 
-/*
- * This function is called when a packet transmission fails to complete
+/* This function is called when a packet transmission fails to complete
  * within a reasonable period, on the assumption that an interrupt have
  * failed or the interface is locked up. This function will reinitialize
  * the hardware.
  */
 static void amd8111e_tx_timeout(struct net_device *dev)
 {
-       struct amd8111e_priv* lp = netdev_priv(dev);
+       struct amd8111e_priv *lp = netdev_priv(dev);
        int err;
 
-       printk(KERN_ERR "%s: transmit timed out, resetting\n",
-                                                     dev->name);
+       netdev_err(dev, "transmit timed out, resetting\n");
+
        spin_lock_irq(&lp->lock);
        err = amd8111e_restart(dev);
        spin_unlock_irq(&lp->lock);
@@ -1701,22 +1690,10 @@ static int amd8111e_resume(struct pci_dev *pci_dev)
        return 0;
 }
 
-
-static void amd8111e_remove_one(struct pci_dev *pdev)
-{
-       struct net_device *dev = pci_get_drvdata(pdev);
-       if (dev) {
-               unregister_netdev(dev);
-               iounmap(((struct amd8111e_priv *)netdev_priv(dev))->mmio);
-               free_netdev(dev);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-       }
-}
-static void amd8111e_config_ipg(struct net_device* dev)
+static void amd8111e_config_ipg(struct net_device *dev)
 {
        struct amd8111e_priv *lp = netdev_priv(dev);
-       struct ipg_info* ipg_data = &lp->ipg_data;
+       struct ipg_info *ipg_data = &lp->ipg_data;
        void __iomem *mmio = lp->mmio;
        unsigned int prev_col_cnt = ipg_data->col_cnt;
        unsigned int total_col_cnt;
@@ -1814,27 +1791,24 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 {
        int err, i;
        unsigned long reg_addr,reg_len;
-       struct amd8111e_priv* lp;
-       struct net_device* dev;
+       struct amd8111e_priv *lp;
+       struct net_device *dev;
 
        err = pci_enable_device(pdev);
        if(err){
-               printk(KERN_ERR "amd8111e: Cannot enable new PCI device, "
-                       "exiting.\n");
+               dev_err(&pdev->dev, "Cannot enable new PCI device\n");
                return err;
        }
 
        if(!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)){
-               printk(KERN_ERR "amd8111e: Cannot find PCI base address, "
-                      "exiting.\n");
+               dev_err(&pdev->dev, "Cannot find PCI base address\n");
                err = -ENODEV;
                goto err_disable_pdev;
        }
 
        err = pci_request_regions(pdev, MODULE_NAME);
        if(err){
-               printk(KERN_ERR "amd8111e: Cannot obtain PCI resources, "
-                      "exiting.\n");
+               dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
                goto err_disable_pdev;
        }
 
@@ -1842,16 +1816,14 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        /* Find power-management capability. */
        if (!pdev->pm_cap) {
-               printk(KERN_ERR "amd8111e: No Power Management capability, "
-                      "exiting.\n");
+               dev_err(&pdev->dev, "No Power Management capability\n");
                err = -ENODEV;
                goto err_free_reg;
        }
 
        /* Initialize DMA */
        if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) < 0) {
-               printk(KERN_ERR "amd8111e: DMA not supported,"
-                       "exiting.\n");
+               dev_err(&pdev->dev, "DMA not supported\n");
                err = -ENODEV;
                goto err_free_reg;
        }
@@ -1878,10 +1850,9 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        spin_lock_init(&lp->lock);
 
-       lp->mmio = ioremap(reg_addr, reg_len);
+       lp->mmio = devm_ioremap(&pdev->dev, reg_addr, reg_len);
        if (!lp->mmio) {
-               printk(KERN_ERR "amd8111e: Cannot map device registers, "
-                      "exiting\n");
+               dev_err(&pdev->dev, "Cannot map device registers\n");
                err = -ENOMEM;
                goto err_free_dev;
        }
@@ -1923,9 +1894,8 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
 
        err = register_netdev(dev);
        if (err) {
-               printk(KERN_ERR "amd8111e: Cannot register net device, "
-                      "exiting.\n");
-               goto err_iounmap;
+               dev_err(&pdev->dev, "Cannot register net device\n");
+               goto err_free_dev;
        }
 
        pci_set_drvdata(pdev, dev);
@@ -1942,21 +1912,17 @@ static int amd8111e_probe_one(struct pci_dev *pdev,
        }
 
        /*  display driver and device information */
-
        chip_version = (readl(lp->mmio + CHIPID) & 0xf0000000)>>28;
-       printk(KERN_INFO "%s: AMD-8111e Driver Version: %s\n",
-              dev->name,MODULE_VERS);
-       printk(KERN_INFO "%s: [ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
-              dev->name, chip_version, dev->dev_addr);
+       dev_info(&pdev->dev, "AMD-8111e Driver Version: %s\n", MODULE_VERS);
+       dev_info(&pdev->dev, "[ Rev %x ] PCI 10/100BaseT Ethernet %pM\n",
+                chip_version, dev->dev_addr);
        if (lp->ext_phy_id)
-               printk(KERN_INFO "%s: Found MII PHY ID 0x%08x at address 0x%02x\n",
-                      dev->name, lp->ext_phy_id, lp->ext_phy_addr);
+               dev_info(&pdev->dev, "Found MII PHY ID 0x%08x at address 0x%02x\n",
+                        lp->ext_phy_id, lp->ext_phy_addr);
        else
-               printk(KERN_INFO "%s: Couldn't detect MII PHY, assuming address 0x01\n",
-                      dev->name);
+               dev_info(&pdev->dev, "Couldn't detect MII PHY, assuming address 0x01\n");
+
        return 0;
-err_iounmap:
-       iounmap(lp->mmio);
 
 err_free_dev:
        free_netdev(dev);
@@ -1970,6 +1936,29 @@ err_disable_pdev:
 
 }
 
+static void amd8111e_remove_one(struct pci_dev *pdev)
+{
+       struct net_device *dev = pci_get_drvdata(pdev);
+
+       if (dev) {
+               unregister_netdev(dev);
+               free_netdev(dev);
+               pci_release_regions(pdev);
+               pci_disable_device(pdev);
+       }
+}
+
+static const struct pci_device_id amd8111e_pci_tbl[] = {
+       {
+        .vendor = PCI_VENDOR_ID_AMD,
+        .device = PCI_DEVICE_ID_AMD8111E_7462,
+       },
+       {
+        .vendor = 0,
+       }
+};
+MODULE_DEVICE_TABLE(pci, amd8111e_pci_tbl);
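
For comparison only: the same match can usually be written with the PCI_DEVICE() helper, though that macro also wildcards the subsystem IDs (PCI_ANY_ID), so this sketch is not byte-for-byte identical to the table above:

static const struct pci_device_id amd8111e_pci_tbl[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD8111E_7462) },
        { }
};
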
+
 static struct pci_driver amd8111e_driver = {
        .name           = MODULE_NAME,
        .id_table       = amd8111e_pci_tbl,
index 5739729..b584b78 100644
@@ -475,7 +475,7 @@ static void lance_init_ring(struct net_device *dev)
        *lib_ptr(ib, rx_ptr, lp->type) = leptr;
        if (ZERO)
                printk("RX ptr: %8.8x(%8.8x)\n",
-                      leptr, lib_off(brx_ring, lp->type));
+                      leptr, (uint)lib_off(brx_ring, lp->type));
 
        /* Setup tx descriptor pointer */
        leptr = offsetof(struct lance_init_block, btx_ring);
@@ -484,7 +484,7 @@ static void lance_init_ring(struct net_device *dev)
        *lib_ptr(ib, tx_ptr, lp->type) = leptr;
        if (ZERO)
                printk("TX ptr: %8.8x(%8.8x)\n",
-                      leptr, lib_off(btx_ring, lp->type));
+                      leptr, (uint)lib_off(btx_ring, lp->type));
 
        if (ZERO)
                printk("TX rings:\n");
@@ -499,8 +499,8 @@ static void lance_init_ring(struct net_device *dev)
                                                /* The ones required by tmd2 */
                *lib_ptr(ib, btx_ring[i].misc, lp->type) = 0;
                if (i < 3 && ZERO)
-                       printk("%d: 0x%8.8x(0x%8.8x)\n",
-                              i, leptr, (uint)lp->tx_buf_ptr_cpu[i]);
+                       printk("%d: %8.8x(%p)\n",
+                              i, leptr, lp->tx_buf_ptr_cpu[i]);
        }
 
        /* Setup the Rx ring entries */
@@ -516,8 +516,8 @@ static void lance_init_ring(struct net_device *dev)
                                                             0xf000;
                *lib_ptr(ib, brx_ring[i].mblength, lp->type) = 0;
                if (i < 3 && ZERO)
-                       printk("%d: 0x%8.8x(0x%8.8x)\n",
-                              i, leptr, (uint)lp->rx_buf_ptr_cpu[i]);
+                       printk("%d: %8.8x(%p)\n",
+                              i, leptr, lp->rx_buf_ptr_cpu[i]);
        }
        iob();
 }
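
The three declance hunks above change only printk formatting, not behavior: lib_off() results get an explicit (uint) cast so they match the %x specifier on all configs, and CPU-side buffer pointers are printed through %p instead of being truncated by a 32-bit integer cast. In short:

        /* non-pointer values: cast to the type the specifier expects */
        printk("RX ptr: %8.8x(%8.8x)\n", leptr, (uint)lib_off(brx_ring, lp->type));
        /* pointers: always %p, never an integer cast */
        printk("%d: %8.8x(%p)\n", i, leptr, lp->rx_buf_ptr_cpu[i]);
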
index 26cf9af..171a7e6 100644
@@ -1,6 +1,8 @@
 obj-$(CONFIG_AMD_XGBE) += amd-xgbe.o
 
 amd-xgbe-objs := xgbe-main.o xgbe-drv.o xgbe-dev.o \
-                xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o
+                xgbe-desc.o xgbe-ethtool.o xgbe-mdio.o \
+                xgbe-ptp.o
 
+amd-xgbe-$(CONFIG_AMD_XGBE_DCB) += xgbe-dcb.o
 amd-xgbe-$(CONFIG_DEBUG_FS) += xgbe-debugfs.o
index bf462ee..cc25a3a 100644
 #define DMA_MR_SWR_WIDTH               1
 #define DMA_SBMR_EAME_INDEX            11
 #define DMA_SBMR_EAME_WIDTH            1
+#define DMA_SBMR_BLEN_256_INDEX                7
+#define DMA_SBMR_BLEN_256_WIDTH                1
 #define DMA_SBMR_UNDEF_INDEX           0
 #define DMA_SBMR_UNDEF_WIDTH           1
 
 #define MAC_PFR                                0x0008
 #define MAC_WTR                                0x000c
 #define MAC_HTR0                       0x0010
-#define MAC_HTR1                       0x0014
-#define MAC_HTR2                       0x0018
-#define MAC_HTR3                       0x001c
-#define MAC_HTR4                       0x0020
-#define MAC_HTR5                       0x0024
-#define MAC_HTR6                       0x0028
-#define MAC_HTR7                       0x002c
 #define MAC_VLANTR                     0x0050
 #define MAC_VLANHTR                    0x0058
 #define MAC_VLANIR                     0x0060
 #define MAC_MACA0LR                    0x0304
 #define MAC_MACA1HR                    0x0308
 #define MAC_MACA1LR                    0x030c
+#define MAC_TSCR                       0x0d00
+#define MAC_SSIR                       0x0d04
+#define MAC_STSR                       0x0d08
+#define MAC_STNR                       0x0d0c
+#define MAC_STSUR                      0x0d10
+#define MAC_STNUR                      0x0d14
+#define MAC_TSAR                       0x0d18
+#define MAC_TSSR                       0x0d20
+#define MAC_TXSNR                      0x0d30
+#define MAC_TXSSR                      0x0d34
 
 #define MAC_QTFCR_INC                  4
 #define MAC_MACA_INC                   4
+#define MAC_HTR_INC                    4
+
+#define MAC_RQC2_INC                   4
+#define MAC_RQC2_Q_PER_REG             4
 
 /* MAC register entry bit positions and sizes */
 #define MAC_HWF0R_ADDMACADRSEL_INDEX   18
 #define MAC_HWF1R_HASHTBLSZ_WIDTH      3
 #define MAC_HWF1R_L3L4FNUM_INDEX       27
 #define MAC_HWF1R_L3L4FNUM_WIDTH       4
+#define MAC_HWF1R_NUMTC_INDEX          21
+#define MAC_HWF1R_NUMTC_WIDTH          3
 #define MAC_HWF1R_RSSEN_INDEX          20
 #define MAC_HWF1R_RSSEN_WIDTH          1
 #define MAC_HWF1R_RXFIFOSIZE_INDEX     0
 #define MAC_HWF2R_TXCHCNT_WIDTH                4
 #define MAC_HWF2R_TXQCNT_INDEX         6
 #define MAC_HWF2R_TXQCNT_WIDTH         4
+#define MAC_IER_TSIE_INDEX             12
+#define MAC_IER_TSIE_WIDTH             1
 #define MAC_ISR_MMCRXIS_INDEX          9
 #define MAC_ISR_MMCRXIS_WIDTH          1
 #define MAC_ISR_MMCTXIS_INDEX          10
 #define MAC_ISR_MMCTXIS_WIDTH          1
 #define MAC_ISR_PMTIS_INDEX            4
 #define MAC_ISR_PMTIS_WIDTH            1
+#define MAC_ISR_TSIS_INDEX             12
+#define MAC_ISR_TSIS_WIDTH             1
 #define MAC_MACA1HR_AE_INDEX           31
 #define MAC_MACA1HR_AE_WIDTH           1
 #define MAC_PFR_HMC_INDEX              2
 #define MAC_PFR_HMC_WIDTH              1
+#define MAC_PFR_HPF_INDEX              10
+#define MAC_PFR_HPF_WIDTH              1
 #define MAC_PFR_HUC_INDEX              1
 #define MAC_PFR_HUC_WIDTH              1
 #define MAC_PFR_PM_INDEX               4
 #define MAC_PFR_PM_WIDTH               1
 #define MAC_PFR_PR_INDEX               0
 #define MAC_PFR_PR_WIDTH               1
+#define MAC_PFR_VTFE_INDEX             16
+#define MAC_PFR_VTFE_WIDTH             1
 #define MAC_PMTCSR_MGKPKTEN_INDEX      1
 #define MAC_PMTCSR_MGKPKTEN_WIDTH      1
 #define MAC_PMTCSR_PWRDWN_INDEX                0
 #define MAC_RCR_LM_WIDTH               1
 #define MAC_RCR_RE_INDEX               0
 #define MAC_RCR_RE_WIDTH               1
+#define MAC_RFCR_PFCE_INDEX            8
+#define MAC_RFCR_PFCE_WIDTH            1
 #define MAC_RFCR_RFE_INDEX             0
 #define MAC_RFCR_RFE_WIDTH             1
+#define MAC_RFCR_UP_INDEX              1
+#define MAC_RFCR_UP_WIDTH              1
 #define MAC_RQC0R_RXQ0EN_INDEX         0
 #define MAC_RQC0R_RXQ0EN_WIDTH         2
+#define MAC_SSIR_SNSINC_INDEX          8
+#define MAC_SSIR_SNSINC_WIDTH          8
+#define MAC_SSIR_SSINC_INDEX           16
+#define MAC_SSIR_SSINC_WIDTH           8
 #define MAC_TCR_SS_INDEX               29
 #define MAC_TCR_SS_WIDTH               2
 #define MAC_TCR_TE_INDEX               0
 #define MAC_TCR_TE_WIDTH               1
+#define MAC_TSCR_AV8021ASMEN_INDEX     28
+#define MAC_TSCR_AV8021ASMEN_WIDTH     1
+#define MAC_TSCR_SNAPTYPSEL_INDEX      16
+#define MAC_TSCR_SNAPTYPSEL_WIDTH      2
+#define MAC_TSCR_TSADDREG_INDEX                5
+#define MAC_TSCR_TSADDREG_WIDTH                1
+#define MAC_TSCR_TSCFUPDT_INDEX                1
+#define MAC_TSCR_TSCFUPDT_WIDTH                1
+#define MAC_TSCR_TSCTRLSSR_INDEX       9
+#define MAC_TSCR_TSCTRLSSR_WIDTH       1
+#define MAC_TSCR_TSENA_INDEX           0
+#define MAC_TSCR_TSENA_WIDTH           1
+#define MAC_TSCR_TSENALL_INDEX         8
+#define MAC_TSCR_TSENALL_WIDTH         1
+#define MAC_TSCR_TSEVNTENA_INDEX       14
+#define MAC_TSCR_TSEVNTENA_WIDTH       1
+#define MAC_TSCR_TSINIT_INDEX          2
+#define MAC_TSCR_TSINIT_WIDTH          1
+#define MAC_TSCR_TSIPENA_INDEX         11
+#define MAC_TSCR_TSIPENA_WIDTH         1
+#define MAC_TSCR_TSIPV4ENA_INDEX       13
+#define MAC_TSCR_TSIPV4ENA_WIDTH       1
+#define MAC_TSCR_TSIPV6ENA_INDEX       12
+#define MAC_TSCR_TSIPV6ENA_WIDTH       1
+#define MAC_TSCR_TSMSTRENA_INDEX       15
+#define MAC_TSCR_TSMSTRENA_WIDTH       1
+#define MAC_TSCR_TSVER2ENA_INDEX       10
+#define MAC_TSCR_TSVER2ENA_WIDTH       1
+#define MAC_TSCR_TXTSSTSM_INDEX                24
+#define MAC_TSCR_TXTSSTSM_WIDTH                1
+#define MAC_TSSR_TXTSC_INDEX           15
+#define MAC_TSSR_TXTSC_WIDTH           1
+#define MAC_TXSNR_TXTSSTSMIS_INDEX     31
+#define MAC_TXSNR_TXTSSTSMIS_WIDTH     1
+#define MAC_VLANHTR_VLHT_INDEX         0
+#define MAC_VLANHTR_VLHT_WIDTH         16
+#define MAC_VLANIR_VLTI_INDEX          20
+#define MAC_VLANIR_VLTI_WIDTH          1
+#define MAC_VLANIR_CSVL_INDEX          19
+#define MAC_VLANIR_CSVL_WIDTH          1
 #define MAC_VLANTR_DOVLTC_INDEX                20
 #define MAC_VLANTR_DOVLTC_WIDTH                1
 #define MAC_VLANTR_ERSVLM_INDEX                19
 #define MAC_VLANTR_ERSVLM_WIDTH                1
 #define MAC_VLANTR_ESVL_INDEX          18
 #define MAC_VLANTR_ESVL_WIDTH          1
+#define MAC_VLANTR_ETV_INDEX           16
+#define MAC_VLANTR_ETV_WIDTH           1
 #define MAC_VLANTR_EVLS_INDEX          21
 #define MAC_VLANTR_EVLS_WIDTH          2
 #define MAC_VLANTR_EVLRXS_INDEX                24
 #define MAC_VLANTR_EVLRXS_WIDTH                1
+#define MAC_VLANTR_VL_INDEX            0
+#define MAC_VLANTR_VL_WIDTH            16
+#define MAC_VLANTR_VTHM_INDEX          25
+#define MAC_VLANTR_VTHM_WIDTH          1
+#define MAC_VLANTR_VTIM_INDEX          17
+#define MAC_VLANTR_VTIM_WIDTH          1
 #define MAC_VR_DEVID_INDEX             8
 #define MAC_VR_DEVID_WIDTH             8
 #define MAC_VR_SNPSVER_INDEX           0
 
 #define MTL_RQDCM_INC                  4
 #define MTL_RQDCM_Q_PER_REG            4
+#define MTL_TCPM_INC                   4
+#define MTL_TCPM_TC_PER_REG            4
 
 /* MTL register entry bit positions and sizes */
 #define MTL_OMR_ETSALG_INDEX           5
 #define MTL_Q_TQOMR                    0x00
 #define MTL_Q_TQUR                     0x04
 #define MTL_Q_TQDR                     0x08
-#define MTL_Q_TCECR                    0x10
-#define MTL_Q_TCESR                    0x14
-#define MTL_Q_TCQWR                    0x18
 #define MTL_Q_RQOMR                    0x40
 #define MTL_Q_RQMPOCR                  0x44
 #define MTL_Q_RQDR                     0x4c
 #define MTL_Q_ISR                      0x74
 
 /* MTL queue register entry bit positions and sizes */
-#define MTL_Q_TCQWR_QW_INDEX           0
-#define MTL_Q_TCQWR_QW_WIDTH           21
 #define MTL_Q_RQOMR_EHFC_INDEX         7
 #define MTL_Q_RQOMR_EHFC_WIDTH         1
 #define MTL_Q_RQOMR_RFA_INDEX          8
 #define MTL_Q_RQOMR_RTC_WIDTH          2
 #define MTL_Q_TQOMR_FTQ_INDEX          0
 #define MTL_Q_TQOMR_FTQ_WIDTH          1
+#define MTL_Q_TQOMR_Q2TCMAP_INDEX      8
+#define MTL_Q_TQOMR_Q2TCMAP_WIDTH      3
 #define MTL_Q_TQOMR_TQS_INDEX          16
 #define MTL_Q_TQOMR_TQS_WIDTH          10
 #define MTL_Q_TQOMR_TSF_INDEX          1
 #define MTL_TC_INC                     MTL_Q_INC
 
 #define MTL_TC_ETSCR                   0x10
+#define MTL_TC_ETSSR                   0x14
+#define MTL_TC_QWR                     0x18
 
 /* MTL traffic class register entry bit positions and sizes */
 #define MTL_TC_ETSCR_TSA_INDEX         0
 #define MTL_TC_ETSCR_TSA_WIDTH         2
+#define MTL_TC_QWR_QW_INDEX            0
+#define MTL_TC_QWR_QW_WIDTH            21
 
 /* MTL traffic class register value */
 #define MTL_TSA_SP                     0x00
 #define RX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH   1
 #define RX_PACKET_ATTRIBUTES_INCOMPLETE_INDEX  2
 #define RX_PACKET_ATTRIBUTES_INCOMPLETE_WIDTH  1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_INDEX        3
+#define RX_PACKET_ATTRIBUTES_CONTEXT_NEXT_WIDTH        1
+#define RX_PACKET_ATTRIBUTES_CONTEXT_INDEX     4
+#define RX_PACKET_ATTRIBUTES_CONTEXT_WIDTH     1
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_INDEX   5
+#define RX_PACKET_ATTRIBUTES_RX_TSTAMP_WIDTH   1
 
 #define RX_NORMAL_DESC0_OVT_INDEX              0
 #define RX_NORMAL_DESC0_OVT_WIDTH              16
+#define RX_NORMAL_DESC3_CDA_INDEX              27
+#define RX_NORMAL_DESC3_CDA_WIDTH              1
+#define RX_NORMAL_DESC3_CTXT_INDEX             30
+#define RX_NORMAL_DESC3_CTXT_WIDTH             1
 #define RX_NORMAL_DESC3_ES_INDEX               15
 #define RX_NORMAL_DESC3_ES_WIDTH               1
 #define RX_NORMAL_DESC3_ETLT_INDEX             16
 #define RX_NORMAL_DESC3_PL_INDEX               0
 #define RX_NORMAL_DESC3_PL_WIDTH               14
 
+#define RX_CONTEXT_DESC3_TSA_INDEX             4
+#define RX_CONTEXT_DESC3_TSA_WIDTH             1
+#define RX_CONTEXT_DESC3_TSD_INDEX             6
+#define RX_CONTEXT_DESC3_TSD_WIDTH             1
+
 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_INDEX 0
 #define TX_PACKET_ATTRIBUTES_CSUM_ENABLE_WIDTH 1
 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_INDEX  1
 #define TX_PACKET_ATTRIBUTES_TSO_ENABLE_WIDTH  1
 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_INDEX   2
 #define TX_PACKET_ATTRIBUTES_VLAN_CTAG_WIDTH   1
+#define TX_PACKET_ATTRIBUTES_PTP_INDEX         3
+#define TX_PACKET_ATTRIBUTES_PTP_WIDTH         1
 
 #define TX_CONTEXT_DESC2_MSS_INDEX             0
 #define TX_CONTEXT_DESC2_MSS_WIDTH             15
 #define TX_NORMAL_DESC2_HL_B1L_WIDTH           14
 #define TX_NORMAL_DESC2_IC_INDEX               31
 #define TX_NORMAL_DESC2_IC_WIDTH               1
+#define TX_NORMAL_DESC2_TTSE_INDEX             30
+#define TX_NORMAL_DESC2_TTSE_WIDTH             1
 #define TX_NORMAL_DESC2_VTIR_INDEX             14
 #define TX_NORMAL_DESC2_VTIR_WIDTH             2
 #define TX_NORMAL_DESC3_CIC_INDEX              16
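
All of the _INDEX/_WIDTH pairs added above exist to feed the driver's token-pasting accessors (XGMAC_SET_BITS(), XGMAC_IOWRITE_BITS() and friends, defined in xgbe-common.h; XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1) appears in a later hunk of this pull). A hedged reconstruction of the core pattern, so the definitions above can be read in isolation:

#define XGMAC_GET_BITS(var, prefix, field)                              \
        (((var) >> prefix##_##field##_INDEX) &                          \
         ((0x1 << prefix##_##field##_WIDTH) - 1))

#define XGMAC_SET_BITS(var, prefix, field, val)                         \
do {                                                                    \
        (var) &= ~(((0x1 << prefix##_##field##_WIDTH) - 1) <<           \
                   prefix##_##field##_INDEX);                           \
        (var) |= (((val) & ((0x1 << prefix##_##field##_WIDTH) - 1)) <<  \
                  prefix##_##field##_INDEX);                            \
} while (0)
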
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c b/drivers/net/ethernet/amd/xgbe/xgbe-dcb.c
new file mode 100644
index 0000000..7d6a49b
--- /dev/null
@@ -0,0 +1,270 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/netdevice.h>
+#include <net/dcbnl.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+
+static int xgbe_dcb_ieee_getets(struct net_device *netdev,
+                               struct ieee_ets *ets)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+       /* Set number of supported traffic classes */
+       ets->ets_cap = pdata->hw_feat.tc_cnt;
+
+       if (pdata->ets) {
+               ets->cbs = pdata->ets->cbs;
+               memcpy(ets->tc_tx_bw, pdata->ets->tc_tx_bw,
+                      sizeof(ets->tc_tx_bw));
+               memcpy(ets->tc_tsa, pdata->ets->tc_tsa,
+                      sizeof(ets->tc_tsa));
+               memcpy(ets->prio_tc, pdata->ets->prio_tc,
+                      sizeof(ets->prio_tc));
+       }
+
+       return 0;
+}
+
+static int xgbe_dcb_ieee_setets(struct net_device *netdev,
+                               struct ieee_ets *ets)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       unsigned int i, tc_ets, tc_ets_weight;
+
+       tc_ets = 0;
+       tc_ets_weight = 0;
+       for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) {
+               DBGPR("  TC%u: tx_bw=%hhu, rx_bw=%hhu, tsa=%hhu\n", i,
+                     ets->tc_tx_bw[i], ets->tc_rx_bw[i], ets->tc_tsa[i]);
+               DBGPR("  PRIO%u: TC=%hhu\n", i, ets->prio_tc[i]);
+
+               if ((ets->tc_tx_bw[i] || ets->tc_tsa[i]) &&
+                   (i >= pdata->hw_feat.tc_cnt))
+                               return -EINVAL;
+
+               if (ets->prio_tc[i] >= pdata->hw_feat.tc_cnt)
+                       return -EINVAL;
+
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       tc_ets = 1;
+                       tc_ets_weight += ets->tc_tx_bw[i];
+                       break;
+
+               default:
+                       return -EINVAL;
+               }
+       }
+
+       /* Weights must add up to 100% */
+       if (tc_ets && (tc_ets_weight != 100))
+               return -EINVAL;
+
+       if (!pdata->ets) {
+               pdata->ets = devm_kzalloc(pdata->dev, sizeof(*pdata->ets),
+                                         GFP_KERNEL);
+               if (!pdata->ets)
+                       return -ENOMEM;
+       }
+
+       memcpy(pdata->ets, ets, sizeof(*pdata->ets));
+
+       pdata->hw_if.config_dcb_tc(pdata);
+
+       return 0;
+}
+
+static int xgbe_dcb_ieee_getpfc(struct net_device *netdev,
+                               struct ieee_pfc *pfc)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+       /* Set number of supported PFC traffic classes */
+       pfc->pfc_cap = pdata->hw_feat.tc_cnt;
+
+       if (pdata->pfc) {
+               pfc->pfc_en = pdata->pfc->pfc_en;
+               pfc->mbc = pdata->pfc->mbc;
+               pfc->delay = pdata->pfc->delay;
+       }
+
+       return 0;
+}
+
+static int xgbe_dcb_ieee_setpfc(struct net_device *netdev,
+                               struct ieee_pfc *pfc)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+       DBGPR("  cap=%hhu, en=%hhx, mbc=%hhu, delay=%hhu\n",
+             pfc->pfc_cap, pfc->pfc_en, pfc->mbc, pfc->delay);
+
+       if (!pdata->pfc) {
+               pdata->pfc = devm_kzalloc(pdata->dev, sizeof(*pdata->pfc),
+                                         GFP_KERNEL);
+               if (!pdata->pfc)
+                       return -ENOMEM;
+       }
+
+       memcpy(pdata->pfc, pfc, sizeof(*pdata->pfc));
+
+       pdata->hw_if.config_dcb_pfc(pdata);
+
+       return 0;
+}
+
+static u8 xgbe_dcb_getdcbx(struct net_device *netdev)
+{
+       return DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_IEEE;
+}
+
+static u8 xgbe_dcb_setdcbx(struct net_device *netdev, u8 dcbx)
+{
+       u8 support = xgbe_dcb_getdcbx(netdev);
+
+       DBGPR("  DCBX=%#hhx\n", dcbx);
+
+       if (dcbx & ~support)
+               return 1;
+
+       if ((dcbx & support) != support)
+               return 1;
+
+       return 0;
+}
+
+static const struct dcbnl_rtnl_ops xgbe_dcbnl_ops = {
+       /* IEEE 802.1Qaz std */
+       .ieee_getets = xgbe_dcb_ieee_getets,
+       .ieee_setets = xgbe_dcb_ieee_setets,
+       .ieee_getpfc = xgbe_dcb_ieee_getpfc,
+       .ieee_setpfc = xgbe_dcb_ieee_setpfc,
+
+       /* DCBX configuration */
+       .getdcbx     = xgbe_dcb_getdcbx,
+       .setdcbx     = xgbe_dcb_setdcbx,
+};
+
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void)
+{
+       return &xgbe_dcbnl_ops;
+}
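
xgbe_dcb_ieee_setets() above rejects any configuration whose ETS weights do not sum to exactly 100 or whose priority-to-TC map points past hw_feat.tc_cnt. A hypothetical ieee_ets that passes those checks on hardware exposing at least three traffic classes:

        struct ieee_ets ets = {
                .tc_tsa   = { IEEE_8021QAZ_TSA_ETS, IEEE_8021QAZ_TSA_ETS,
                              IEEE_8021QAZ_TSA_STRICT },
                .tc_tx_bw = { 60, 40, 0 },      /* ETS weights sum to 100 */
                .prio_tc  = { 0, 0, 1, 1, 2, 2, 2, 2 },
        };
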
index 6bb76d5..346592d 100644
@@ -151,7 +151,7 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
 {
        char workarea[32];
        ssize_t len;
-       unsigned int scan_value;
+       int ret;
 
        if (*ppos != 0)
                return 0;
@@ -165,9 +165,8 @@ static ssize_t xgbe_common_write(const char __user *buffer, size_t count,
                return len;
 
        workarea[len] = '\0';
-       if (sscanf(workarea, "%x", &scan_value) == 1)
-               *value = scan_value;
-       else
+       ret = kstrtouint(workarea, 16, value);
+       if (ret)
                return -EIO;
 
        return len;
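
Besides dropping sscanf(), the kstrtouint() conversion above tightens validation: it fails on trailing garbage (a final newline is tolerated) and reports overflow, both of which sscanf("%x") would silently accept. Illustrative behavior with made-up inputs:

        unsigned int v;

        kstrtouint("1a2b\n", 16, &v);   /* returns 0, v == 0x1a2b */
        kstrtouint("1a2bzz", 16, &v);   /* returns -EINVAL, v untouched */
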
index 6f1c859..1c5d62e 100644
@@ -131,7 +131,7 @@ static void xgbe_free_ring(struct xgbe_prv_data *pdata,
 
        if (ring->rdata) {
                for (i = 0; i < ring->rdesc_count; i++) {
-                       rdata = GET_DESC_DATA(ring, i);
+                       rdata = XGBE_GET_DESC_DATA(ring, i);
                        xgbe_unmap_skb(pdata, rdata);
                }
 
@@ -256,7 +256,7 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
                rdesc_dma = ring->rdesc_dma;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
 
                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;
@@ -298,7 +298,7 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
                rdesc_dma = ring->rdesc_dma;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
 
                        rdata->rdesc = rdesc;
                        rdata->rdesc_dma = rdesc_dma;
@@ -359,6 +359,15 @@ static void xgbe_unmap_skb(struct xgbe_prv_data *pdata,
        rdata->len = 0;
        rdata->interrupt = 0;
        rdata->mapped_as_page = 0;
+
+       if (rdata->state_saved) {
+               rdata->state_saved = 0;
+               rdata->state.incomplete = 0;
+               rdata->state.context_next = 0;
+               rdata->state.skb = NULL;
+               rdata->state.len = 0;
+               rdata->state.error = 0;
+       }
 }
 
 static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
@@ -392,7 +401,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
        if ((tso && (packet->mss != ring->tx.cur_mss)) ||
            (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
                cur_index++;
-       rdata = GET_DESC_DATA(ring, cur_index);
+       rdata = XGBE_GET_DESC_DATA(ring, cur_index);
 
        if (tso) {
                DBGPR("  TSO packet\n");
@@ -413,12 +422,12 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                packet->length += packet->header_len;
 
                cur_index++;
-               rdata = GET_DESC_DATA(ring, cur_index);
+               rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }
 
        /* Map the (remainder of the) packet */
        for (datalen = skb_headlen(skb) - offset; datalen; ) {
-               len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+               len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);
 
                skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
                                         DMA_TO_DEVICE);
@@ -437,7 +446,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                packet->length += len;
 
                cur_index++;
-               rdata = GET_DESC_DATA(ring, cur_index);
+               rdata = XGBE_GET_DESC_DATA(ring, cur_index);
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
@@ -447,7 +456,8 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                offset = 0;
 
                for (datalen = skb_frag_size(frag); datalen; ) {
-                       len = min_t(unsigned int, datalen, TX_MAX_BUF_SIZE);
+                       len = min_t(unsigned int, datalen,
+                                   XGBE_TX_MAX_BUF_SIZE);
 
                        skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
                                                   len, DMA_TO_DEVICE);
@@ -468,7 +478,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
                        packet->length += len;
 
                        cur_index++;
-                       rdata = GET_DESC_DATA(ring, cur_index);
+                       rdata = XGBE_GET_DESC_DATA(ring, cur_index);
                }
        }
 
@@ -484,7 +494,7 @@ static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
 
 err_out:
        while (start_index < cur_index) {
-               rdata = GET_DESC_DATA(ring, start_index++);
+               rdata = XGBE_GET_DESC_DATA(ring, start_index++);
                xgbe_unmap_skb(pdata, rdata);
        }
 
@@ -507,7 +517,7 @@ static void xgbe_realloc_skb(struct xgbe_channel *channel)
              ring->rx.realloc_index);
 
        for (i = 0; i < ring->dirty; i++) {
-               rdata = GET_DESC_DATA(ring, ring->rx.realloc_index);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
 
                /* Reset rdata values */
                xgbe_unmap_skb(pdata, rdata);
index 002293b..edaca44 100644
 
 #include <linux/phy.h>
 #include <linux/clk.h>
+#include <linux/bitrev.h>
+#include <linux/crc32.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -129,7 +131,7 @@ static unsigned int xgbe_usec_to_riwt(struct xgbe_prv_data *pdata,
 
        DBGPR("-->xgbe_usec_to_riwt\n");
 
-       rate = clk_get_rate(pdata->sysclock);
+       rate = clk_get_rate(pdata->sysclk);
 
        /*
         * Convert the input usec value to the watchdog timer value. Each
@@ -152,7 +154,7 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
 
        DBGPR("-->xgbe_riwt_to_usec\n");
 
-       rate = clk_get_rate(pdata->sysclock);
+       rate = clk_get_rate(pdata->sysclk);
 
        /*
         * Convert the input watchdog timer value to the usec value. Each
@@ -245,7 +247,7 @@ static int xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
 {
        unsigned int i;
 
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+       for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);
 
        return 0;
@@ -255,7 +257,7 @@ static int xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
 {
        unsigned int i;
 
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+       for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);
 
        return 0;
@@ -266,7 +268,7 @@ static int xgbe_config_rx_threshold(struct xgbe_prv_data *pdata,
 {
        unsigned int i;
 
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+       for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);
 
        return 0;
@@ -277,7 +279,7 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
 {
        unsigned int i;
 
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+       for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);
 
        return 0;
@@ -341,12 +343,12 @@ static int xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
        unsigned int i;
 
        /* Clear MTL flow control */
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+       for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);
 
        /* Clear MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -366,12 +368,12 @@ static int xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
        unsigned int i;
 
        /* Set MTL flow control */
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+       for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 1);
 
        /* Set MAC flow control */
        max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
-       q_count = min_t(unsigned int, pdata->hw_feat.rx_q_cnt, max_q_count);
+       q_count = min_t(unsigned int, pdata->rx_q_count, max_q_count);
        reg = MAC_Q0TFCR;
        for (i = 0; i < q_count; i++) {
                reg_val = XGMAC_IOREAD(pdata, reg);
@@ -405,7 +407,9 @@ static int xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
 
 static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
 {
-       if (pdata->tx_pause)
+       struct ieee_pfc *pfc = pdata->pfc;
+
+       if (pdata->tx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_tx_flow_control(pdata);
        else
                xgbe_disable_tx_flow_control(pdata);
@@ -415,7 +419,9 @@ static int xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
 
 static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
 {
-       if (pdata->rx_pause)
+       struct ieee_pfc *pfc = pdata->pfc;
+
+       if (pdata->rx_pause || (pfc && pfc->pfc_en))
                xgbe_enable_rx_flow_control(pdata);
        else
                xgbe_disable_rx_flow_control(pdata);
@@ -425,8 +431,13 @@ static int xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
 
 static void xgbe_config_flow_control(struct xgbe_prv_data *pdata)
 {
+       struct ieee_pfc *pfc = pdata->pfc;
+
        xgbe_config_tx_flow_control(pdata);
        xgbe_config_rx_flow_control(pdata);
+
+       XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE,
+                          (pfc && pfc->pfc_en) ? 1 : 0);
 }
 
 static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
@@ -484,14 +495,18 @@ static void xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
                XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);
 
                /* No MTL interrupts to be enabled */
-               XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, 0);
+               XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
        }
 }
 
 static void xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
 {
-       /* No MAC interrupts to be enabled */
-       XGMAC_IOWRITE(pdata, MAC_IER, 0);
+       unsigned int mac_ier = 0;
+
+       /* Enable Timestamp interrupt */
+       XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);
+
+       XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);
 
        /* Enable all counter interrupts */
        XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xff);
@@ -547,24 +562,16 @@ static int xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata,
        return 0;
 }
 
-static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
-                                  unsigned int am_mode)
+static void xgbe_set_mac_reg(struct xgbe_prv_data *pdata,
+                            struct netdev_hw_addr *ha, unsigned int *mac_reg)
 {
-       struct netdev_hw_addr *ha;
-       unsigned int mac_reg;
        unsigned int mac_addr_hi, mac_addr_lo;
        u8 *mac_addr;
-       unsigned int i;
-
-       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 0);
-       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 0);
 
-       i = 0;
-       mac_reg = MAC_MACA1HR;
+       mac_addr_lo = 0;
+       mac_addr_hi = 0;
 
-       netdev_for_each_uc_addr(ha, pdata->netdev) {
-               mac_addr_lo = 0;
-               mac_addr_hi = 0;
+       if (ha) {
                mac_addr = (u8 *)&mac_addr_lo;
                mac_addr[0] = ha->addr[0];
                mac_addr[1] = ha->addr[1];
@@ -574,54 +581,93 @@ static int xgbe_set_addn_mac_addrs(struct xgbe_prv_data *pdata,
                mac_addr[0] = ha->addr[4];
                mac_addr[1] = ha->addr[5];
 
-               DBGPR("  adding unicast address %pM at 0x%04x\n",
-                     ha->addr, mac_reg);
+               DBGPR("  adding mac address %pM at 0x%04x\n", ha->addr,
+                     *mac_reg);
 
                XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
+       }
 
-               XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
-               mac_reg += MAC_MACA_INC;
-               XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
-               mac_reg += MAC_MACA_INC;
+       XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
+       *mac_reg += MAC_MACA_INC;
+       XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
+       *mac_reg += MAC_MACA_INC;
+}
 
-               i++;
-       }
+static void xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_hw_addr *ha;
+       unsigned int mac_reg;
+       unsigned int addn_macs;
+
+       mac_reg = MAC_MACA1HR;
+       addn_macs = pdata->hw_feat.addn_mac;
 
-       if (!am_mode) {
-               netdev_for_each_mc_addr(ha, pdata->netdev) {
-                       mac_addr_lo = 0;
-                       mac_addr_hi = 0;
-                       mac_addr = (u8 *)&mac_addr_lo;
-                       mac_addr[0] = ha->addr[0];
-                       mac_addr[1] = ha->addr[1];
-                       mac_addr[2] = ha->addr[2];
-                       mac_addr[3] = ha->addr[3];
-                       mac_addr = (u8 *)&mac_addr_hi;
-                       mac_addr[0] = ha->addr[4];
-                       mac_addr[1] = ha->addr[5];
-
-                       DBGPR("  adding multicast address %pM at 0x%04x\n",
-                             ha->addr, mac_reg);
-
-                       XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
-
-                       XGMAC_IOWRITE(pdata, mac_reg, mac_addr_hi);
-                       mac_reg += MAC_MACA_INC;
-                       XGMAC_IOWRITE(pdata, mac_reg, mac_addr_lo);
-                       mac_reg += MAC_MACA_INC;
-
-                       i++;
+       if (netdev_uc_count(netdev) > addn_macs) {
+               xgbe_set_promiscuous_mode(pdata, 1);
+       } else {
+               netdev_for_each_uc_addr(ha, netdev) {
+                       xgbe_set_mac_reg(pdata, ha, &mac_reg);
+                       addn_macs--;
+               }
+
+               if (netdev_mc_count(netdev) > addn_macs) {
+                       xgbe_set_all_multicast_mode(pdata, 1);
+               } else {
+                       netdev_for_each_mc_addr(ha, netdev) {
+                               xgbe_set_mac_reg(pdata, ha, &mac_reg);
+                               addn_macs--;
+                       }
                }
        }
 
        /* Clear remaining additional MAC address entries */
-       for (; i < pdata->hw_feat.addn_mac; i++) {
-               XGMAC_IOWRITE(pdata, mac_reg, 0);
-               mac_reg += MAC_MACA_INC;
-               XGMAC_IOWRITE(pdata, mac_reg, 0);
-               mac_reg += MAC_MACA_INC;
+       while (addn_macs--)
+               xgbe_set_mac_reg(pdata, NULL, &mac_reg);
+}
+
+static void xgbe_set_mac_hash_table(struct xgbe_prv_data *pdata)
+{
+       struct net_device *netdev = pdata->netdev;
+       struct netdev_hw_addr *ha;
+       unsigned int hash_reg;
+       unsigned int hash_table_shift, hash_table_count;
+       u32 hash_table[XGBE_MAC_HASH_TABLE_SIZE];
+       u32 crc;
+       unsigned int i;
+
+       hash_table_shift = 26 - (pdata->hw_feat.hash_table_size >> 7);
+       hash_table_count = pdata->hw_feat.hash_table_size / 32;
+       memset(hash_table, 0, sizeof(hash_table));
+
+       /* Build the MAC Hash Table register values */
+       netdev_for_each_uc_addr(ha, netdev) {
+               crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+               crc >>= hash_table_shift;
+               hash_table[crc >> 5] |= (1 << (crc & 0x1f));
+       }
+
+       netdev_for_each_mc_addr(ha, netdev) {
+               crc = bitrev32(~crc32_le(~0, ha->addr, ETH_ALEN));
+               crc >>= hash_table_shift;
+               hash_table[crc >> 5] |= (1 << (crc & 0x1f));
        }
 
+       /* Set the MAC Hash Table registers */
+       hash_reg = MAC_HTR0;
+       for (i = 0; i < hash_table_count; i++) {
+               XGMAC_IOWRITE(pdata, hash_reg, hash_table[i]);
+               hash_reg += MAC_HTR_INC;
+       }
+}
+
+static int xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
+{
+       if (pdata->hw_feat.hash_table_size)
+               xgbe_set_mac_hash_table(pdata);
+       else
+               xgbe_set_mac_addn_addrs(pdata);
+
        return 0;
 }
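
The hash path above deserves a closer look: each address is run through
CRC32, bit-reversed, and the top bits (6, 7 or 8 of them for a 64-, 128-
or 256-bucket table, which is what hash_table_shift = 26 - (size >> 7)
works out to) select one bit across the MAC_HTRx registers. Below is a
standalone sketch of that bucket math; crc32_le() and bitrev32() are
reimplemented to my reading of the kernel semantics so the example
compiles in userspace, and the sample address is made up.

#include <stdio.h>
#include <stdint.h>

static uint32_t crc32_le(uint32_t crc, const uint8_t *p, size_t len)
{
        int i;

        while (len--) {
                crc ^= *p++;
                for (i = 0; i < 8; i++)
                        crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320 : 0);
        }
        return crc;
}

static uint32_t bitrev32(uint32_t x)
{
        uint32_t r = 0;
        int i;

        for (i = 0; i < 32; i++)
                r |= ((x >> i) & 1) << (31 - i);
        return r;
}

int main(void)
{
        const uint8_t addr[6] = { 0x00, 0x1a, 0x2b, 0x3c, 0x4d, 0x5e };
        unsigned int hash_table_size = 64;      /* from hw_feat */
        unsigned int shift = 26 - (hash_table_size >> 7);
        uint32_t crc = bitrev32(~crc32_le(~0u, addr, 6)) >> shift;

        /* Which MAC_HTRx word and which bit within it */
        printf("word %u, bit %u\n", (unsigned)(crc >> 5),
               (unsigned)(crc & 0x1f));
        return 0;
}

Note that the perfect-filter path (xgbe_set_mac_addn_addrs) falls back
to promiscuous or all-multicast mode as soon as the address lists
outgrow the hardware entries, so the hash table is what keeps large
address lists filterable.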
 
@@ -738,6 +784,89 @@ static int xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
        return 0;
 }
 
+static int xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+       /* Enable VLAN filtering */
+       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);
+
+       /* Enable VLAN Hash Table filtering */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);
+
+       /* Disable VLAN tag inverse matching */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);
+
+       /* Only filter on the lower 12 bits of the VLAN tag */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);
+
+       /* In order for the VLAN Hash Table filtering to be effective,
+        * the VLAN tag identifier in the VLAN Tag Register must not
+        * be zero.  Set the VLAN tag identifier to "1" to enable the
+        * VLAN Hash Table filtering.  This implies that a VLAN tag of
+        * 1 will always pass filtering.
+        */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);
+
+       return 0;
+}
+
+static int xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
+{
+       /* Disable VLAN filtering */
+       XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);
+
+       return 0;
+}
+
+#ifndef CRCPOLY_LE
+#define CRCPOLY_LE 0xedb88320
+#endif
+static u32 xgbe_vid_crc32_le(__le16 vid_le)
+{
+       u32 poly = CRCPOLY_LE;
+       u32 crc = ~0;
+       u32 temp = 0;
+       unsigned char *data = (unsigned char *)&vid_le;
+       unsigned char data_byte = 0;
+       int i, bits;
+
+       bits = get_bitmask_order(VLAN_VID_MASK);
+       for (i = 0; i < bits; i++) {
+               if ((i % 8) == 0)
+                       data_byte = data[i / 8];
+
+               temp = ((crc & 1) ^ data_byte) & 1;
+               crc >>= 1;
+               data_byte >>= 1;
+
+               if (temp)
+                       crc ^= poly;
+       }
+
+       return crc;
+}
+
+static int xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
+{
+       u32 crc;
+       u16 vid;
+       __le16 vid_le;
+       u16 vlan_hash_table = 0;
+
+       /* Generate the VLAN Hash Table value */
+       for_each_set_bit(vid, pdata->active_vlans, VLAN_N_VID) {
+               /* Get the CRC32 value of the VLAN ID */
+               vid_le = cpu_to_le16(vid);
+               crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;
+
+               vlan_hash_table |= (1 << crc);
+       }
+
+       /* Set the VLAN Hash Table filtering register */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);
+
+       return 0;
+}
+
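
MAC_VLANHTR is only 16 bits wide, so the CRC over the 12 valid VID bits
is reduced to a 4-bit bucket index (the >> 28 after the bit reversal).
A compact restatement of xgbe_vid_crc32_le() plus that reduction - it
reuses the bitrev32() helper from the sketch above and assumes a
little-endian host in place of cpu_to_le16():

#include <stdint.h>

static unsigned int xgbe_vlan_hash_bit(uint16_t vid)
{
        uint16_t vid_le = vid;          /* cpu_to_le16() stand-in */
        const uint8_t *data = (const uint8_t *)&vid_le;
        uint32_t crc = ~0u;
        uint8_t byte = 0;
        int i;

        for (i = 0; i < 12; i++) {      /* 12 = order of VLAN_VID_MASK */
                if ((i % 8) == 0)
                        byte = data[i / 8];
                if ((crc ^ byte) & 1)
                        crc = (crc >> 1) ^ 0xedb88320;
                else
                        crc >>= 1;
                byte >>= 1;
        }
        return bitrev32(~crc) >> 28;    /* bit to set in MAC_VLANHTR */
}

Since the hardware hashes the received tag, unrelated VIDs that land in
the same bucket also pass; the hash is a coarse pre-filter, not an
exact match.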
 static void xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
 {
        struct xgbe_ring_desc *rdesc = rdata->rdesc;
@@ -766,7 +895,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
 
        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
-               rdata = GET_DESC_DATA(ring, i);
+               rdata = XGBE_GET_DESC_DATA(ring, i);
                rdesc = rdata->rdesc;
 
                /* Initialize Tx descriptor
@@ -791,7 +920,7 @@ static void xgbe_tx_desc_init(struct xgbe_channel *channel)
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);
 
        /* Update the starting address of descriptor ring */
-       rdata = GET_DESC_DATA(ring, start_index);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
@@ -848,7 +977,7 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
 
        /* Initialize all descriptors */
        for (i = 0; i < ring->rdesc_count; i++) {
-               rdata = GET_DESC_DATA(ring, i);
+               rdata = XGBE_GET_DESC_DATA(ring, i);
                rdesc = rdata->rdesc;
 
                /* Initialize Rx descriptor
@@ -882,20 +1011,194 @@ static void xgbe_rx_desc_init(struct xgbe_channel *channel)
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);
 
        /* Update the starting address of descriptor ring */
-       rdata = GET_DESC_DATA(ring, start_index);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
                          upper_32_bits(rdata->rdesc_dma));
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
        /* Update the Rx Descriptor Tail Pointer */
-       rdata = GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index + ring->rdesc_count - 1);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
        DBGPR("<--rx_desc_init\n");
 }
 
+static void xgbe_update_tstamp_addend(struct xgbe_prv_data *pdata,
+                                     unsigned int addend)
+{
+       /* Set the addend register value and tell the device */
+       XGMAC_IOWRITE(pdata, MAC_TSAR, addend);
+       XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSADDREG, 1);
+
+       /* Wait for addend update to complete */
+       while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSADDREG))
+               udelay(5);
+}
+
+static void xgbe_set_tstamp_time(struct xgbe_prv_data *pdata, unsigned int sec,
+                                unsigned int nsec)
+{
+       /* Set the time values and tell the device */
+       XGMAC_IOWRITE(pdata, MAC_STSUR, sec);
+       XGMAC_IOWRITE(pdata, MAC_STNUR, nsec);
+       XGMAC_IOWRITE_BITS(pdata, MAC_TSCR, TSINIT, 1);
+
+       /* Wait for time update to complete */
+       while (XGMAC_IOREAD_BITS(pdata, MAC_TSCR, TSINIT))
+               udelay(5);
+}
+
+static u64 xgbe_get_tstamp_time(struct xgbe_prv_data *pdata)
+{
+       u64 nsec;
+
+       nsec = XGMAC_IOREAD(pdata, MAC_STSR);
+       nsec *= NSEC_PER_SEC;
+       nsec += XGMAC_IOREAD(pdata, MAC_STNR);
+
+       return nsec;
+}
+
+static u64 xgbe_get_tx_tstamp(struct xgbe_prv_data *pdata)
+{
+       unsigned int tx_snr;
+       u64 nsec;
+
+       tx_snr = XGMAC_IOREAD(pdata, MAC_TXSNR);
+       if (XGMAC_GET_BITS(tx_snr, MAC_TXSNR, TXTSSTSMIS))
+               return 0;
+
+       nsec = XGMAC_IOREAD(pdata, MAC_TXSSR);
+       nsec *= NSEC_PER_SEC;
+       nsec += tx_snr;
+
+       return nsec;
+}
+
+static void xgbe_get_rx_tstamp(struct xgbe_packet_data *packet,
+                              struct xgbe_ring_desc *rdesc)
+{
+       u64 nsec;
+
+       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSA) &&
+           !XGMAC_GET_BITS_LE(rdesc->desc3, RX_CONTEXT_DESC3, TSD)) {
+               nsec = le32_to_cpu(rdesc->desc1);
+               nsec <<= 32;
+               nsec |= le32_to_cpu(rdesc->desc0);
+               if (nsec != 0xffffffffffffffffULL) {
+                       packet->rx_tstamp = nsec;
+                       XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                                      RX_TSTAMP, 1);
+               }
+       }
+}
+
+static int xgbe_config_tstamp(struct xgbe_prv_data *pdata,
+                             unsigned int mac_tscr)
+{
+       /* Set one nanosecond accuracy */
+       XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCTRLSSR, 1);
+
+       /* Set fine timestamp update */
+       XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSCFUPDT, 1);
+
+       /* Overwrite earlier timestamps */
+       XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TXTSSTSM, 1);
+
+       XGMAC_IOWRITE(pdata, MAC_TSCR, mac_tscr);
+
+       /* Exit if timestamping is not enabled */
+       if (!XGMAC_GET_BITS(mac_tscr, MAC_TSCR, TSENA))
+               return 0;
+
+       /* Initialize time registers */
+       XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SSINC, XGBE_TSTAMP_SSINC);
+       XGMAC_IOWRITE_BITS(pdata, MAC_SSIR, SNSINC, XGBE_TSTAMP_SNSINC);
+       xgbe_update_tstamp_addend(pdata, pdata->tstamp_addend);
+       xgbe_set_tstamp_time(pdata, 0, 0);
+
+       /* Initialize the timecounter */
+       timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+                        ktime_to_ns(ktime_get_real()));
+
+       return 0;
+}
+
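
The SSINC/SNSINC increments and the initial pdata->tstamp_addend are
set up outside this hunk, but the shape of the fine-update scheme is
worth spelling out: the clock accumulates a 32-bit fraction every cycle
and advances by SSINC nanoseconds on overflow, so the addend acts as a
rate multiplier. One common way to derive it for this style of MAC - an
assumption on my part, not necessarily this driver's exact computation:

#include <stdint.h>

/* addend = 2^32 * f_target / f_clk; PTP frequency adjustments then
 * scale this value instead of reprogramming SSINC. */
static uint32_t tstamp_addend(uint64_t target_hz, uint64_t clk_hz)
{
        return (uint32_t)((target_hz << 32) / clk_hz);
}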
+static void xgbe_config_dcb_tc(struct xgbe_prv_data *pdata)
+{
+       struct ieee_ets *ets = pdata->ets;
+       unsigned int total_weight, min_weight, weight;
+       unsigned int i;
+
+       if (!ets)
+               return;
+
+       /* Set Tx to deficit weighted round robin scheduling algorithm
+        * (used when a traffic class selects the ETS algorithm)
+        */
+       XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_DWRR);
+
+       /* Set Traffic Class algorithms */
+       total_weight = pdata->netdev->mtu * pdata->hw_feat.tc_cnt;
+       min_weight = total_weight / 100;
+       if (!min_weight)
+               min_weight = 1;
+
+       for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+               switch (ets->tc_tsa[i]) {
+               case IEEE_8021QAZ_TSA_STRICT:
+                       DBGPR("  TC%u using SP\n", i);
+                       XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+                                              MTL_TSA_SP);
+                       break;
+               case IEEE_8021QAZ_TSA_ETS:
+                       weight = total_weight * ets->tc_tx_bw[i] / 100;
+                       weight = clamp(weight, min_weight, total_weight);
+
+                       DBGPR("  TC%u using DWRR (weight %u)\n", i, weight);
+                       XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+                                              MTL_TSA_ETS);
+                       XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW,
+                                              weight);
+                       break;
+               }
+       }
+}
+
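
The weights here are anchored to the MTU so that one DWRR quantum is
roughly one percent of a frame-sized budget per class. With purely
illustrative numbers - MTU 1500, 8 traffic classes, one class granted
25% of the bandwidth:

#include <stdio.h>

int main(void)
{
        unsigned int mtu = 1500, tc_cnt = 8, bw_pct = 25;
        unsigned int total = mtu * tc_cnt;        /* 12000 */
        unsigned int min = total / 100;           /* 120  */
        unsigned int w = total * bw_pct / 100;    /* 3000 */

        if (w < min)                              /* clamp() lower bound */
                w = min;
        printf("TC weight = %u\n", w);            /* 3000 */
        return 0;
}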
+static void xgbe_config_dcb_pfc(struct xgbe_prv_data *pdata)
+{
+       struct ieee_pfc *pfc = pdata->pfc;
+       struct ieee_ets *ets = pdata->ets;
+       unsigned int mask, reg, reg_val;
+       unsigned int tc, prio;
+
+       if (!pfc || !ets)
+               return;
+
+       for (tc = 0; tc < pdata->hw_feat.tc_cnt; tc++) {
+               mask = 0;
+               for (prio = 0; prio < IEEE_8021QAZ_MAX_TCS; prio++) {
+                       if ((pfc->pfc_en & (1 << prio)) &&
+                           (ets->prio_tc[prio] == tc))
+                               mask |= (1 << prio);
+               }
+               mask &= 0xff;
+
+               DBGPR("  TC%u PFC mask=%#x\n", tc, mask);
+               reg = MTL_TCPM0R + (MTL_TCPM_INC * (tc / MTL_TCPM_TC_PER_REG));
+               reg_val = XGMAC_IOREAD(pdata, reg);
+
+               reg_val &= ~(0xff << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+               reg_val |= (mask << ((tc % MTL_TCPM_TC_PER_REG) << 3));
+
+               XGMAC_IOWRITE(pdata, reg, reg_val);
+       }
+
+       xgbe_config_flow_control(pdata);
+}
+
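
The register arithmetic above packs one 8-bit priority mask per traffic
class into 32-bit MTL_TCPMxR registers. The actual values of
MTL_TCPM_INC and MTL_TCPM_TC_PER_REG are not visible in this hunk, so
the numbers below are hypothetical, but the byte-lane shift implies
four classes per register:

#include <stdio.h>

int main(void)
{
        unsigned int tc = 5, per_reg = 4, inc = 4;    /* assumed values */
        unsigned int reg_off = inc * (tc / per_reg);  /* second register */
        unsigned int shift = (tc % per_reg) << 3;     /* 8: byte lane 1 */

        printf("register offset %u, shift %u\n", reg_off, shift);
        return 0;
}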
 static void xgbe_pre_xmit(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
@@ -933,7 +1236,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
        if (tx_coalesce && !channel->tx_timer_active)
                ring->coalesce_count = 0;
 
-       rdata = GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;
 
        /* Create a context descriptor if this is a TSO packet */
@@ -977,7 +1280,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
                }
 
                ring->cur++;
-               rdata = GET_DESC_DATA(ring, ring->cur);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdesc = rdata->rdesc;
        }
 
@@ -994,6 +1297,10 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
                XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, VTIR,
                                  TX_NORMAL_DESC2_VLAN_INSERT);
 
+       /* Timestamp enablement check */
+       if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+               XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, TTSE, 1);
+
        /* Set IC bit based on Tx coalescing settings */
        XGMAC_SET_BITS_LE(rdesc->desc2, TX_NORMAL_DESC2, IC, 1);
        if (tx_coalesce && (!tx_frames ||
@@ -1034,7 +1341,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
 
        for (i = ring->cur - start_index + 1; i < packet->rdesc_count; i++) {
                ring->cur++;
-               rdata = GET_DESC_DATA(ring, ring->cur);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
                rdesc = rdata->rdesc;
 
                /* Update buffer address */
@@ -1074,7 +1381,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
        wmb();
 
        /* Set OWN bit for the first descriptor */
-       rdata = GET_DESC_DATA(ring, start_index);
+       rdata = XGBE_GET_DESC_DATA(ring, start_index);
        rdesc = rdata->rdesc;
        XGMAC_SET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN, 1);
 
@@ -1088,7 +1395,7 @@ static void xgbe_pre_xmit(struct xgbe_channel *channel)
        /* Issue a poll command to Tx DMA by writing the address
         * of the next free descriptor */
        ring->cur++;
-       rdata = GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        XGMAC_DMA_IOWRITE(channel, DMA_CH_TDTR_LO,
                          lower_32_bits(rdata->rdesc_dma));
 
@@ -1113,11 +1420,12 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        struct xgbe_ring_data *rdata;
        struct xgbe_ring_desc *rdesc;
        struct xgbe_packet_data *packet = &ring->packet_data;
+       struct net_device *netdev = channel->pdata->netdev;
        unsigned int err, etlt;
 
        DBGPR("-->xgbe_dev_read: cur = %d\n", ring->cur);
 
-       rdata = GET_DESC_DATA(ring, ring->cur);
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        rdesc = rdata->rdesc;
 
        /* Check for data availability */
@@ -1128,6 +1436,25 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        xgbe_dump_rx_desc(ring, rdesc, ring->cur);
 #endif
 
+       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
+               /* Timestamp Context Descriptor */
+               xgbe_get_rx_tstamp(packet, rdesc);
+
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              CONTEXT, 1);
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              CONTEXT_NEXT, 0);
+               return 0;
+       }
+
+       /* Normal Descriptor, be sure Context Descriptor bit is off */
+       XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);
+
+       /* Indicate if a Context Descriptor is next */
+       if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
+               XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
+                              CONTEXT_NEXT, 1);
+
        /* Get the packet length */
        rdata->len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);
 
@@ -1153,7 +1480,8 @@ static int xgbe_dev_read(struct xgbe_channel *channel)
        DBGPR("  err=%u, etlt=%#x\n", err, etlt);
 
        if (!err || (err && !etlt)) {
-               if (etlt == 0x09) {
+               if ((etlt == 0x09) &&
+                   (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)) {
                        XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
                                       VLAN_CTAG, 1);
                        packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
@@ -1188,56 +1516,48 @@ static int xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
        return XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD);
 }
 
-static void xgbe_save_interrupt_status(struct xgbe_channel *channel,
-                                      enum xgbe_int_state int_state)
+static int xgbe_enable_int(struct xgbe_channel *channel,
+                          enum xgbe_int int_id)
 {
        unsigned int dma_ch_ier;
 
-       if (int_state == XGMAC_INT_STATE_SAVE) {
-               channel->saved_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-               channel->saved_ier &= DMA_INTERRUPT_MASK;
-       } else {
-               dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-               dma_ch_ier |= channel->saved_ier;
-               XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
-       }
-}
+       dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
 
-static int xgbe_enable_int(struct xgbe_channel *channel,
-                          enum xgbe_int int_id)
-{
        switch (int_id) {
-       case XGMAC_INT_DMA_ISR_DC0IS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
-               break;
        case XGMAC_INT_DMA_CH_SR_TI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 1);
+               break;
+       case XGMAC_INT_DMA_CH_SR_TI_RI:
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 1);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 1);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 1);
                break;
        case XGMAC_INT_DMA_ALL:
-               xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_RESTORE);
+               dma_ch_ier |= channel->saved_ier;
                break;
        default:
                return -1;
        }
 
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
        return 0;
 }
 
@@ -1246,42 +1566,44 @@ static int xgbe_disable_int(struct xgbe_channel *channel,
 {
        unsigned int dma_ch_ier;
 
+       dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
+
        switch (int_id) {
-       case XGMAC_INT_DMA_ISR_DC0IS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
-               break;
        case XGMAC_INT_DMA_CH_SR_TI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TIE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TXSE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TXSE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_TBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, TBUE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RI:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RIE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RBU:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RBUE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RBUE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_RPS:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, RSE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RSE, 0);
+               break;
+       case XGMAC_INT_DMA_CH_SR_TI_RI:
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, TIE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, RIE, 0);
                break;
        case XGMAC_INT_DMA_CH_SR_FBE:
-               XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_IER, FBEE, 0);
+               XGMAC_SET_BITS(dma_ch_ier, DMA_CH_IER, FBEE, 0);
                break;
        case XGMAC_INT_DMA_ALL:
-               xgbe_save_interrupt_status(channel, XGMAC_INT_STATE_SAVE);
-
-               dma_ch_ier = XGMAC_DMA_IOREAD(channel, DMA_CH_IER);
-               dma_ch_ier &= ~DMA_INTERRUPT_MASK;
-               XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+               channel->saved_ier = dma_ch_ier & XGBE_DMA_INTERRUPT_MASK;
+               dma_ch_ier &= ~XGBE_DMA_INTERRUPT_MASK;
                break;
        default:
                return -1;
        }
 
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, dma_ch_ier);
+
        return 0;
 }
 
@@ -1311,11 +1633,11 @@ static int xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
 {
        unsigned int i, count;
 
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+       for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);
 
        /* Poll Until Poll Condition */
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++) {
+       for (i = 0; i < pdata->tx_q_count; i++) {
                count = 2000;
                while (count-- && XGMAC_MTL_IOREAD_BITS(pdata, i,
                                                        MTL_Q_TQOMR, FTQ))
@@ -1335,6 +1657,7 @@ static void xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
 
        /* Set the System Bus mode */
        XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, UNDEF, 1);
+       XGMAC_IOWRITE_BITS(pdata, DMA_SBMR, BLEN_256, 1);
 }
 
 static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
@@ -1342,23 +1665,23 @@ static void xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
        unsigned int arcache, awcache;
 
        arcache = 0;
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, DMA_ARCACHE_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, DMA_ARDOMAIN_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, DMA_ARCACHE_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, DMA_ARDOMAIN_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, DMA_ARCACHE_SETTING);
-       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, DMA_ARDOMAIN_SETTING);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRC, pdata->arcache);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, DRD, pdata->axdomain);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TEC, pdata->arcache);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, TED, pdata->axdomain);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THC, pdata->arcache);
+       XGMAC_SET_BITS(arcache, DMA_AXIARCR, THD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIARCR, arcache);
 
        awcache = 0;
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, DMA_AWDOMAIN_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, DMA_AWDOMAIN_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, DMA_AWDOMAIN_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, DMA_AWCACHE_SETTING);
-       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, DMA_AWDOMAIN_SETTING);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, DWD, pdata->axdomain);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RPD, pdata->axdomain);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, RHD, pdata->axdomain);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDC, pdata->awcache);
+       XGMAC_SET_BITS(awcache, DMA_AXIAWCR, TDD, pdata->axdomain);
        XGMAC_IOWRITE(pdata, DMA_AXIAWCR, awcache);
 }
 
@@ -1366,14 +1689,15 @@ static void xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
 {
        unsigned int i;
 
-       /* Set Tx to weighted round robin scheduling algorithm (when
-        * traffic class is using ETS algorithm)
-        */
+       /* Set Tx to weighted round robin scheduling algorithm */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);
 
-       /* Set Tx traffic classes to strict priority algorithm */
-       for (i = 0; i < XGBE_TC_CNT; i++)
-               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA, MTL_TSA_SP);
+       /* Set Tx traffic classes to use WRR algorithm with equal weights */
+       for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
+                                      MTL_TSA_ETS);
+               XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
+       }
 
        /* Set Rx to strict priority algorithm */
        XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
@@ -1388,66 +1712,66 @@ static unsigned int xgbe_calculate_per_queue_fifo(unsigned long fifo_size,
        /* Calculate Tx/Rx fifo share per queue */
        switch (fifo_size) {
        case 0:
-               q_fifo_size = FIFO_SIZE_B(128);
+               q_fifo_size = XGBE_FIFO_SIZE_B(128);
                break;
        case 1:
-               q_fifo_size = FIFO_SIZE_B(256);
+               q_fifo_size = XGBE_FIFO_SIZE_B(256);
                break;
        case 2:
-               q_fifo_size = FIFO_SIZE_B(512);
+               q_fifo_size = XGBE_FIFO_SIZE_B(512);
                break;
        case 3:
-               q_fifo_size = FIFO_SIZE_KB(1);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(1);
                break;
        case 4:
-               q_fifo_size = FIFO_SIZE_KB(2);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(2);
                break;
        case 5:
-               q_fifo_size = FIFO_SIZE_KB(4);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(4);
                break;
        case 6:
-               q_fifo_size = FIFO_SIZE_KB(8);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(8);
                break;
        case 7:
-               q_fifo_size = FIFO_SIZE_KB(16);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(16);
                break;
        case 8:
-               q_fifo_size = FIFO_SIZE_KB(32);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(32);
                break;
        case 9:
-               q_fifo_size = FIFO_SIZE_KB(64);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(64);
                break;
        case 10:
-               q_fifo_size = FIFO_SIZE_KB(128);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(128);
                break;
        case 11:
-               q_fifo_size = FIFO_SIZE_KB(256);
+               q_fifo_size = XGBE_FIFO_SIZE_KB(256);
                break;
        }
        q_fifo_size = q_fifo_size / queue_count;
 
        /* Set the queue fifo size programmable value */
-       if (q_fifo_size >= FIFO_SIZE_KB(256))
+       if (q_fifo_size >= XGBE_FIFO_SIZE_KB(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(128))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(128))
                p_fifo = XGMAC_MTL_FIFO_SIZE_128K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(64))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(64))
                p_fifo = XGMAC_MTL_FIFO_SIZE_64K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(32))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(32))
                p_fifo = XGMAC_MTL_FIFO_SIZE_32K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(16))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(16))
                p_fifo = XGMAC_MTL_FIFO_SIZE_16K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(8))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(8))
                p_fifo = XGMAC_MTL_FIFO_SIZE_8K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(4))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(4))
                p_fifo = XGMAC_MTL_FIFO_SIZE_4K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(2))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(2))
                p_fifo = XGMAC_MTL_FIFO_SIZE_2K;
-       else if (q_fifo_size >= FIFO_SIZE_KB(1))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_KB(1))
                p_fifo = XGMAC_MTL_FIFO_SIZE_1K;
-       else if (q_fifo_size >= FIFO_SIZE_B(512))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_B(512))
                p_fifo = XGMAC_MTL_FIFO_SIZE_512;
-       else if (q_fifo_size >= FIFO_SIZE_B(256))
+       else if (q_fifo_size >= XGBE_FIFO_SIZE_B(256))
                p_fifo = XGMAC_MTL_FIFO_SIZE_256;
 
        return p_fifo;
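
The switch ladder amounts to 128 << fifo_size bytes; after dividing by
the queue count, the result is rounded down to the nearest programmable
step. The notice messages further down print (fifo_size + 1) * 256,
which implies the programmed value counts 256-byte units minus one. A
worked example with illustrative values:

#include <stdio.h>

int main(void)
{
        unsigned int enc = 9, queues = 4;               /* 64 KB total */
        unsigned long q_fifo = (128UL << enc) / queues; /* 16384 */
        unsigned int tqs = q_fifo / 256 - 1;            /* 63 = "16K" */

        printf("%lu bytes/queue, TQS/RQS = %u\n", q_fifo, tqs);
        return 0;
}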
@@ -1459,13 +1783,13 @@ static void xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
        unsigned int i;
 
        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.tx_fifo_size,
-                                                 pdata->hw_feat.tx_q_cnt);
+                                                 pdata->tx_q_count);
 
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+       for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo_size);
 
        netdev_notice(pdata->netdev, "%d Tx queues, %d byte fifo per queue\n",
-                     pdata->hw_feat.tx_q_cnt, ((fifo_size + 1) * 256));
+                     pdata->tx_q_count, ((fifo_size + 1) * 256));
 }
 
 static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
@@ -1474,27 +1798,84 @@ static void xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
        unsigned int i;
 
        fifo_size = xgbe_calculate_per_queue_fifo(pdata->hw_feat.rx_fifo_size,
-                                                 pdata->hw_feat.rx_q_cnt);
+                                                 pdata->rx_q_count);
 
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+       for (i = 0; i < pdata->rx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo_size);
 
        netdev_notice(pdata->netdev, "%d Rx queues, %d byte fifo per queue\n",
-                     pdata->hw_feat.rx_q_cnt, ((fifo_size + 1) * 256));
+                     pdata->rx_q_count, ((fifo_size + 1) * 256));
 }
 
-static void xgbe_config_rx_queue_mapping(struct xgbe_prv_data *pdata)
+static void xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
 {
-       unsigned int i, reg, reg_val;
-       unsigned int q_count = pdata->hw_feat.rx_q_cnt;
+       unsigned int qptc, qptc_extra, queue;
+       unsigned int prio_queues;
+       unsigned int ppq, ppq_extra, prio;
+       unsigned int mask;
+       unsigned int i, j, reg, reg_val;
+
+       /* Map the MTL Tx Queues to Traffic Classes
+        *   Note: Tx Queues >= Traffic Classes
+        */
+       qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
+       qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;
+
+       for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
+               for (j = 0; j < qptc; j++) {
+                       DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+                       XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+                                              Q2TCMAP, i);
+                       pdata->q2tc_map[queue++] = i;
+               }
+
+               if (i < qptc_extra) {
+                       DBGPR("  TXq%u mapped to TC%u\n", queue, i);
+                       XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
+                                              Q2TCMAP, i);
+                       pdata->q2tc_map[queue++] = i;
+               }
+       }
+
+       /* Map the 8 VLAN priority values to available MTL Rx queues */
+       prio_queues = min_t(unsigned int, IEEE_8021QAZ_MAX_TCS,
+                           pdata->rx_q_count);
+       ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
+       ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;
+
+       reg = MAC_RQC2R;
+       reg_val = 0;
+       for (i = 0, prio = 0; i < prio_queues;) {
+               mask = 0;
+               for (j = 0; j < ppq; j++) {
+                       DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+                       mask |= (1 << prio);
+                       pdata->prio2q_map[prio++] = i;
+               }
+
+               if (i < ppq_extra) {
+                       DBGPR("  PRIO%u mapped to RXq%u\n", prio, i);
+                       mask |= (1 << prio);
+                       pdata->prio2q_map[prio++] = i;
+               }
+
+               reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));
+
+               if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
+                       continue;
+
+               XGMAC_IOWRITE(pdata, reg, reg_val);
+               reg += MAC_RQC2_INC;
+               reg_val = 0;
+       }
 
        /* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
        reg = MTL_RQDCM0R;
        reg_val = 0;
-       for (i = 0; i < q_count;) {
+       for (i = 0; i < pdata->rx_q_count;) {
                reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));
 
-               if ((i % MTL_RQDCM_Q_PER_REG) && (i != q_count))
+               if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
                        continue;
 
                XGMAC_IOWRITE(pdata, reg, reg_val);
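
Both mappings above use the same even-split-with-remainder pattern:
divide, then hand one extra unit to each of the first (count % buckets)
buckets. A minimal sketch of the Tx-queue-to-TC half, for example
8 queues over 3 classes (TC0 and TC1 get three queues, TC2 gets two):

#include <stdio.h>

int main(void)
{
        unsigned int txq = 8, tcs = 3, queue = 0, i, j;
        unsigned int qptc = txq / tcs, extra = txq % tcs;

        for (i = 0; i < tcs; i++)
                for (j = 0; j < qptc + (i < extra); j++)
                        printf("TXq%u -> TC%u\n", queue++, i);
        return 0;
}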
@@ -1508,7 +1889,7 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 {
        unsigned int i;
 
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++) {
+       for (i = 0; i < pdata->rx_q_count; i++) {
                /* Activate flow control when less than 4k left in fifo */
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RFA, 2);
 
@@ -1520,6 +1901,13 @@ static void xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
 static void xgbe_config_mac_address(struct xgbe_prv_data *pdata)
 {
        xgbe_set_mac_address(pdata, pdata->netdev->dev_addr);
+
+       /* Filtering is done using perfect filtering and hash filtering */
+       if (pdata->hw_feat.hash_table_size) {
+               XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
+               XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
+               XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
+       }
 }
 
 static void xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
@@ -1541,6 +1929,18 @@ static void xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
 
 static void xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
 {
+       /* Indicate that VLAN Tx CTAGs come from context descriptors */
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
+       XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);
+
+       /* Set the current VLAN Hash Table register value */
+       xgbe_update_vlan_hash_table(pdata);
+
+       if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
+               xgbe_enable_rx_vlan_filtering(pdata);
+       else
+               xgbe_disable_rx_vlan_filtering(pdata);
+
        if (pdata->netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
                xgbe_enable_rx_vlan_stripping(pdata);
        else
@@ -1881,7 +2281,7 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
        }
 
        /* Enable each Tx queue */
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+       for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
                                       MTL_Q_ENABLED);
 
@@ -1898,7 +2298,7 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
        XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
 
        /* Disable each Tx queue */
-       for (i = 0; i < pdata->hw_feat.tx_q_cnt; i++)
+       for (i = 0; i < pdata->tx_q_count; i++)
                XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
 
        /* Disable each Tx DMA channel */
@@ -1927,7 +2327,7 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
 
        /* Enable each Rx queue */
        reg_val = 0;
-       for (i = 0; i < pdata->hw_feat.rx_q_cnt; i++)
+       for (i = 0; i < pdata->rx_q_count; i++)
                reg_val |= (0x02 << (i << 1));
        XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
 
@@ -2061,9 +2461,7 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
         * Initialize MTL related features
         */
        xgbe_config_mtl_mode(pdata);
-       xgbe_config_rx_queue_mapping(pdata);
-       /*TODO: Program the priorities mapped to the Selected Traffic Classes
-               in MTL_TC_Prty_Map0-3 registers */
+       xgbe_config_queue_mapping(pdata);
        xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
        xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
        xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
@@ -2071,15 +2469,13 @@ static int xgbe_init(struct xgbe_prv_data *pdata)
        xgbe_config_tx_fifo_size(pdata);
        xgbe_config_rx_fifo_size(pdata);
        xgbe_config_flow_control_threshold(pdata);
-       /*TODO: Queue to Traffic Class Mapping (Q2TCMAP) */
        /*TODO: Error Packet and undersized good Packet forwarding enable
                (FEP and FUP)
         */
+       xgbe_config_dcb_tc(pdata);
+       xgbe_config_dcb_pfc(pdata);
        xgbe_enable_mtl_interrupts(pdata);
 
-       /* Transmit Class Weight */
-       XGMAC_IOWRITE_BITS(pdata, MTL_Q_TCQWR, QW, 0x10);
-
        /*
         * Initialize MAC related features
         */
@@ -2104,7 +2500,7 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
        hw_if->set_promiscuous_mode = xgbe_set_promiscuous_mode;
        hw_if->set_all_multicast_mode = xgbe_set_all_multicast_mode;
-       hw_if->set_addn_mac_addrs = xgbe_set_addn_mac_addrs;
+       hw_if->add_mac_addresses = xgbe_add_mac_addresses;
        hw_if->set_mac_address = xgbe_set_mac_address;
 
        hw_if->enable_rx_csum = xgbe_enable_rx_csum;
@@ -2112,6 +2508,9 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
 
        hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
        hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
+       hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
+       hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
+       hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;
 
        hw_if->read_mmd_regs = xgbe_read_mmd_regs;
        hw_if->write_mmd_regs = xgbe_write_mmd_regs;
@@ -2178,5 +2577,16 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
        hw_if->rx_mmc_int = xgbe_rx_mmc_int;
        hw_if->read_mmc_stats = xgbe_read_mmc_stats;
 
+       /* For PTP config */
+       hw_if->config_tstamp = xgbe_config_tstamp;
+       hw_if->update_tstamp_addend = xgbe_update_tstamp_addend;
+       hw_if->set_tstamp_time = xgbe_set_tstamp_time;
+       hw_if->get_tstamp_time = xgbe_get_tstamp_time;
+       hw_if->get_tx_tstamp = xgbe_get_tx_tstamp;
+
+       /* For Data Center Bridging config */
+       hw_if->config_dcb_tc = xgbe_config_dcb_tc;
+       hw_if->config_dcb_pfc = xgbe_config_dcb_pfc;
+
        DBGPR("<--xgbe_init_function_ptrs\n");
 }
index cfe3d93..3bf3c01 100644
 #include <net/busy_poll.h>
 #include <linux/clk.h>
 #include <linux/if_ether.h>
+#include <linux/net_tstamp.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -144,9 +145,10 @@ static int xgbe_calc_rx_buf_size(struct net_device *netdev, unsigned int mtu)
        }
 
        rx_buf_size = mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN;
-       if (rx_buf_size < RX_MIN_BUF_SIZE)
-               rx_buf_size = RX_MIN_BUF_SIZE;
-       rx_buf_size = (rx_buf_size + RX_BUF_ALIGN - 1) & ~(RX_BUF_ALIGN - 1);
+       if (rx_buf_size < XGBE_RX_MIN_BUF_SIZE)
+               rx_buf_size = XGBE_RX_MIN_BUF_SIZE;
+       rx_buf_size = (rx_buf_size + XGBE_RX_BUF_ALIGN - 1) &
+                     ~(XGBE_RX_BUF_ALIGN - 1);
 
        return rx_buf_size;
 }
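
The closing mask expression is the usual power-of-two align-up. With a
hypothetical 64-byte XGBE_RX_BUF_ALIGN (its value is not visible here)
and a 1500-byte MTU: 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4
(VLAN_HLEN) = 1522, and (1522 + 63) & ~63 = 1536.

/* Round v up to the next multiple of a; a must be a power of two */
static unsigned int align_up(unsigned int v, unsigned int a)
{
        return (v + a - 1) & ~(a - 1);
}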
@@ -155,16 +157,21 @@ static void xgbe_enable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
+       enum xgbe_int int_id;
        unsigned int i;
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
-               if (channel->tx_ring)
-                       hw_if->enable_int(channel,
-                                         XGMAC_INT_DMA_CH_SR_TI);
-               if (channel->rx_ring)
-                       hw_if->enable_int(channel,
-                                         XGMAC_INT_DMA_CH_SR_RI);
+               if (channel->tx_ring && channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+               else if (channel->tx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI;
+               else if (channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_RI;
+               else
+                       continue;
+
+               hw_if->enable_int(channel, int_id);
        }
 }
 
@@ -172,16 +179,21 @@ static void xgbe_disable_rx_tx_ints(struct xgbe_prv_data *pdata)
 {
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
+       enum xgbe_int int_id;
        unsigned int i;
 
        channel = pdata->channel;
        for (i = 0; i < pdata->channel_count; i++, channel++) {
-               if (channel->tx_ring)
-                       hw_if->disable_int(channel,
-                                          XGMAC_INT_DMA_CH_SR_TI);
-               if (channel->rx_ring)
-                       hw_if->disable_int(channel,
-                                          XGMAC_INT_DMA_CH_SR_RI);
+               if (channel->tx_ring && channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI_RI;
+               else if (channel->tx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_TI;
+               else if (channel->rx_ring)
+                       int_id = XGMAC_INT_DMA_CH_SR_RI;
+               else
+                       continue;
+
+               hw_if->disable_int(channel, int_id);
        }
 }
 
@@ -191,7 +203,7 @@ static irqreturn_t xgbe_isr(int irq, void *data)
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct xgbe_channel *channel;
        unsigned int dma_isr, dma_ch_isr;
-       unsigned int mac_isr;
+       unsigned int mac_isr, mac_tssr;
        unsigned int i;
 
        /* The DMA interrupt status register also reports MAC and MTL
@@ -244,6 +256,17 @@ static irqreturn_t xgbe_isr(int irq, void *data)
 
                if (XGMAC_GET_BITS(mac_isr, MAC_ISR, MMCRXIS))
                        hw_if->rx_mmc_int(pdata);
+
+               if (XGMAC_GET_BITS(mac_isr, MAC_ISR, TSIS)) {
+                       mac_tssr = XGMAC_IOREAD(pdata, MAC_TSSR);
+
+                       if (XGMAC_GET_BITS(mac_tssr, MAC_TSSR, TXTSC)) {
+                               /* Read Tx Timestamp to clear interrupt */
+                               pdata->tx_tstamp =
+                                       hw_if->get_tx_tstamp(pdata);
+                               schedule_work(&pdata->tx_tstamp_work);
+                       }
+               }
        }
 
        DBGPR("  DMA_ISR = %08x\n", XGMAC_IOREAD(pdata, DMA_ISR));
@@ -364,6 +387,7 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        hw_feat->sph           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, SPHEN);
        hw_feat->tso           = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, TSOEN);
        hw_feat->dma_debug     = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, DBGMEMA);
+       hw_feat->tc_cnt        = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R, NUMTC);
        hw_feat->hash_table_size = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
                                                  HASHTBLSZ);
        hw_feat->l3l4_filter_num = XGMAC_GET_BITS(mac_hfr1, MAC_HWF1R,
@@ -377,6 +401,21 @@ void xgbe_get_all_hw_features(struct xgbe_prv_data *pdata)
        hw_feat->pps_out_num  = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, PPSOUTNUM);
        hw_feat->aux_snap_num = XGMAC_GET_BITS(mac_hfr2, MAC_HWF2R, AUXSNAPNUM);
 
+       /* Translate the Hash Table size into the actual number */
+       switch (hw_feat->hash_table_size) {
+       case 0:
+               break;
+       case 1:
+               hw_feat->hash_table_size = 64;
+               break;
+       case 2:
+               hw_feat->hash_table_size = 128;
+               break;
+       case 3:
+               hw_feat->hash_table_size = 256;
+               break;
+       }
+
        /* The Queue and Channel counts are zero based so increment them
         * to get the actual number
         */
@@ -396,9 +435,12 @@ static void xgbe_napi_enable(struct xgbe_prv_data *pdata, unsigned int add)
        napi_enable(&pdata->napi);
 }
 
-static void xgbe_napi_disable(struct xgbe_prv_data *pdata)
+static void xgbe_napi_disable(struct xgbe_prv_data *pdata, unsigned int del)
 {
        napi_disable(&pdata->napi);
+
+       if (del)
+               netif_napi_del(&pdata->napi);
 }
 
 void xgbe_init_tx_coalesce(struct xgbe_prv_data *pdata)
@@ -446,7 +488,7 @@ static void xgbe_free_tx_skbuff(struct xgbe_prv_data *pdata)
                        break;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_skb(pdata, rdata);
                }
        }
@@ -471,7 +513,7 @@ static void xgbe_free_rx_skbuff(struct xgbe_prv_data *pdata)
                        break;
 
                for (j = 0; j < ring->rdesc_count; j++) {
-                       rdata = GET_DESC_DATA(ring, j);
+                       rdata = XGBE_GET_DESC_DATA(ring, j);
                        desc_if->unmap_skb(pdata, rdata);
                }
        }
@@ -502,7 +544,7 @@ int xgbe_powerdown(struct net_device *netdev, unsigned int caller)
                netif_device_detach(netdev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata);
+       xgbe_napi_disable(pdata, 0);
 
        /* Powerdown Tx/Rx */
        hw_if->powerdown_tx(pdata);
@@ -591,7 +633,7 @@ static void xgbe_stop(struct xgbe_prv_data *pdata)
        phy_stop(pdata->phydev);
 
        netif_tx_stop_all_queues(netdev);
-       xgbe_napi_disable(pdata);
+       xgbe_napi_disable(pdata, 1);
 
        xgbe_stop_tx_timers(pdata);
 
@@ -639,6 +681,197 @@ static void xgbe_restart(struct work_struct *work)
        rtnl_unlock();
 }
 
+static void xgbe_tx_tstamp(struct work_struct *work)
+{
+       struct xgbe_prv_data *pdata = container_of(work,
+                                                  struct xgbe_prv_data,
+                                                  tx_tstamp_work);
+       struct skb_shared_hwtstamps hwtstamps;
+       u64 nsec;
+       unsigned long flags;
+
+       if (pdata->tx_tstamp) {
+               nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+                                           pdata->tx_tstamp);
+
+               memset(&hwtstamps, 0, sizeof(hwtstamps));
+               hwtstamps.hwtstamp = ns_to_ktime(nsec);
+               skb_tstamp_tx(pdata->tx_tstamp_skb, &hwtstamps);
+       }
+
+       dev_kfree_skb_any(pdata->tx_tstamp_skb);
+
+       spin_lock_irqsave(&pdata->tstamp_lock, flags);
+       pdata->tx_tstamp_skb = NULL;
+       spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+}
+
+static int xgbe_get_hwtstamp_settings(struct xgbe_prv_data *pdata,
+                                     struct ifreq *ifreq)
+{
+       if (copy_to_user(ifreq->ifr_data, &pdata->tstamp_config,
+                        sizeof(pdata->tstamp_config)))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int xgbe_set_hwtstamp_settings(struct xgbe_prv_data *pdata,
+                                     struct ifreq *ifreq)
+{
+       struct hwtstamp_config config;
+       unsigned int mac_tscr;
+
+       if (copy_from_user(&config, ifreq->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       if (config.flags)
+               return -EINVAL;
+
+       mac_tscr = 0;
+
+       switch (config.tx_type) {
+       case HWTSTAMP_TX_OFF:
+               break;
+
+       case HWTSTAMP_TX_ON:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       default:
+               return -ERANGE;
+       }
+
+       switch (config.rx_filter) {
+       case HWTSTAMP_FILTER_NONE:
+               break;
+
+       case HWTSTAMP_FILTER_ALL:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENALL, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* PTP v2, UDP, any kind of event packet */
+       case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
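+               /* fall through - the PTP v1 settings below also apply */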
+       /* PTP v1, UDP, any kind of event packet */
+       case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* PTP v2, UDP, Sync packet */
+       case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
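+               /* fall through - the PTP v1 settings below also apply */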
+       /* PTP v1, UDP, Sync packet */
+       case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* PTP v2, UDP, Delay_req packet */
+       case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
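+               /* fall through - the PTP v1 settings below also apply */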
+       /* PTP v1, UDP, Delay_req packet */
+       case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* 802.1AS, Ethernet, any kind of event packet */
+       case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* 802.1AS, Ethernet, Sync packet */
+       case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* 802.1AS, Ethernet, Delay_req packet */
+       case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, AV8021ASMEN, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* PTP v2/802.1AS, any layer, any kind of event packet */
+       case HWTSTAMP_FILTER_PTP_V2_EVENT:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, SNAPTYPSEL, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* PTP v2/802.1AS, any layer, Sync packet */
+       case HWTSTAMP_FILTER_PTP_V2_SYNC:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       /* PTP v2/802.1AS, any layer, Delay_req packet */
+       case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSVER2ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV4ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSIPV6ENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSMSTRENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSEVNTENA, 1);
+               XGMAC_SET_BITS(mac_tscr, MAC_TSCR, TSENA, 1);
+               break;
+
+       default:
+               return -ERANGE;
+       }
+
+       pdata->hw_if.config_tstamp(pdata, mac_tscr);
+
+       memcpy(&pdata->tstamp_config, &config, sizeof(config));
+
+       return 0;
+}
+
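+/* Illustrative userspace sketch (not part of this patch): how the
+ * SIOCSHWTSTAMP path above is typically driven.  The socket, interface
+ * name and the chosen Rx filter are assumptions for the example.
+ */
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/net_tstamp.h>
+#include <linux/sockios.h>
+
+static int enable_hw_tstamp(int sock, const char *ifname)
+{
+       struct hwtstamp_config cfg;
+       struct ifreq ifr;
+
+       memset(&cfg, 0, sizeof(cfg));
+       cfg.flags = 0;                  /* non-zero is rejected above */
+       cfg.tx_type = HWTSTAMP_TX_ON;   /* sets TSENA in MAC_TSCR */
+       cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
+
+       memset(&ifr, 0, sizeof(ifr));
+       strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+       ifr.ifr_data = (void *)&cfg;
+
+       /* SIOCSHWTSTAMP applies the config; SIOCGHWTSTAMP reads it back */
+       return ioctl(sock, SIOCSHWTSTAMP, &ifr);
+}
+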
+static void xgbe_prep_tx_tstamp(struct xgbe_prv_data *pdata,
+                               struct sk_buff *skb,
+                               struct xgbe_packet_data *packet)
+{
+       unsigned long flags;
+
+       if (XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP)) {
+               spin_lock_irqsave(&pdata->tstamp_lock, flags);
+               if (pdata->tx_tstamp_skb) {
+                       /* Another timestamp in progress, ignore this one */
+                       XGMAC_SET_BITS(packet->attributes,
+                                      TX_PACKET_ATTRIBUTES, PTP, 0);
+               } else {
+                       pdata->tx_tstamp_skb = skb_get(skb);
+                       skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
+               }
+               spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+       }
+
+       if (!XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES, PTP))
+               skb_tx_timestamp(skb);
+}
+
 static void xgbe_prep_vlan(struct sk_buff *skb, struct xgbe_packet_data *packet)
 {
        if (vlan_tx_tag_present(skb))
@@ -682,7 +915,8 @@ static int xgbe_is_tso(struct sk_buff *skb)
        return 1;
 }
 
-static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
+static void xgbe_packet_info(struct xgbe_prv_data *pdata,
+                            struct xgbe_ring *ring, struct sk_buff *skb,
                             struct xgbe_packet_data *packet)
 {
        struct skb_frag_struct *frag;
@@ -724,16 +958,21 @@ static void xgbe_packet_info(struct xgbe_ring *ring, struct sk_buff *skb,
                               VLAN_CTAG, 1);
        }
 
+       if ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+           (pdata->tstamp_config.tx_type == HWTSTAMP_TX_ON))
+               XGMAC_SET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
+                              PTP, 1);
+
        for (len = skb_headlen(skb); len;) {
                packet->rdesc_count++;
-               len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+               len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
        }
 
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                frag = &skb_shinfo(skb)->frags[i];
                for (len = skb_frag_size(frag); len; ) {
                        packet->rdesc_count++;
-                       len -= min_t(unsigned int, len, TX_MAX_BUF_SIZE);
+                       len -= min_t(unsigned int, len, XGBE_TX_MAX_BUF_SIZE);
                }
        }
 }
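 
 /* Worked example (illustrative): each sizing loop above is equivalent to
  *     packet->rdesc_count += DIV_ROUND_UP(len, XGBE_TX_MAX_BUF_SIZE);
  * e.g. with XGBE_TX_MAX_BUF_SIZE = (0x3fff & ~63) = 16320, a 40000-byte
  * buffer needs DIV_ROUND_UP(40000, 16320) = 3 descriptors.
  */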
@@ -747,26 +986,33 @@ static int xgbe_open(struct net_device *netdev)
 
        DBGPR("-->xgbe_open\n");
 
-       /* Enable the clock */
-       ret = clk_prepare_enable(pdata->sysclock);
+       /* Enable the clocks */
+       ret = clk_prepare_enable(pdata->sysclk);
        if (ret) {
-               netdev_alert(netdev, "clk_prepare_enable failed\n");
+               netdev_alert(netdev, "dma clk_prepare_enable failed\n");
                return ret;
        }
 
+       ret = clk_prepare_enable(pdata->ptpclk);
+       if (ret) {
+               netdev_alert(netdev, "ptp clk_prepare_enable failed\n");
+               goto err_sysclk;
+       }
+
        /* Calculate the Rx buffer size before allocating rings */
        ret = xgbe_calc_rx_buf_size(netdev, netdev->mtu);
        if (ret < 0)
-               goto err_clk;
+               goto err_ptpclk;
        pdata->rx_buf_size = ret;
 
        /* Allocate the ring descriptors and buffers */
        ret = desc_if->alloc_ring_resources(pdata);
        if (ret)
-               goto err_clk;
+               goto err_ptpclk;
 
-       /* Initialize the device restart work struct */
+       /* Initialize the device restart and Tx timestamp work structs */
        INIT_WORK(&pdata->restart_work, xgbe_restart);
+       INIT_WORK(&pdata->tx_tstamp_work, xgbe_tx_tstamp);
 
        /* Request interrupts */
        ret = devm_request_irq(pdata->dev, netdev->irq, xgbe_isr, 0,
@@ -795,8 +1041,11 @@ err_start:
 err_irq:
        desc_if->free_ring_resources(pdata);
 
-err_clk:
-       clk_disable_unprepare(pdata->sysclock);
+err_ptpclk:
+       clk_disable_unprepare(pdata->ptpclk);
+
+err_sysclk:
+       clk_disable_unprepare(pdata->sysclk);
 
        return ret;
 }
@@ -824,8 +1073,9 @@ static int xgbe_close(struct net_device *netdev)
                pdata->irq_number = 0;
        }
 
-       /* Disable the clock */
-       clk_disable_unprepare(pdata->sysclock);
+       /* Disable the clocks */
+       clk_disable_unprepare(pdata->ptpclk);
+       clk_disable_unprepare(pdata->sysclk);
 
        DBGPR("<--xgbe_close\n");
 
@@ -861,7 +1111,7 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
 
        /* Calculate preliminary packet info */
        memset(packet, 0, sizeof(*packet));
-       xgbe_packet_info(ring, skb, packet);
+       xgbe_packet_info(pdata, ring, skb, packet);
 
        /* Check that there are enough descriptors available */
        if (packet->rdesc_count > xgbe_tx_avail_desc(ring)) {
@@ -885,6 +1135,8 @@ static int xgbe_xmit(struct sk_buff *skb, struct net_device *netdev)
                goto tx_netdev_return;
        }
 
+       xgbe_prep_tx_tstamp(pdata, skb, packet);
+
        /* Configure required descriptor fields for transmission */
        hw_if->pre_xmit(channel);
 
@@ -911,18 +1163,10 @@ static void xgbe_set_rx_mode(struct net_device *netdev)
        pr_mode = ((netdev->flags & IFF_PROMISC) != 0);
        am_mode = ((netdev->flags & IFF_ALLMULTI) != 0);
 
-       if (netdev_uc_count(netdev) > pdata->hw_feat.addn_mac)
-               pr_mode = 1;
-       if (netdev_mc_count(netdev) > pdata->hw_feat.addn_mac)
-               am_mode = 1;
-       if ((netdev_uc_count(netdev) + netdev_mc_count(netdev)) >
-            pdata->hw_feat.addn_mac)
-               pr_mode = 1;
-
        hw_if->set_promiscuous_mode(pdata, pr_mode);
        hw_if->set_all_multicast_mode(pdata, am_mode);
-       if (!pr_mode)
-               hw_if->set_addn_mac_addrs(pdata, am_mode);
+
+       hw_if->add_mac_addresses(pdata);
 
        DBGPR("<--xgbe_set_rx_mode\n");
 }
@@ -947,6 +1191,27 @@ static int xgbe_set_mac_address(struct net_device *netdev, void *addr)
        return 0;
 }
 
+static int xgbe_ioctl(struct net_device *netdev, struct ifreq *ifreq, int cmd)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       int ret;
+
+       switch (cmd) {
+       case SIOCGHWTSTAMP:
+               ret = xgbe_get_hwtstamp_settings(pdata, ifreq);
+               break;
+
+       case SIOCSHWTSTAMP:
+               ret = xgbe_set_hwtstamp_settings(pdata, ifreq);
+               break;
+
+       default:
+               ret = -EOPNOTSUPP;
+       }
+
+       return ret;
+}
+
 static int xgbe_change_mtu(struct net_device *netdev, int mtu)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
@@ -999,6 +1264,38 @@ static struct rtnl_link_stats64 *xgbe_get_stats64(struct net_device *netdev,
        return s;
 }
 
+static int xgbe_vlan_rx_add_vid(struct net_device *netdev, __be16 proto,
+                               u16 vid)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+       DBGPR("-->%s\n", __func__);
+
+       set_bit(vid, pdata->active_vlans);
+       hw_if->update_vlan_hash_table(pdata);
+
+       DBGPR("<--%s\n", __func__);
+
+       return 0;
+}
+
+static int xgbe_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto,
+                                u16 vid)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       struct xgbe_hw_if *hw_if = &pdata->hw_if;
+
+       DBGPR("-->%s\n", __func__);
+
+       clear_bit(vid, pdata->active_vlans);
+       hw_if->update_vlan_hash_table(pdata);
+
+       DBGPR("<--%s\n", __func__);
+
+       return 0;
+}
+
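+/* For reference (illustrative, "eth0" is a placeholder): the two ndo
+ * hooks above run when userspace creates or deletes a VLAN device, e.g.
+ * via iproute2:
+ *
+ *     ip link add link eth0 name eth0.100 type vlan id 100
+ *
+ * which invokes xgbe_vlan_rx_add_vid() with vid 100 and rewrites the
+ * hardware VLAN hash table.
+ */
+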
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void xgbe_poll_controller(struct net_device *netdev)
 {
@@ -1016,31 +1313,58 @@ static void xgbe_poll_controller(struct net_device *netdev)
 }
 #endif /* End CONFIG_NET_POLL_CONTROLLER */
 
+static int xgbe_setup_tc(struct net_device *netdev, u8 tc)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+       unsigned int offset, queue;
+       u8 i;
+
+       if (tc && (tc != pdata->hw_feat.tc_cnt))
+               return -EINVAL;
+
+       if (tc) {
+               netdev_set_num_tc(netdev, tc);
+               for (i = 0, queue = 0, offset = 0; i < tc; i++) {
+                       while ((queue < pdata->tx_q_count) &&
+                              (pdata->q2tc_map[queue] == i))
+                               queue++;
+
+                       DBGPR("  TC%u using TXq%u-%u\n", i, offset, queue - 1);
+                       netdev_set_tc_queue(netdev, i, queue - offset, offset);
+                       offset = queue;
+               }
+       } else {
+               netdev_reset_tc(netdev);
+       }
+
+       return 0;
+}
+
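+/* Worked example (illustrative numbers): with tx_q_count = 4 and
+ * q2tc_map = { 0, 0, 1, 1 }, the loop above results in
+ *
+ *     netdev_set_tc_queue(netdev, 0, 2, 0);   TC0 -> TXq0-TXq1
+ *     netdev_set_tc_queue(netdev, 1, 2, 2);   TC1 -> TXq2-TXq3
+ *
+ * i.e. each traffic class owns a contiguous range of Tx queues.
+ */
+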
 static int xgbe_set_features(struct net_device *netdev,
                             netdev_features_t features)
 {
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       unsigned int rxcsum_enabled, rxvlan_enabled;
+       unsigned int rxcsum, rxvlan, rxvlan_filter;
 
-       rxcsum_enabled = !!(pdata->netdev_features & NETIF_F_RXCSUM);
-       rxvlan_enabled = !!(pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX);
+       rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
+       rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
+       rxvlan_filter = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_FILTER;
 
-       if ((features & NETIF_F_RXCSUM) && !rxcsum_enabled) {
+       if ((features & NETIF_F_RXCSUM) && !rxcsum)
                hw_if->enable_rx_csum(pdata);
-               netdev_alert(netdev, "state change - rxcsum enabled\n");
-       } else if (!(features & NETIF_F_RXCSUM) && rxcsum_enabled) {
+       else if (!(features & NETIF_F_RXCSUM) && rxcsum)
                hw_if->disable_rx_csum(pdata);
-               netdev_alert(netdev, "state change - rxcsum disabled\n");
-       }
 
-       if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan_enabled) {
+       if ((features & NETIF_F_HW_VLAN_CTAG_RX) && !rxvlan)
                hw_if->enable_rx_vlan_stripping(pdata);
-               netdev_alert(netdev, "state change - rxvlan enabled\n");
-       } else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan_enabled) {
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_RX) && rxvlan)
                hw_if->disable_rx_vlan_stripping(pdata);
-               netdev_alert(netdev, "state change - rxvlan disabled\n");
-       }
+
+       if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) && !rxvlan_filter)
+               hw_if->enable_rx_vlan_filtering(pdata);
+       else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) && rxvlan_filter)
+               hw_if->disable_rx_vlan_filtering(pdata);
 
        pdata->netdev_features = features;
 
@@ -1056,11 +1380,15 @@ static const struct net_device_ops xgbe_netdev_ops = {
        .ndo_set_rx_mode        = xgbe_set_rx_mode,
        .ndo_set_mac_address    = xgbe_set_mac_address,
        .ndo_validate_addr      = eth_validate_addr,
+       .ndo_do_ioctl           = xgbe_ioctl,
        .ndo_change_mtu         = xgbe_change_mtu,
        .ndo_get_stats64        = xgbe_get_stats64,
+       .ndo_vlan_rx_add_vid    = xgbe_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = xgbe_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = xgbe_poll_controller,
 #endif
+       .ndo_setup_tc           = xgbe_setup_tc,
        .ndo_set_features       = xgbe_set_features,
 };
 
@@ -1069,6 +1397,22 @@ struct net_device_ops *xgbe_get_netdev_ops(void)
        return (struct net_device_ops *)&xgbe_netdev_ops;
 }
 
+static void xgbe_rx_refresh(struct xgbe_channel *channel)
+{
+       struct xgbe_prv_data *pdata = channel->pdata;
+       struct xgbe_desc_if *desc_if = &pdata->desc_if;
+       struct xgbe_ring *ring = channel->rx_ring;
+       struct xgbe_ring_data *rdata;
+
+       desc_if->realloc_skb(channel);
+
+       /* Update the Rx Tail Pointer Register with address of
+        * the last cleaned entry */
+       rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
+       XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
+                         lower_32_bits(rdata->rdesc_dma));
+}
+
 static int xgbe_tx_poll(struct xgbe_channel *channel)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
@@ -1089,8 +1433,9 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
 
        spin_lock_irqsave(&ring->lock, flags);
 
-       while ((processed < TX_DESC_MAX_PROC) && (ring->dirty < ring->cur)) {
-               rdata = GET_DESC_DATA(ring, ring->dirty);
+       while ((processed < XGBE_TX_DESC_MAX_PROC) &&
+              (ring->dirty < ring->cur)) {
+               rdata = XGBE_GET_DESC_DATA(ring, ring->dirty);
                rdesc = rdata->rdesc;
 
                if (!hw_if->tx_complete(rdesc))
@@ -1109,7 +1454,7 @@ static int xgbe_tx_poll(struct xgbe_channel *channel)
        }
 
        if ((ring->tx.queue_stopped == 1) &&
-           (xgbe_tx_avail_desc(ring) > TX_DESC_MIN_FREE)) {
+           (xgbe_tx_avail_desc(ring) > XGBE_TX_DESC_MIN_FREE)) {
                ring->tx.queue_stopped = 0;
                netif_wake_subqueue(netdev, channel->queue_index);
        }
@@ -1125,14 +1470,14 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
 {
        struct xgbe_prv_data *pdata = channel->pdata;
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
-       struct xgbe_desc_if *desc_if = &pdata->desc_if;
        struct xgbe_ring *ring = channel->rx_ring;
        struct xgbe_ring_data *rdata;
        struct xgbe_packet_data *packet;
        struct net_device *netdev = pdata->netdev;
        struct sk_buff *skb;
-       unsigned int incomplete, error;
-       unsigned int cur_len, put_len, max_len;
+       struct skb_shared_hwtstamps *hwtstamps;
+       unsigned int incomplete, error, context_next, context;
+       unsigned int len, put_len, max_len;
        int received = 0;
 
        DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
@@ -1141,18 +1486,32 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
        if (!ring)
                return 0;
 
+       rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
        packet = &ring->packet_data;
        while (received < budget) {
                DBGPR("  cur = %d\n", ring->cur);
 
-               /* Clear the packet data information */
-               memset(packet, 0, sizeof(*packet));
-               skb = NULL;
-               error = 0;
-               cur_len = 0;
+               /* First time in the loop, see if we need to restore state */
+               if (!received && rdata->state_saved) {
+                       incomplete = rdata->state.incomplete;
+                       context_next = rdata->state.context_next;
+                       skb = rdata->state.skb;
+                       error = rdata->state.error;
+                       len = rdata->state.len;
+               } else {
+                       memset(packet, 0, sizeof(*packet));
+                       incomplete = 0;
+                       context_next = 0;
+                       skb = NULL;
+                       error = 0;
+                       len = 0;
+               }
 
 read_again:
-               rdata = GET_DESC_DATA(ring, ring->cur);
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+
+               if (ring->dirty > (XGBE_RX_DESC_CNT >> 3))
+                       xgbe_rx_refresh(channel);
 
                if (hw_if->dev_read(channel))
                        break;
@@ -1168,9 +1527,15 @@ read_again:
                incomplete = XGMAC_GET_BITS(packet->attributes,
                                            RX_PACKET_ATTRIBUTES,
                                            INCOMPLETE);
+               context_next = XGMAC_GET_BITS(packet->attributes,
+                                             RX_PACKET_ATTRIBUTES,
+                                             CONTEXT_NEXT);
+               context = XGMAC_GET_BITS(packet->attributes,
+                                        RX_PACKET_ATTRIBUTES,
+                                        CONTEXT);
 
                /* Earlier error, just drain the remaining data */
-               if (incomplete && error)
+               if ((incomplete || context_next) && error)
                        goto read_again;
 
                if (error || packet->errors) {
@@ -1180,30 +1545,37 @@ read_again:
                        continue;
                }
 
-               put_len = rdata->len - cur_len;
-               if (skb) {
-                       if (pskb_expand_head(skb, 0, put_len, GFP_ATOMIC)) {
-                               DBGPR("pskb_expand_head error\n");
-                               if (incomplete) {
-                                       error = 1;
-                                       goto read_again;
+               if (!context) {
+                       put_len = rdata->len - len;
+                       if (skb) {
+                               if (pskb_expand_head(skb, 0, put_len,
+                                                    GFP_ATOMIC)) {
+                                       DBGPR("pskb_expand_head error\n");
+                                       if (incomplete) {
+                                               error = 1;
+                                               goto read_again;
+                                       }
+
+                                       dev_kfree_skb(skb);
+                                       continue;
                                }
-
-                               dev_kfree_skb(skb);
-                               continue;
+                               memcpy(skb_tail_pointer(skb), rdata->skb->data,
+                                      put_len);
+                       } else {
+                               skb = rdata->skb;
+                               rdata->skb = NULL;
                        }
-                       memcpy(skb_tail_pointer(skb), rdata->skb->data,
-                              put_len);
-               } else {
-                       skb = rdata->skb;
-                       rdata->skb = NULL;
+                       skb_put(skb, put_len);
+                       len += put_len;
                }
-               skb_put(skb, put_len);
-               cur_len += put_len;
 
-               if (incomplete)
+               if (incomplete || context_next)
                        goto read_again;
 
+               /* Stray Context Descriptor? */
+               if (!skb)
+                       continue;
+
                /* Be sure we don't exceed the configured MTU */
                max_len = netdev->mtu + ETH_HLEN;
                if (!(netdev->features & NETIF_F_HW_VLAN_CTAG_RX) &&
@@ -1230,6 +1602,16 @@ read_again:
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q),
                                               packet->vlan_ctag);
 
+               if (XGMAC_GET_BITS(packet->attributes,
+                                  RX_PACKET_ATTRIBUTES, RX_TSTAMP)) {
+                       u64 nsec;
+
+                       nsec = timecounter_cyc2time(&pdata->tstamp_tc,
+                                                   packet->rx_tstamp);
+                       hwtstamps = skb_hwtstamps(skb);
+                       hwtstamps->hwtstamp = ns_to_ktime(nsec);
+               }
+
                skb->dev = netdev;
                skb->protocol = eth_type_trans(skb, netdev);
                skb_record_rx_queue(skb, channel->queue_index);
@@ -1239,14 +1621,15 @@ read_again:
                napi_gro_receive(&pdata->napi, skb);
        }
 
-       if (received) {
-               desc_if->realloc_skb(channel);
-
-               /* Update the Rx Tail Pointer Register with address of
-                * the last cleaned entry */
-               rdata = GET_DESC_DATA(ring, ring->rx.realloc_index - 1);
-               XGMAC_DMA_IOWRITE(channel, DMA_CH_RDTR_LO,
-                                 lower_32_bits(rdata->rdesc_dma));
+       /* Check if we need to save state before leaving */
+       if (received && (incomplete || context_next)) {
+               rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
+               rdata->state_saved = 1;
+               rdata->state.incomplete = incomplete;
+               rdata->state.context_next = context_next;
+               rdata->state.skb = skb;
+               rdata->state.len = len;
+               rdata->state.error = error;
        }
 
        DBGPR("<--xgbe_rx_poll: received = %d\n", received);
@@ -1259,21 +1642,28 @@ static int xgbe_poll(struct napi_struct *napi, int budget)
        struct xgbe_prv_data *pdata = container_of(napi, struct xgbe_prv_data,
                                                   napi);
        struct xgbe_channel *channel;
-       int processed;
+       int ring_budget;
+       int processed, last_processed;
        unsigned int i;
 
        DBGPR("-->xgbe_poll: budget=%d\n", budget);
 
-       /* Cleanup Tx ring first */
-       channel = pdata->channel;
-       for (i = 0; i < pdata->channel_count; i++, channel++)
-               xgbe_tx_poll(channel);
-
-       /* Process Rx ring next */
        processed = 0;
-       channel = pdata->channel;
-       for (i = 0; i < pdata->channel_count; i++, channel++)
-               processed += xgbe_rx_poll(channel, budget - processed);
+       ring_budget = budget / pdata->rx_ring_count;
+       do {
+               last_processed = processed;
+
+               channel = pdata->channel;
+               for (i = 0; i < pdata->channel_count; i++, channel++) {
+                       /* Cleanup Tx ring first */
+                       xgbe_tx_poll(channel);
+
+                       /* Process Rx ring next */
+                       if (ring_budget > (budget - processed))
+                               ring_budget = budget - processed;
+                       processed += xgbe_rx_poll(channel, ring_budget);
+               }
+       } while ((processed < budget) && (processed != last_processed));
 
        /* If we processed everything, we are done */
        if (processed < budget) {
@@ -1296,7 +1686,7 @@ void xgbe_dump_tx_desc(struct xgbe_ring *ring, unsigned int idx,
        struct xgbe_ring_desc *rdesc;
 
        while (count--) {
-               rdata = GET_DESC_DATA(ring, idx);
+               rdata = XGBE_GET_DESC_DATA(ring, idx);
                rdesc = rdata->rdesc;
                DBGPR("TX_NORMAL_DESC[%d %s] = %08x:%08x:%08x:%08x\n", idx,
                      (flag == 1) ? "QUEUED FOR TX" : "TX BY DEVICE",
index 8909f2b..a076aca 100644 (file)
 
 #include <linux/spinlock.h>
 #include <linux/phy.h>
+#include <linux/net_tstamp.h>
 
 #include "xgbe.h"
 #include "xgbe-common.h"
@@ -289,13 +290,9 @@ static int xgbe_get_settings(struct net_device *netdev,
        if (!pdata->phydev)
                return -ENODEV;
 
-       spin_lock_irq(&pdata->lock);
-
        ret = phy_ethtool_gset(pdata->phydev, cmd);
        cmd->transceiver = XCVR_EXTERNAL;
 
-       spin_unlock_irq(&pdata->lock);
-
        DBGPR("<--xgbe_get_settings\n");
 
        return ret;
@@ -314,36 +311,32 @@ static int xgbe_set_settings(struct net_device *netdev,
        if (!pdata->phydev)
                return -ENODEV;
 
-       spin_lock_irq(&pdata->lock);
-
        speed = ethtool_cmd_speed(cmd);
 
-       ret = -EINVAL;
        if (cmd->phy_address != phydev->addr)
-               goto unlock;
+               return -EINVAL;
 
        if ((cmd->autoneg != AUTONEG_ENABLE) &&
            (cmd->autoneg != AUTONEG_DISABLE))
-               goto unlock;
+               return -EINVAL;
 
-       if ((cmd->autoneg == AUTONEG_DISABLE) &&
-           (((speed != SPEED_10000) && (speed != SPEED_1000)) ||
-            (cmd->duplex != DUPLEX_FULL)))
-               goto unlock;
+       if (cmd->autoneg == AUTONEG_DISABLE) {
+               switch (speed) {
+               case SPEED_10000:
+               case SPEED_2500:
+               case SPEED_1000:
+                       break;
+               default:
+                       return -EINVAL;
+               }
 
-       if (cmd->autoneg == AUTONEG_ENABLE) {
-               /* Clear settings needed to force speeds */
-               phydev->supported &= ~SUPPORTED_1000baseT_Full;
-               phydev->supported &= ~SUPPORTED_10000baseT_Full;
-       } else {
-               /* Add settings needed to force speed */
-               phydev->supported |= SUPPORTED_1000baseT_Full;
-               phydev->supported |= SUPPORTED_10000baseT_Full;
+               if (cmd->duplex != DUPLEX_FULL)
+                       return -EINVAL;
        }
 
        cmd->advertising &= phydev->supported;
        if ((cmd->autoneg == AUTONEG_ENABLE) && !cmd->advertising)
-               goto unlock;
+               return -EINVAL;
 
        ret = 0;
        phydev->autoneg = cmd->autoneg;
@@ -359,9 +352,6 @@ static int xgbe_set_settings(struct net_device *netdev,
        if (netif_running(netdev))
                ret = phy_start_aneg(phydev);
 
-unlock:
-       spin_unlock_irq(&pdata->lock);
-
        DBGPR("<--xgbe_set_settings\n");
 
        return ret;
@@ -490,6 +480,39 @@ static int xgbe_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
+static int xgbe_get_ts_info(struct net_device *netdev,
+                           struct ethtool_ts_info *ts_info)
+{
+       struct xgbe_prv_data *pdata = netdev_priv(netdev);
+
+       ts_info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+                                  SOF_TIMESTAMPING_RX_SOFTWARE |
+                                  SOF_TIMESTAMPING_SOFTWARE |
+                                  SOF_TIMESTAMPING_TX_HARDWARE |
+                                  SOF_TIMESTAMPING_RX_HARDWARE |
+                                  SOF_TIMESTAMPING_RAW_HARDWARE;
+
+       if (pdata->ptp_clock)
+               ts_info->phc_index = ptp_clock_index(pdata->ptp_clock);
+       else
+               ts_info->phc_index = -1;
+
+       ts_info->tx_types = (1 << HWTSTAMP_TX_OFF) | (1 << HWTSTAMP_TX_ON);
+       ts_info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+                             (1 << HWTSTAMP_FILTER_PTP_V1_L4_EVENT) |
+                             (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) |
+                             (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) |
+                             (1 << HWTSTAMP_FILTER_PTP_V2_L4_EVENT) |
+                             (1 << HWTSTAMP_FILTER_PTP_V2_L4_SYNC) |
+                             (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) |
+                             (1 << HWTSTAMP_FILTER_PTP_V2_EVENT) |
+                             (1 << HWTSTAMP_FILTER_PTP_V2_SYNC) |
+                             (1 << HWTSTAMP_FILTER_PTP_V2_DELAY_REQ) |
+                             (1 << HWTSTAMP_FILTER_ALL);
+
+       return 0;
+}
+
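+/* Illustrative userspace sketch (not part of this patch): the
+ * capabilities reported above can be read with "ethtool -T <iface>" or
+ * directly via the ethtool ioctl; the socket and interface name are
+ * assumptions for the example.
+ */
+#include <string.h>
+#include <sys/ioctl.h>
+#include <net/if.h>
+#include <linux/ethtool.h>
+#include <linux/sockios.h>
+
+static int query_ts_info(int sock, const char *ifname,
+                         struct ethtool_ts_info *info)
+{
+       struct ifreq ifr;
+
+       memset(info, 0, sizeof(*info));
+       info->cmd = ETHTOOL_GET_TS_INFO;
+
+       memset(&ifr, 0, sizeof(ifr));
+       strncpy(ifr.ifr_name, ifname, IFNAMSIZ - 1);
+       ifr.ifr_data = (void *)info;
+
+       /* on success, info->phc_index names the /dev/ptpN clock device */
+       return ioctl(sock, SIOCETHTOOL, &ifr);
+}
+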
 static const struct ethtool_ops xgbe_ethtool_ops = {
        .get_settings = xgbe_get_settings,
        .set_settings = xgbe_set_settings,
@@ -502,6 +525,7 @@ static const struct ethtool_ops xgbe_ethtool_ops = {
        .get_strings = xgbe_get_strings,
        .get_ethtool_stats = xgbe_get_ethtool_stats,
        .get_sset_count = xgbe_get_sset_count,
+       .get_ts_info = xgbe_get_ts_info,
 };
 
 struct ethtool_ops *xgbe_get_ethtool_ops(void)
index c83584a..ec977d3 100644 (file)
@@ -245,18 +245,19 @@ static int xgbe_probe(struct platform_device *pdev)
 
        spin_lock_init(&pdata->lock);
        mutex_init(&pdata->xpcs_mutex);
+       spin_lock_init(&pdata->tstamp_lock);
 
        /* Set and validate the number of descriptors for a ring */
-       BUILD_BUG_ON_NOT_POWER_OF_2(TX_DESC_CNT);
-       pdata->tx_desc_count = TX_DESC_CNT;
+       BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
+       pdata->tx_desc_count = XGBE_TX_DESC_CNT;
        if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
                dev_err(dev, "tx descriptor count (%d) is not valid\n",
                        pdata->tx_desc_count);
                ret = -EINVAL;
                goto err_io;
        }
-       BUILD_BUG_ON_NOT_POWER_OF_2(RX_DESC_CNT);
-       pdata->rx_desc_count = RX_DESC_CNT;
+       BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
+       pdata->rx_desc_count = XGBE_RX_DESC_CNT;
        if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
                dev_err(dev, "rx descriptor count (%d) is not valid\n",
                        pdata->rx_desc_count);
@@ -265,10 +266,18 @@ static int xgbe_probe(struct platform_device *pdev)
        }
 
        /* Obtain the system clock setting */
-       pdata->sysclock = devm_clk_get(dev, NULL);
-       if (IS_ERR(pdata->sysclock)) {
-               dev_err(dev, "devm_clk_get failed\n");
-               ret = PTR_ERR(pdata->sysclock);
+       pdata->sysclk = devm_clk_get(dev, XGBE_DMA_CLOCK);
+       if (IS_ERR(pdata->sysclk)) {
+               dev_err(dev, "dma devm_clk_get failed\n");
+               ret = PTR_ERR(pdata->sysclk);
+               goto err_io;
+       }
+
+       /* Obtain the PTP clock setting */
+       pdata->ptpclk = devm_clk_get(dev, XGBE_PTP_CLOCK);
+       if (IS_ERR(pdata->ptpclk)) {
+               dev_err(dev, "ptp devm_clk_get failed\n");
+               ret = PTR_ERR(pdata->ptpclk);
                goto err_io;
        }
 
@@ -297,6 +306,16 @@ static int xgbe_probe(struct platform_device *pdev)
        *(dev->dma_mask) = DMA_BIT_MASK(40);
        dev->coherent_dma_mask = DMA_BIT_MASK(40);
 
+       if (of_property_read_bool(dev->of_node, "dma-coherent")) {
+               pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
+               pdata->arcache = XGBE_DMA_OS_ARCACHE;
+               pdata->awcache = XGBE_DMA_OS_AWCACHE;
+       } else {
+               pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
+               pdata->arcache = XGBE_DMA_SYS_ARCACHE;
+               pdata->awcache = XGBE_DMA_SYS_AWCACHE;
+       }
+
        ret = platform_get_irq(pdev, 0);
        if (ret < 0) {
                dev_err(dev, "platform_get_irq failed\n");
@@ -336,10 +355,18 @@ static int xgbe_probe(struct platform_device *pdev)
        /* Set default configuration data */
        xgbe_default_config(pdata);
 
-       /* Calculate the number of Tx and Rx rings to be created */
+       /* Calculate the number of Tx and Rx rings to be created
+        *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
+        *   the number of Tx queues to the number of Tx channels
+        *   enabled
+        *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
+        *   number of Rx queues
+        */
        pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
                                     pdata->hw_feat.tx_ch_cnt);
-       if (netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count)) {
+       pdata->tx_q_count = pdata->tx_ring_count;
+       ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
+       if (ret) {
                dev_err(dev, "error setting real tx queue count\n");
                goto err_io;
        }
@@ -347,6 +374,7 @@ static int xgbe_probe(struct platform_device *pdev)
        pdata->rx_ring_count = min_t(unsigned int,
                                     netif_get_num_default_rss_queues(),
                                     pdata->hw_feat.rx_ch_cnt);
+       pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
        ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
        if (ret) {
                dev_err(dev, "error setting real rx queue count\n");
@@ -372,9 +400,12 @@ static int xgbe_probe(struct platform_device *pdev)
        if (ret)
                goto err_bus_id;
 
-       /* Set network and ethtool operations */
+       /* Set device operations */
        netdev->netdev_ops = xgbe_get_netdev_ops();
        netdev->ethtool_ops = xgbe_get_ethtool_ops();
+#ifdef CONFIG_AMD_XGBE_DCB
+       netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
+#endif
 
        /* Set device features */
        netdev->hw_features = NETIF_F_SG |
@@ -385,7 +416,8 @@ static int xgbe_probe(struct platform_device *pdev)
                              NETIF_F_TSO6 |
                              NETIF_F_GRO |
                              NETIF_F_HW_VLAN_CTAG_RX |
-                             NETIF_F_HW_VLAN_CTAG_TX;
+                             NETIF_F_HW_VLAN_CTAG_TX |
+                             NETIF_F_HW_VLAN_CTAG_FILTER;
 
        netdev->vlan_features |= NETIF_F_SG |
                                 NETIF_F_IP_CSUM |
@@ -396,6 +428,8 @@ static int xgbe_probe(struct platform_device *pdev)
        netdev->features |= netdev->hw_features;
        pdata->netdev_features = netdev->features;
 
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+
        xgbe_init_rx_coalesce(pdata);
        xgbe_init_tx_coalesce(pdata);
 
@@ -406,6 +440,8 @@ static int xgbe_probe(struct platform_device *pdev)
                goto err_reg_netdev;
        }
 
+       xgbe_ptp_register(pdata);
+
        xgbe_debugfs_init(pdata);
 
        netdev_notice(netdev, "net device enabled\n");
@@ -438,6 +474,8 @@ static int xgbe_remove(struct platform_device *pdev)
 
        xgbe_debugfs_exit(pdata);
 
+       xgbe_ptp_unregister(pdata);
+
        unregister_netdev(netdev);
 
        xgbe_mdio_unregister(pdata);
index ea7a5d6..eecd360 100644 (file)
@@ -163,7 +163,6 @@ static void xgbe_adjust_link(struct net_device *netdev)
        struct xgbe_prv_data *pdata = netdev_priv(netdev);
        struct xgbe_hw_if *hw_if = &pdata->hw_if;
        struct phy_device *phydev = pdata->phydev;
-       unsigned long flags;
        int new_state = 0;
 
        if (phydev == NULL)
@@ -172,8 +171,6 @@ static void xgbe_adjust_link(struct net_device *netdev)
        DBGPR_MDIO("-->xgbe_adjust_link: address=%d, newlink=%d, curlink=%d\n",
                   phydev->addr, phydev->link, pdata->phy_link);
 
-       spin_lock_irqsave(&pdata->lock, flags);
-
        if (phydev->link) {
                /* Flow control support */
                if (pdata->pause_autoneg) {
@@ -229,8 +226,6 @@ static void xgbe_adjust_link(struct net_device *netdev)
        if (new_state)
                phy_print_status(phydev);
 
-       spin_unlock_irqrestore(&pdata->lock, flags);
-
        DBGPR_MDIO("<--xgbe_adjust_link\n");
 }
 
@@ -375,10 +370,6 @@ int xgbe_mdio_register(struct xgbe_prv_data *pdata)
 
        phydev->autoneg = pdata->default_autoneg;
        if (phydev->autoneg == AUTONEG_DISABLE) {
-               /* Add settings needed to force speed */
-               phydev->supported |= SUPPORTED_1000baseT_Full;
-               phydev->supported |= SUPPORTED_10000baseT_Full;
-
                phydev->speed = pdata->default_speed;
                phydev->duplex = DUPLEX_FULL;
 
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c b/drivers/net/ethernet/amd/xgbe/xgbe-ptp.c
new file mode 100644 (file)
index 0000000..37e64cf
--- /dev/null
@@ -0,0 +1,285 @@
+/*
+ * AMD 10Gb Ethernet driver
+ *
+ * This file is available to you under your choice of the following two
+ * licenses:
+ *
+ * License 1: GPLv2
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ *
+ * This file is free software; you may copy, redistribute and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or (at
+ * your option) any later version.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ *
+ * License 2: Modified BSD
+ *
+ * Copyright (c) 2014 Advanced Micro Devices, Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *     * Redistributions of source code must retain the above copyright
+ *       notice, this list of conditions and the following disclaimer.
+ *     * Redistributions in binary form must reproduce the above copyright
+ *       notice, this list of conditions and the following disclaimer in the
+ *       documentation and/or other materials provided with the distribution.
+ *     * Neither the name of Advanced Micro Devices, Inc. nor the
+ *       names of its contributors may be used to endorse or promote products
+ *       derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * This file incorporates work covered by the following copyright and
+ * permission notice:
+ *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
+ *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
+ *     Inc. unless otherwise expressly agreed to in writing between Synopsys
+ *     and you.
+ *
+ *     The Software IS NOT an item of Licensed Software or Licensed Product
+ *     under any End User Software License Agreement or Agreement for Licensed
+ *     Product with Synopsys or any supplement thereto.  Permission is hereby
+ *     granted, free of charge, to any person obtaining a copy of this software
+ *     annotated with this license and the Software, to deal in the Software
+ *     without restriction, including without limitation the rights to use,
+ *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+ *     of the Software, and to permit persons to whom the Software is furnished
+ *     to do so, subject to the following conditions:
+ *
+ *     The above copyright notice and this permission notice shall be included
+ *     in all copies or substantial portions of the Software.
+ *
+ *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
+ *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+ *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
+ *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+ *     THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <linux/clk.h>
+#include <linux/clocksource.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+
+#include "xgbe.h"
+#include "xgbe-common.h"
+
+static cycle_t xgbe_cc_read(const struct cyclecounter *cc)
+{
+       struct xgbe_prv_data *pdata = container_of(cc,
+                                                  struct xgbe_prv_data,
+                                                  tstamp_cc);
+       u64 nsec;
+
+       nsec = pdata->hw_if.get_tstamp_time(pdata);
+
+       return nsec;
+}
+
+static int xgbe_adjfreq(struct ptp_clock_info *info, s32 delta)
+{
+       struct xgbe_prv_data *pdata = container_of(info,
+                                                  struct xgbe_prv_data,
+                                                  ptp_clock_info);
+       unsigned long flags;
+       u64 adjust;
+       u32 addend, diff;
+       unsigned int neg_adjust = 0;
+
+       if (delta < 0) {
+               neg_adjust = 1;
+               delta = -delta;
+       }
+
+       adjust = pdata->tstamp_addend;
+       adjust *= delta;
+       diff = div_u64(adjust, 1000000000UL);
+
+       addend = (neg_adjust) ? pdata->tstamp_addend - diff :
+                               pdata->tstamp_addend + diff;
+
+       spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+       pdata->hw_if.update_tstamp_addend(pdata, addend);
+
+       spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+       return 0;
+}
+
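+/* Worked example (assumed 250 MHz PTP reference clock): the base addend
+ * is (2^32 * 50000000) / 250000000 = 0x33333333.  For delta = +1000 ppb
+ * the scaled correction is diff = (0x33333333 * 1000) / 10^9 = 858, so
+ * the addend written back is 0x33333333 + 858.
+ */
+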
+static int xgbe_adjtime(struct ptp_clock_info *info, s64 delta)
+{
+       struct xgbe_prv_data *pdata = container_of(info,
+                                                  struct xgbe_prv_data,
+                                                  ptp_clock_info);
+       unsigned long flags;
+       u64 nsec;
+
+       spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+       nsec = timecounter_read(&pdata->tstamp_tc);
+
+       nsec += delta;
+       timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+       spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+       return 0;
+}
+
+static int xgbe_gettime(struct ptp_clock_info *info, struct timespec *ts)
+{
+       struct xgbe_prv_data *pdata = container_of(info,
+                                                  struct xgbe_prv_data,
+                                                  ptp_clock_info);
+       unsigned long flags;
+       u64 nsec;
+
+       spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+       nsec = timecounter_read(&pdata->tstamp_tc);
+
+       spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+       *ts = ns_to_timespec(nsec);
+
+       return 0;
+}
+
+static int xgbe_settime(struct ptp_clock_info *info, const struct timespec *ts)
+{
+       struct xgbe_prv_data *pdata = container_of(info,
+                                                  struct xgbe_prv_data,
+                                                  ptp_clock_info);
+       unsigned long flags;
+       u64 nsec;
+
+       nsec = timespec_to_ns(ts);
+
+       spin_lock_irqsave(&pdata->tstamp_lock, flags);
+
+       timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc, nsec);
+
+       spin_unlock_irqrestore(&pdata->tstamp_lock, flags);
+
+       return 0;
+}
+
+static int xgbe_enable(struct ptp_clock_info *info,
+                      struct ptp_clock_request *request, int on)
+{
+       return -EOPNOTSUPP;
+}
+
+void xgbe_ptp_register(struct xgbe_prv_data *pdata)
+{
+       struct ptp_clock_info *info = &pdata->ptp_clock_info;
+       struct ptp_clock *clock;
+       struct cyclecounter *cc = &pdata->tstamp_cc;
+       u64 dividend;
+
+       snprintf(info->name, sizeof(info->name), "%s",
+                netdev_name(pdata->netdev));
+       info->owner = THIS_MODULE;
+       info->max_adj = clk_get_rate(pdata->ptpclk);
+       info->adjfreq = xgbe_adjfreq;
+       info->adjtime = xgbe_adjtime;
+       info->gettime = xgbe_gettime;
+       info->settime = xgbe_settime;
+       info->enable = xgbe_enable;
+
+       clock = ptp_clock_register(info, pdata->dev);
+       if (IS_ERR(clock)) {
+               dev_err(pdata->dev, "ptp_clock_register failed\n");
+               return;
+       }
+
+       pdata->ptp_clock = clock;
+
+       /* Calculate the addend:
+        *   addend = 2^32 / (PTP ref clock / 50 MHz)
+        *          = (2^32 * 50 MHz) / PTP ref clock
+        */
+       dividend = 50000000;
+       dividend <<= 32;
+       pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));
+
+       /* Set up the timecounter */
+       cc->read = xgbe_cc_read;
+       cc->mask = CLOCKSOURCE_MASK(64);
+       cc->mult = 1;
+       cc->shift = 0;
+
+       timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
+                        ktime_to_ns(ktime_get_real()));
+
+       /* Disable all timestamping to start */
+       XGMAC_IOWRITE(pdata, MAC_TCR, 0);
+       pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+       pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+}
+
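+/* Illustration (assumed 250 MHz ptpclk): the addend computed above is
+ * (2^32 * 50000000) / 250000000 = 0x33333333, so the 32-bit accumulator
+ * overflows at an effective 50 MHz, i.e. one XGBE_TSTAMP_SSINC (20 ns)
+ * increment per overflow.  With cc->mult = 1 and cc->shift = 0 the
+ * timecounter treats get_tstamp_time() values directly as nanoseconds.
+ */
+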
+void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
+{
+       if (pdata->ptp_clock)
+               ptp_clock_unregister(pdata->ptp_clock);
+}
index ab06271..07bf70a 100644 (file)
 #include <linux/netdevice.h>
 #include <linux/workqueue.h>
 #include <linux/phy.h>
+#include <linux/if_vlan.h>
+#include <linux/bitops.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <net/dcbnl.h>
 
 
 #define XGBE_DRV_NAME          "amd-xgbe"
 #define XGBE_DRV_DESC          "AMD 10 Gigabit Ethernet Driver"
 
 /* Descriptor related defines */
-#define TX_DESC_CNT            512
-#define TX_DESC_MIN_FREE       (TX_DESC_CNT >> 3)
-#define TX_DESC_MAX_PROC       (TX_DESC_CNT >> 1)
-#define RX_DESC_CNT            512
+#define XGBE_TX_DESC_CNT       512
+#define XGBE_TX_DESC_MIN_FREE  (XGBE_TX_DESC_CNT >> 3)
+#define XGBE_TX_DESC_MAX_PROC  (XGBE_TX_DESC_CNT >> 1)
+#define XGBE_RX_DESC_CNT       512
 
-#define TX_MAX_BUF_SIZE                (0x3fff & ~(64 - 1))
+#define XGBE_TX_MAX_BUF_SIZE   (0x3fff & ~(64 - 1))
 
-#define RX_MIN_BUF_SIZE                (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
-#define RX_BUF_ALIGN           64
+#define XGBE_RX_MIN_BUF_SIZE   (ETH_FRAME_LEN + ETH_FCS_LEN + VLAN_HLEN)
+#define XGBE_RX_BUF_ALIGN      64
 
 #define XGBE_MAX_DMA_CHANNELS  16
-#define DMA_ARDOMAIN_SETTING   0x2
-#define DMA_ARCACHE_SETTING    0xb
-#define DMA_AWDOMAIN_SETTING   0x2
-#define DMA_AWCACHE_SETTING    0x7
-#define DMA_INTERRUPT_MASK     0x31c7
+#define XGBE_MAX_QUEUES                16
+
+/* DMA cache settings - Outer shareable, write-back, write-allocate */
+#define XGBE_DMA_OS_AXDOMAIN   0x2
+#define XGBE_DMA_OS_ARCACHE    0xb
+#define XGBE_DMA_OS_AWCACHE    0xf
+
+/* DMA cache settings - System, no caches used */
+#define XGBE_DMA_SYS_AXDOMAIN  0x3
+#define XGBE_DMA_SYS_ARCACHE   0x0
+#define XGBE_DMA_SYS_AWCACHE   0x0
+
+#define XGBE_DMA_INTERRUPT_MASK        0x31c7
 
 #define XGMAC_MIN_PACKET       60
 #define XGMAC_STD_PACKET_MTU   1500
 #define XGMAC_JUMBO_PACKET_MTU 9000
 #define XGMAC_MAX_JUMBO_PACKET 9018
 
-#define MAX_MULTICAST_LIST     14
-#define TX_FLAGS_IP_PKT                0x00000001
-#define TX_FLAGS_TCP_PKT       0x00000002
-
 /* MDIO bus phy name */
 #define XGBE_PHY_NAME          "amd_xgbe_phy"
 #define XGBE_PRTAD             0
 
+/* Device-tree clock names */
+#define XGBE_DMA_CLOCK         "dma_clk"
+#define XGBE_PTP_CLOCK         "ptp_clk"
+
+/* Timestamp support - values based on 50MHz PTP clock
+ *   50MHz => 20 nsec
+ */
+#define XGBE_TSTAMP_SSINC      20
+#define XGBE_TSTAMP_SNSINC     0
+
 /* Driver PMT macros */
 #define XGMAC_DRIVER_CONTEXT   1
 #define XGMAC_IOCTL_CONTEXT    2
 
-#define FIFO_SIZE_B(x)         (x)
-#define FIFO_SIZE_KB(x)                (x * 1024)
+#define XGBE_FIFO_SIZE_B(x)    (x)
+#define XGBE_FIFO_SIZE_KB(x)   (x * 1024)
 
-#define XGBE_TC_CNT            2
+#define XGBE_TC_MIN_QUANTUM    10
 
 /* Helper macro for descriptor handling
- *  Always use GET_DESC_DATA to access the descriptor data
+ *  Always use XGBE_GET_DESC_DATA to access the descriptor data
  *  since the index is free-running and needs to be ANDed
  *  with the descriptor count value of the ring to index to
  *  the proper descriptor data.
  */
-#define GET_DESC_DATA(_ring, _idx)                             \
+#define XGBE_GET_DESC_DATA(_ring, _idx)                                \
        ((_ring)->rdata +                                       \
         ((_idx) & ((_ring)->rdesc_count - 1)))
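 
 /* Example: with rdesc_count = 512, a free-running index of 513 resolves
  * to entry 513 & 511 = 1, which is why the descriptor counts are
  * validated with BUILD_BUG_ON_NOT_POWER_OF_2() in xgbe-main.c.
  */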
 
 
 /* Default coalescing parameters */
-#define XGMAC_INIT_DMA_TX_USECS                100
-#define XGMAC_INIT_DMA_TX_FRAMES       16
+#define XGMAC_INIT_DMA_TX_USECS                50
+#define XGMAC_INIT_DMA_TX_FRAMES       25
 
 #define XGMAC_MAX_DMA_RIWT             0xff
-#define XGMAC_INIT_DMA_RX_USECS                100
-#define XGMAC_INIT_DMA_RX_FRAMES       16
+#define XGMAC_INIT_DMA_RX_USECS                30
+#define XGMAC_INIT_DMA_RX_FRAMES       25
 
 /* Flow control queue count */
 #define XGMAC_MAX_FLOW_CONTROL_QUEUES  8
 
+/* Maximum MAC address hash table size (256 bits = 8 bytes) */
+#define XGBE_MAC_HASH_TABLE_SIZE       8
 
 struct xgbe_prv_data;
 
@@ -207,6 +229,8 @@ struct xgbe_packet_data {
        unsigned short mss;
 
        unsigned short vlan_ctag;
+
+       u64 rx_tstamp;
 };
 
 /* Common Rx and Tx descriptor mapping */
@@ -219,7 +243,7 @@ struct xgbe_ring_desc {
 
 /* Structure used to hold information related to the descriptor
  * and the packet associated with the descriptor (always
- * use the GET_DESC_DATA macro to access this data from the ring)
+ * use the XGBE_GET_DESC_DATA macro to access this data from the ring)
  */
 struct xgbe_ring_data {
        struct xgbe_ring_desc *rdesc;   /* Virtual address of descriptor */
@@ -235,6 +259,20 @@ struct xgbe_ring_data {
        unsigned int interrupt;         /* Interrupt indicator */
 
        unsigned int mapped_as_page;
+
+       /* Incomplete receive save location.  If the budget is exhausted
+        * or the last descriptor (last normal descriptor or a following
+        * context descriptor) has not been DMA'd yet, the current state
+        * of the receive processing needs to be saved.
+        */
+       unsigned int state_saved;
+       struct {
+               unsigned int incomplete;
+               unsigned int context_next;
+               struct sk_buff *skb;
+               unsigned int len;
+               unsigned int error;
+       } state;
 };
 
 struct xgbe_ring {
@@ -250,7 +288,7 @@ struct xgbe_ring {
        unsigned int rdesc_count;
 
        /* Array of descriptor data corresponding the descriptor memory
-        * (always use the GET_DESC_DATA macro to access this data)
+        * (always use the XGBE_GET_DESC_DATA macro to access this data)
         */
        struct xgbe_ring_data *rdata;
 
@@ -304,13 +342,13 @@ struct xgbe_channel {
 } ____cacheline_aligned;
 
 enum xgbe_int {
-       XGMAC_INT_DMA_ISR_DC0IS,
        XGMAC_INT_DMA_CH_SR_TI,
        XGMAC_INT_DMA_CH_SR_TPS,
        XGMAC_INT_DMA_CH_SR_TBU,
        XGMAC_INT_DMA_CH_SR_RI,
        XGMAC_INT_DMA_CH_SR_RBU,
        XGMAC_INT_DMA_CH_SR_RPS,
+       XGMAC_INT_DMA_CH_SR_TI_RI,
        XGMAC_INT_DMA_CH_SR_FBE,
        XGMAC_INT_DMA_ALL,
 };
@@ -386,7 +424,7 @@ struct xgbe_hw_if {
 
        int (*set_promiscuous_mode)(struct xgbe_prv_data *, unsigned int);
        int (*set_all_multicast_mode)(struct xgbe_prv_data *, unsigned int);
-       int (*set_addn_mac_addrs)(struct xgbe_prv_data *, unsigned int);
+       int (*add_mac_addresses)(struct xgbe_prv_data *);
        int (*set_mac_address)(struct xgbe_prv_data *, u8 *addr);
 
        int (*enable_rx_csum)(struct xgbe_prv_data *);
@@ -394,6 +432,9 @@ struct xgbe_hw_if {
 
        int (*enable_rx_vlan_stripping)(struct xgbe_prv_data *);
        int (*disable_rx_vlan_stripping)(struct xgbe_prv_data *);
+       int (*enable_rx_vlan_filtering)(struct xgbe_prv_data *);
+       int (*disable_rx_vlan_filtering)(struct xgbe_prv_data *);
+       int (*update_vlan_hash_table)(struct xgbe_prv_data *);
 
        int (*read_mmd_regs)(struct xgbe_prv_data *, int, int);
        void (*write_mmd_regs)(struct xgbe_prv_data *, int, int, int);
@@ -457,6 +498,18 @@ struct xgbe_hw_if {
        void (*rx_mmc_int)(struct xgbe_prv_data *);
        void (*tx_mmc_int)(struct xgbe_prv_data *);
        void (*read_mmc_stats)(struct xgbe_prv_data *);
+
+       /* For Timestamp config */
+       int (*config_tstamp)(struct xgbe_prv_data *, unsigned int);
+       void (*update_tstamp_addend)(struct xgbe_prv_data *, unsigned int);
+       void (*set_tstamp_time)(struct xgbe_prv_data *, unsigned int sec,
+                               unsigned int nsec);
+       u64 (*get_tstamp_time)(struct xgbe_prv_data *);
+       u64 (*get_tx_tstamp)(struct xgbe_prv_data *);
+
+       /* For Data Center Bridging config */
+       void (*config_dcb_tc)(struct xgbe_prv_data *);
+       void (*config_dcb_pfc)(struct xgbe_prv_data *);
 };
 
 struct xgbe_desc_if {
@@ -498,6 +551,7 @@ struct xgbe_hw_features {
        unsigned int tso;               /* TCP Segmentation Offload */
        unsigned int dma_debug;         /* DMA Debug Registers */
        unsigned int rss;               /* Receive Side Scaling */
+       unsigned int tc_cnt;            /* Number of Traffic Classes */
        unsigned int hash_table_size;   /* Hash Table Size */
        unsigned int l3l4_filter_num;   /* Number of L3-L4 Filters */
 
@@ -530,6 +584,11 @@ struct xgbe_prv_data {
        struct xgbe_hw_if hw_if;
        struct xgbe_desc_if desc_if;
 
+       /* AXI DMA settings */
+       unsigned int axdomain;
+       unsigned int arcache;
+       unsigned int awcache;
+
        /* Rings for Tx/Rx on a DMA channel */
        struct xgbe_channel *channel;
        unsigned int channel_count;
@@ -538,6 +597,9 @@ struct xgbe_prv_data {
        unsigned int rx_ring_count;
        unsigned int rx_desc_count;
 
+       unsigned int tx_q_count;
+       unsigned int rx_q_count;
+
        /* Tx/Rx common settings */
        unsigned int pblx8;
 
@@ -589,8 +651,30 @@ struct xgbe_prv_data {
        struct napi_struct napi;
        struct xgbe_mmc_stats mmc_stats;
 
-       /* System clock value used for Rx watchdog */
-       struct clk *sysclock;
+       /* Filtering support */
+       unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+       /* Device clocks */
+       struct clk *sysclk;
+       struct clk *ptpclk;
+
+       /* Timestamp support */
+       spinlock_t tstamp_lock;
+       struct ptp_clock_info ptp_clock_info;
+       struct ptp_clock *ptp_clock;
+       struct hwtstamp_config tstamp_config;
+       struct cyclecounter tstamp_cc;
+       struct timecounter tstamp_tc;
+       unsigned int tstamp_addend;
+       struct work_struct tx_tstamp_work;
+       struct sk_buff *tx_tstamp_skb;
+       u64 tx_tstamp;
+
+       /* DCB support */
+       struct ieee_ets *ets;
+       struct ieee_pfc *pfc;
+       unsigned int q2tc_map[XGBE_MAX_QUEUES];
+       unsigned int prio2q_map[IEEE_8021QAZ_MAX_TCS];
 
        /* Hardware features of the device */
        struct xgbe_hw_features hw_feat;
@@ -617,10 +701,15 @@ void xgbe_init_function_ptrs_dev(struct xgbe_hw_if *);
 void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *);
 struct net_device_ops *xgbe_get_netdev_ops(void);
 struct ethtool_ops *xgbe_get_ethtool_ops(void);
+#ifdef CONFIG_AMD_XGBE_DCB
+const struct dcbnl_rtnl_ops *xgbe_get_dcbnl_ops(void);
+#endif
 
 int xgbe_mdio_register(struct xgbe_prv_data *);
 void xgbe_mdio_unregister(struct xgbe_prv_data *);
 void xgbe_dump_phy_registers(struct xgbe_prv_data *);
+void xgbe_ptp_register(struct xgbe_prv_data *);
+void xgbe_ptp_unregister(struct xgbe_prv_data *);
 void xgbe_dump_tx_desc(struct xgbe_ring *, unsigned int, unsigned int,
                       unsigned int);
 void xgbe_dump_rx_desc(struct xgbe_ring *, struct xgbe_ring_desc *,
index 53f85bf..36cc9bd 100644
@@ -105,12 +105,10 @@ struct buffer_state {
 /**
  * struct arc_emac_priv - Storage of EMAC's private information.
  * @dev:       Pointer to the current device.
- * @ndev:      Pointer to the current network device.
  * @phy_dev:   Pointer to attached PHY device.
  * @bus:       Pointer to the current MII bus.
  * @regs:      Base address of EMAC memory-mapped control registers.
  * @napi:      Structure for NAPI.
- * @stats:     Network device statistics.
  * @rxbd:      Pointer to Rx BD ring.
  * @txbd:      Pointer to Tx BD ring.
  * @rxbd_dma:  DMA handle for Rx BD ring.
@@ -127,7 +125,6 @@ struct buffer_state {
 struct arc_emac_priv {
        /* Devices */
        struct device *dev;
-       struct net_device *ndev;
        struct phy_device *phy_dev;
        struct mii_bus *bus;
 
@@ -135,7 +132,6 @@ struct arc_emac_priv {
        struct clk *clk;
 
        struct napi_struct napi;
-       struct net_device_stats stats;
 
        struct arc_emac_bd *rxbd;
        struct arc_emac_bd *txbd;
index 18e2fac..fe5cfea 100644
@@ -140,7 +140,7 @@ static const struct ethtool_ops arc_emac_ethtool_ops = {
 static void arc_emac_tx_clean(struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        unsigned int i;
 
        for (i = 0; i < TX_BD_NUM; i++) {
@@ -202,7 +202,7 @@ static int arc_emac_rx(struct net_device *ndev, int budget)
 
        for (work_done = 0; work_done < budget; work_done++) {
                unsigned int *last_rx_bd = &priv->last_rx_bd;
-               struct net_device_stats *stats = &priv->stats;
+               struct net_device_stats *stats = &ndev->stats;
                struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];
                struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
                unsigned int pktlen, info = le32_to_cpu(rxbd->info);
@@ -318,7 +318,7 @@ static irqreturn_t arc_emac_intr(int irq, void *dev_instance)
 {
        struct net_device *ndev = dev_instance;
        struct arc_emac_priv *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        unsigned int status;
 
        status = arc_reg_get(priv, R_STATUS);
@@ -529,7 +529,7 @@ static int arc_emac_stop(struct net_device *ndev)
 static struct net_device_stats *arc_emac_stats(struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        unsigned long miss, rxerr;
        u8 rxcrc, rxfram, rxoflow;
 
@@ -565,7 +565,7 @@ static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
 {
        struct arc_emac_priv *priv = netdev_priv(ndev);
        unsigned int len, *txbd_curr = &priv->txbd_curr;
-       struct net_device_stats *stats = &priv->stats;
+       struct net_device_stats *stats = &ndev->stats;
        __le32 *info = &priv->txbd[*txbd_curr].info;
        dma_addr_t addr;
 
@@ -720,7 +720,6 @@ static int arc_emac_probe(struct platform_device *pdev)
 
        priv = netdev_priv(ndev);
        priv->dev = &pdev->dev;
-       priv->ndev = ndev;
 
        priv->regs = devm_ioremap_resource(&pdev->dev, &res_regs);
        if (IS_ERR(priv->regs)) {
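
The arc_emac hunks above all make the same substitution: the driver's private
copy of the statistics is dropped in favor of the one embedded in
struct net_device. A minimal sketch of the resulting idiom (the function is
hypothetical):

    static void count_rx_error(struct net_device *ndev)
    {
            /* was: priv->stats.rx_errors++ on a duplicated private copy */
            ndev->stats.rx_errors++;
    }
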
index 1cda49a..52fdfe2 100644
@@ -639,7 +639,6 @@ int atl1c_phy_init(struct atl1c_hw *hw)
                        dev_err(&pdev->dev, "Wrong Media type %d\n",
                                hw->media_type);
                return -1;
-               break;
        }
 
        ret_val = atl1c_write_phy_reg(hw, MII_BMCR, mii_bmcr_data);
@@ -682,7 +681,6 @@ int atl1c_get_speed_and_duplex(struct atl1c_hw *hw, u16 *speed, u16 *duplex)
                break;
        default:
                return -1;
-               break;
        }
 
        if (phy_data & GIGA_PSSR_DPLX)
index 923063d..113565d 100644
@@ -618,7 +618,6 @@ int atl1e_get_speed_and_duplex(struct atl1e_hw *hw, u16 *speed, u16 *duplex)
                break;
        default:
                return AT_ERR_PHY_SPEED;
-               break;
        }
 
        if (phy_data & MII_AT001_PSSR_DPLX)
index b460db7..1546d55 100644
@@ -910,7 +910,6 @@ static s32 atl1_get_speed_and_duplex(struct atl1_hw *hw, u16 *speed, u16 *duplex
                if (netif_msg_hw(adapter))
                        dev_dbg(&pdev->dev, "error getting speed\n");
                return ATLX_ERR_PHY_SPEED;
-               break;
        }
        if (phy_data & MII_ATLX_PSSR_DPLX)
                *duplex = FULL_DUPLEX;
index 6746bd7..c194bc6 100644
@@ -2493,7 +2493,6 @@ static s32 atl2_get_speed_and_duplex(struct atl2_hw *hw, u16 *speed,
                break;
        default:
                return ATLX_ERR_PHY_SPEED;
-               break;
        }
 
        if (phy_data & MII_ATLX_PSSR_DPLX)
@@ -2933,11 +2932,9 @@ static int atl2_validate_option(int *value, struct atl2_option *opt)
                case OPTION_ENABLED:
                        printk(KERN_INFO "%s Enabled\n", opt->name);
                        return 0;
-                       break;
                case OPTION_DISABLED:
                        printk(KERN_INFO "%s Disabled\n", opt->name);
                        return 0;
-                       break;
                }
                break;
        case range_option:
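
The atl1c/atl1e/atlx hunks above all delete the same pattern: a break placed
directly after a return, which can never execute. A compact sketch with a
hypothetical function:

    static int classify(int v)
    {
            switch (v) {
            case 0:
                    return 1;
            default:
                    return -1;
                    /* a break here would be dead code -- removed above */
            }
    }
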
index 3e48809..7dcfb19 100644
@@ -72,23 +72,23 @@ config BCMGENET
          Broadcom BCM7xxx Set Top Box family chipset.
 
 config BNX2
-       tristate "Broadcom NetXtremeII support"
+       tristate "QLogic NetXtremeII support"
        depends on PCI
        select CRC32
        select FW_LOADER
        ---help---
-         This driver supports Broadcom NetXtremeII gigabit Ethernet cards.
+         This driver supports QLogic NetXtremeII gigabit Ethernet cards.
 
          To compile this driver as a module, choose M here: the module
          will be called bnx2.  This is recommended.
 
 config CNIC
-       tristate "Broadcom CNIC support"
+       tristate "QLogic CNIC support"
        depends on PCI
        select BNX2
        select UIO
        ---help---
-         This driver supports offload features of Broadcom NetXtremeII
+         This driver supports offload features of QLogic NetXtremeII
          gigabit Ethernet cards.
 
          To compile this driver as a module, choose M here: the module
index 141160e..6f4e186 100644
@@ -81,14 +81,14 @@ static inline void dma_desc_set_addr(struct bcm_sysport_priv *priv,
 {
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        __raw_writel(upper_32_bits(addr) & DESC_ADDR_HI_MASK,
-                       d + DESC_ADDR_HI_STATUS_LEN);
+                    d + DESC_ADDR_HI_STATUS_LEN);
 #endif
        __raw_writel(lower_32_bits(addr), d + DESC_ADDR_LO);
 }
 
 static inline void tdma_port_write_desc_addr(struct bcm_sysport_priv *priv,
-                                               struct dma_desc *desc,
-                                               unsigned int port)
+                                            struct dma_desc *desc,
+                                            unsigned int port)
 {
        /* Ports are latched, so write upper address first */
        tdma_writel(priv, desc->addr_status_len, TDMA_WRITE_PORT_HI(port));
@@ -108,7 +108,7 @@ static int bcm_sysport_set_settings(struct net_device *dev,
 }
 
 static int bcm_sysport_get_settings(struct net_device *dev,
-                                       struct ethtool_cmd *cmd)
+                                   struct ethtool_cmd *cmd)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
 
@@ -119,14 +119,14 @@ static int bcm_sysport_get_settings(struct net_device *dev,
 }
 
 static int bcm_sysport_set_rx_csum(struct net_device *dev,
-                                       netdev_features_t wanted)
+                                  netdev_features_t wanted)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
 
-       priv->rx_csum_en = !!(wanted & NETIF_F_RXCSUM);
+       priv->rx_chk_en = !!(wanted & NETIF_F_RXCSUM);
        reg = rxchk_readl(priv, RXCHK_CONTROL);
-       if (priv->rx_csum_en)
+       if (priv->rx_chk_en)
                reg |= RXCHK_EN;
        else
                reg &= ~RXCHK_EN;
@@ -134,7 +134,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
        /* If UniMAC forwards CRC, we need to skip over it to get
         * a valid CHK bit to be set in the per-packet status word
         */
-       if (priv->rx_csum_en && priv->crc_fwd)
+       if (priv->rx_chk_en && priv->crc_fwd)
                reg |= RXCHK_SKIP_FCS;
        else
                reg &= ~RXCHK_SKIP_FCS;
@@ -145,7 +145,7 @@ static int bcm_sysport_set_rx_csum(struct net_device *dev,
 }
 
 static int bcm_sysport_set_tx_csum(struct net_device *dev,
-                                       netdev_features_t wanted)
+                                  netdev_features_t wanted)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        u32 reg;
@@ -165,7 +165,7 @@ static int bcm_sysport_set_tx_csum(struct net_device *dev,
 }
 
 static int bcm_sysport_set_features(struct net_device *dev,
-                                       netdev_features_t features)
+                                   netdev_features_t features)
 {
        netdev_features_t changed = features ^ dev->features;
        netdev_features_t wanted = dev->wanted_features;
@@ -261,7 +261,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
        /* RXCHK misc statistics */
        STAT_RXCHK("rxchk_bad_csum", mib.rxchk_bad_csum, RXCHK_BAD_CSUM_CNTR),
        STAT_RXCHK("rxchk_other_pkt_disc", mib.rxchk_other_pkt_disc,
-                       RXCHK_OTHER_DISC_CNTR),
+                  RXCHK_OTHER_DISC_CNTR),
        /* RBUF misc statistics */
        STAT_RBUF("rbuf_ovflow_cnt", mib.rbuf_ovflow_cnt, RBUF_OVFL_DISC_CNTR),
        STAT_RBUF("rbuf_err_cnt", mib.rbuf_err_cnt, RBUF_ERR_PKT_CNTR),
@@ -270,7 +270,7 @@ static const struct bcm_sysport_stats bcm_sysport_gstrings_stats[] = {
 #define BCM_SYSPORT_STATS_LEN  ARRAY_SIZE(bcm_sysport_gstrings_stats)
 
 static void bcm_sysport_get_drvinfo(struct net_device *dev,
-                                       struct ethtool_drvinfo *info)
+                                   struct ethtool_drvinfo *info)
 {
        strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
        strlcpy(info->version, "0.1", sizeof(info->version));
@@ -303,7 +303,7 @@ static int bcm_sysport_get_sset_count(struct net_device *dev, int string_set)
 }
 
 static void bcm_sysport_get_strings(struct net_device *dev,
-                                       u32 stringset, u8 *data)
+                                   u32 stringset, u8 *data)
 {
        int i;
 
@@ -311,8 +311,8 @@ static void bcm_sysport_get_strings(struct net_device *dev,
        case ETH_SS_STATS:
                for (i = 0; i < BCM_SYSPORT_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
-                               bcm_sysport_gstrings_stats[i].stat_string,
-                               ETH_GSTRING_LEN);
+                              bcm_sysport_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
                }
                break;
        default:
@@ -362,7 +362,7 @@ static void bcm_sysport_update_mib_counters(struct bcm_sysport_priv *priv)
 }
 
 static void bcm_sysport_get_stats(struct net_device *dev,
-                                       struct ethtool_stats *stats, u64 *data)
+                                 struct ethtool_stats *stats, u64 *data)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        int i;
@@ -384,6 +384,64 @@ static void bcm_sysport_get_stats(struct net_device *dev,
        }
 }
 
+static void bcm_sysport_get_wol(struct net_device *dev,
+                               struct ethtool_wolinfo *wol)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+       wol->wolopts = priv->wolopts;
+
+       if (!(priv->wolopts & WAKE_MAGICSECURE))
+               return;
+
+       /* Return the programmed SecureOn password */
+       reg = umac_readl(priv, UMAC_PSW_MS);
+       put_unaligned_be16(reg, &wol->sopass[0]);
+       reg = umac_readl(priv, UMAC_PSW_LS);
+       put_unaligned_be32(reg, &wol->sopass[2]);
+}
+
+static int bcm_sysport_set_wol(struct net_device *dev,
+                              struct ethtool_wolinfo *wol)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       u32 supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+
+       if (!device_can_wakeup(kdev))
+               return -ENOTSUPP;
+
+       if (wol->wolopts & ~supported)
+               return -EINVAL;
+
+       /* Program the SecureOn password */
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+                           UMAC_PSW_MS);
+               umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+                           UMAC_PSW_LS);
+       }
+
+       /* Flag the device and relevant IRQ as wakeup capable */
+       if (wol->wolopts) {
+               device_set_wakeup_enable(kdev, 1);
+               enable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = 0;
+       } else {
+               device_set_wakeup_enable(kdev, 0);
+               /* Avoid unbalanced disable_irq_wake calls */
+               if (!priv->wol_irq_disabled)
+                       disable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = 1;
+       }
+
+       priv->wolopts = wol->wolopts;
+
+       return 0;
+}
+
 static void bcm_sysport_free_cb(struct bcm_sysport_cb *cb)
 {
        dev_kfree_skb_any(cb->skb);
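
The Wake-on-LAN hunks above split the 6-byte SecureOn password across two
UniMAC registers: bytes 0-1 land in UMAC_PSW_MS and bytes 2-5 in UMAC_PSW_LS.
A sketch of the packing, assuming only the register layout shown in this
patch (the helper is hypothetical):

    #include <asm/unaligned.h>

    static void pack_sopass(const u8 sopass[SOPASS_MAX], u32 *ms, u32 *ls)
    {
            *ms = get_unaligned_be16(&sopass[0]);   /* bytes 0-1 */
            *ls = get_unaligned_be32(&sopass[2]);   /* bytes 2-5 */
    }
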
@@ -406,7 +464,7 @@ static int bcm_sysport_rx_refill(struct bcm_sysport_priv *priv,
        }
 
        mapping = dma_map_single(kdev, cb->skb->data,
-                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
        if (ret) {
                bcm_sysport_free_cb(cb);
@@ -470,22 +528,20 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                to_process = p_index - priv->rx_c_index;
 
        netif_dbg(priv, rx_status, ndev,
-                       "p_index=%d rx_c_index=%d to_process=%d\n",
-                       p_index, priv->rx_c_index, to_process);
-
-       while ((processed < to_process) &&
-               (processed < budget)) {
+                 "p_index=%d rx_c_index=%d to_process=%d\n",
+                 p_index, priv->rx_c_index, to_process);
 
+       while ((processed < to_process) && (processed < budget)) {
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = cb->skb;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-                               RX_BUF_LENGTH, DMA_FROM_DEVICE);
+                                RX_BUF_LENGTH, DMA_FROM_DEVICE);
 
                /* Extract the Receive Status Block prepended */
                rsb = (struct bcm_rsb *)skb->data;
                len = (rsb->rx_status_len >> DESC_LEN_SHIFT) & DESC_LEN_MASK;
                status = (rsb->rx_status_len >> DESC_STATUS_SHIFT) &
-                       DESC_STATUS_MASK;
+                         DESC_STATUS_MASK;
 
                processed++;
                priv->rx_read_ptr++;
@@ -493,9 +549,9 @@ static unsigned int bcm_sysport_desc_rx(struct bcm_sysport_priv *priv,
                        priv->rx_read_ptr = 0;
 
                netif_dbg(priv, rx_status, ndev,
-                               "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
-                               p_index, priv->rx_c_index, priv->rx_read_ptr,
-                               len, status);
+                         "p=%d, c=%d, rd_ptr=%d, len=%d, flag=0x%04x\n",
+                         p_index, priv->rx_c_index, priv->rx_read_ptr,
+                         len, status);
 
                if (unlikely(!skb)) {
                        netif_err(priv, rx_err, ndev, "out of memory!\n");
@@ -554,9 +610,9 @@ refill:
 }
 
 static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
-                                       struct bcm_sysport_cb *cb,
-                                       unsigned int *bytes_compl,
-                                       unsigned int *pkts_compl)
+                                      struct bcm_sysport_cb *cb,
+                                      unsigned int *bytes_compl,
+                                      unsigned int *pkts_compl)
 {
        struct device *kdev = &priv->pdev->dev;
        struct net_device *ndev = priv->netdev;
@@ -565,8 +621,8 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
                ndev->stats.tx_bytes += cb->skb->len;
                *bytes_compl += cb->skb->len;
                dma_unmap_single(kdev, dma_unmap_addr(cb, dma_addr),
-                               dma_unmap_len(cb, dma_len),
-                               DMA_TO_DEVICE);
+                                dma_unmap_len(cb, dma_len),
+                                DMA_TO_DEVICE);
                ndev->stats.tx_packets++;
                (*pkts_compl)++;
                bcm_sysport_free_cb(cb);
@@ -574,7 +630,7 @@ static void bcm_sysport_tx_reclaim_one(struct bcm_sysport_priv *priv,
        } else if (dma_unmap_addr(cb, dma_addr)) {
                ndev->stats.tx_bytes += dma_unmap_len(cb, dma_len);
                dma_unmap_page(kdev, dma_unmap_addr(cb, dma_addr),
-                               dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
+                              dma_unmap_len(cb, dma_len), DMA_TO_DEVICE);
                dma_unmap_addr_set(cb, dma_addr, 0);
        }
 }
@@ -608,8 +664,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                last_tx_cn = num_tx_cbs - last_c_index + c_index;
 
        netif_dbg(priv, tx_done, ndev,
-                       "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
-                       ring->index, c_index, last_tx_cn, last_c_index);
+                 "ring=%d c_index=%d last_tx_cn=%d last_c_index=%d\n",
+                 ring->index, c_index, last_tx_cn, last_c_index);
 
        while (last_tx_cn-- > 0) {
                cb = ring->cbs + last_c_index;
@@ -626,8 +682,8 @@ static unsigned int __bcm_sysport_tx_reclaim(struct bcm_sysport_priv *priv,
                netif_tx_wake_queue(txq);
 
        netif_dbg(priv, tx_done, ndev,
-                       "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
-                       ring->index, ring->c_index, pkts_compl, bytes_compl);
+                 "ring=%d c_index=%d pkts_compl=%d, bytes_compl=%d\n",
+                 ring->index, ring->c_index, pkts_compl, bytes_compl);
 
        return pkts_compl;
 }
@@ -654,13 +710,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
 
        work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
 
-       if (work_done < budget) {
+       if (work_done == 0) {
                napi_complete(napi);
                /* re-enable TX interrupt */
                intrl2_1_mask_clear(ring->priv, BIT(ring->index));
        }
 
-       return work_done;
+       return 0;
 }
 
 static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
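
The bcm_sysport_tx_poll() change above makes TX completion work cost nothing
as far as NAPI is concerned: the poll always reports zero consumed budget and
only completes once a reclaim pass found nothing left. A sketch of that
contract, with hypothetical helpers:

    static int tx_poll_sketch(struct napi_struct *napi, int budget)
    {
            unsigned int reclaimed = reclaim_tx(napi);   /* hypothetical */

            if (reclaimed == 0) {
                    napi_complete(napi);
                    enable_tx_irq(napi);                 /* hypothetical */
            }

            /* TX work never counts against the budget */
            return 0;
    }
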
@@ -692,6 +748,20 @@ static int bcm_sysport_poll(struct napi_struct *napi, int budget)
        return work_done;
 }
 
+static void bcm_sysport_resume_from_wol(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       /* Stop monitoring MPD interrupt */
+       intrl2_0_mask_set(priv, INTRL2_0_MPD);
+
+       /* Clear the MagicPacket detection logic */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg &= ~MPD_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
+}
 
 /* RX and misc interrupt routine */
 static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
@@ -722,6 +792,11 @@ static irqreturn_t bcm_sysport_rx_isr(int irq, void *dev_id)
        if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
                bcm_sysport_tx_reclaim_all(priv);
 
+       if (priv->irq0_stat & INTRL2_0_MPD) {
+               netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
+               bcm_sysport_resume_from_wol(priv);
+       }
+
        return IRQ_HANDLED;
 }
 
@@ -757,6 +832,15 @@ static irqreturn_t bcm_sysport_tx_isr(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t bcm_sysport_wol_isr(int irq, void *dev_id)
+{
+       struct bcm_sysport_priv *priv = dev_id;
+
+       pm_wakeup_event(&priv->pdev->dev, 0);
+
+       return IRQ_HANDLED;
+}
+
 static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
 {
        struct sk_buff *nskb;
@@ -804,8 +888,9 @@ static int bcm_sysport_insert_tsb(struct sk_buff *skb, struct net_device *dev)
                        csum_info |= L4_LENGTH_VALID;
                        if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
                                csum_info |= L4_UDP;
-               } else
+               } else {
                        csum_info = 0;
+               }
 
                tsb->l4_ptr_dest_map = csum_info;
        }
@@ -869,7 +954,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        mapping = dma_map_single(kdev, skb->data, skb_len, DMA_TO_DEVICE);
        if (dma_mapping_error(kdev, mapping)) {
                netif_err(priv, tx_err, dev, "DMA map failed at %p (len=%d)\n",
-                               skb->data, skb_len);
+                         skb->data, skb_len);
                ret = NETDEV_TX_OK;
                goto out;
        }
@@ -887,7 +972,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
        len_status = upper_32_bits(mapping) & DESC_ADDR_HI_MASK;
        len_status |= (skb_len << DESC_LEN_SHIFT);
        len_status |= (DESC_SOP | DESC_EOP | TX_STATUS_APP_CRC) <<
-                       DESC_STATUS_SHIFT;
+                      DESC_STATUS_SHIFT;
        if (skb->ip_summed == CHECKSUM_PARTIAL)
                len_status |= (DESC_L4_CSUM << DESC_STATUS_SHIFT);
 
@@ -912,7 +997,7 @@ static netdev_tx_t bcm_sysport_xmit(struct sk_buff *skb,
                netif_tx_stop_queue(txq);
 
        netif_dbg(priv, tx_queued, dev, "ring=%d desc_count=%d, curr_desc=%d\n",
-                       ring->index, ring->desc_count, ring->curr_desc);
+                 ring->index, ring->desc_count, ring->curr_desc);
 
        ret = NETDEV_TX_OK;
 out:
@@ -1010,7 +1095,7 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
                return -ENOMEM;
        }
 
-       ring->cbs = kzalloc(sizeof(struct bcm_sysport_cb) * size, GFP_KERNEL);
+       ring->cbs = kcalloc(size, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
        if (!ring->cbs) {
                netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
                return -ENOMEM;
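
The kzalloc-to-kcalloc conversions above (here and in the RX ring init
further down) allocate the same zeroed memory, except that kcalloc() also
checks the count-times-size multiplication for overflow. A sketch:

    static struct bcm_sysport_cb *alloc_cbs(unsigned int n)
    {
            /* kcalloc() rejects an overflowing n * sizeof(...); an open-coded
             * kzalloc(n * sizeof(...)) multiplication would silently wrap.
             */
            return kcalloc(n, sizeof(struct bcm_sysport_cb), GFP_KERNEL);
    }
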
@@ -1050,14 +1135,14 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
        napi_enable(&ring->napi);
 
        netif_dbg(priv, hw, priv->netdev,
-                       "TDMA cfg, size=%d, desc_cpu=%p\n",
-                       ring->size, ring->desc_cpu);
+                 "TDMA cfg, size=%d, desc_cpu=%p\n",
+                 ring->size, ring->desc_cpu);
 
        return 0;
 }
 
 static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
-                                       unsigned int index)
+                                    unsigned int index)
 {
        struct bcm_sysport_tx_ring *ring = &priv->tx_rings[index];
        struct device *kdev = &priv->pdev->dev;
@@ -1088,7 +1173,7 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
 
 /* RDMA helper */
 static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                 unsigned int enable)
 {
        unsigned int timeout = 1000;
        u32 reg;
@@ -1115,7 +1200,7 @@ static inline int rdma_enable_set(struct bcm_sysport_priv *priv,
 
 /* TDMA helper */
 static inline int tdma_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                 unsigned int enable)
 {
        unsigned int timeout = 1000;
        u32 reg;
@@ -1153,8 +1238,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
        priv->rx_bd_assign_index = 0;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
-       priv->rx_cbs = kzalloc(priv->num_rx_bds *
-                               sizeof(struct bcm_sysport_cb), GFP_KERNEL);
+       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct bcm_sysport_cb),
+                               GFP_KERNEL);
        if (!priv->rx_cbs) {
                netif_err(priv, hw, priv->netdev, "CB allocation failed\n");
                return -ENOMEM;
@@ -1186,8 +1271,8 @@ static int bcm_sysport_init_rx_ring(struct bcm_sysport_priv *priv)
        rdma_writel(priv, 1, RDMA_MBDONE_INTR);
 
        netif_dbg(priv, hw, priv->netdev,
-                       "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
-                       priv->num_rx_bds, priv->rx_bds);
+                 "RDMA cfg, num_rx_bds=%d, rx_bds=%p\n",
+                 priv->num_rx_bds, priv->rx_bds);
 
        return 0;
 }
@@ -1207,8 +1292,8 @@ static void bcm_sysport_fini_rx_ring(struct bcm_sysport_priv *priv)
                cb = &priv->rx_cbs[i];
                if (dma_unmap_addr(cb, dma_addr))
                        dma_unmap_single(&priv->pdev->dev,
-                                       dma_unmap_addr(cb, dma_addr),
-                                       RX_BUF_LENGTH, DMA_FROM_DEVICE);
+                                        dma_unmap_addr(cb, dma_addr),
+                                        RX_BUF_LENGTH, DMA_FROM_DEVICE);
                bcm_sysport_free_cb(cb);
        }
 
@@ -1236,15 +1321,15 @@ static void bcm_sysport_set_rx_mode(struct net_device *dev)
 }
 
 static inline void umac_enable_set(struct bcm_sysport_priv *priv,
-                                       unsigned int enable)
+                                  u32 mask, unsigned int enable)
 {
        u32 reg;
 
        reg = umac_readl(priv, UMAC_CMD);
        if (enable)
-               reg |= CMD_RX_EN | CMD_TX_EN;
+               reg |= mask;
        else
-               reg &= ~(CMD_RX_EN | CMD_TX_EN);
+               reg &= ~mask;
        umac_writel(priv, reg, UMAC_CMD);
 
        /* UniMAC stops on a packet boundary, wait for a full-sized packet
@@ -1254,32 +1339,21 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
                usleep_range(1000, 2000);
 }
 
-static inline int umac_reset(struct bcm_sysport_priv *priv)
+static inline void umac_reset(struct bcm_sysport_priv *priv)
 {
-       unsigned int timeout = 0;
        u32 reg;
-       int ret = 0;
 
-       umac_writel(priv, 0, UMAC_CMD);
-       while (timeout++ < 1000) {
-               reg = umac_readl(priv, UMAC_CMD);
-               if (!(reg & CMD_SW_RESET))
-                       break;
-
-               udelay(1);
-       }
-
-       if (timeout == 1000) {
-               dev_err(&priv->pdev->dev,
-                       "timeout waiting for MAC to come out of reset\n");
-               ret = -ETIMEDOUT;
-       }
-
-       return ret;
+       reg = umac_readl(priv, UMAC_CMD);
+       reg |= CMD_SW_RESET;
+       umac_writel(priv, reg, UMAC_CMD);
+       udelay(10);
+       reg = umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_SW_RESET;
+       umac_writel(priv, reg, UMAC_CMD);
 }
 
 static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
-                               unsigned char *addr)
+                            unsigned char *addr)
 {
        umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | addr[3], UMAC_MAC0);
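
With the mask argument added to umac_enable_set() above, callers can gate the
receiver and transmitter independently instead of always toggling both. These
calls appear verbatim later in this patch:

    umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);  /* both off         */
    umac_enable_set(priv, CMD_RX_EN, 0);              /* receiver only    */
    umac_enable_set(priv, CMD_TX_EN, 0);              /* transmitter only */
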
@@ -1295,30 +1369,48 @@ static void topctrl_flush(struct bcm_sysport_priv *priv)
        topctrl_writel(priv, 0, TX_FLUSH_CNTL);
 }
 
+static void bcm_sysport_netif_start(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+
+       /* Enable NAPI */
+       napi_enable(&priv->napi);
+
+       phy_start(priv->phydev);
+
+       /* Enable TX interrupts for the 32 TXQs */
+       intrl2_1_mask_clear(priv, 0xffffffff);
+
+       /* Last call before we start the real business */
+       netif_tx_start_all_queues(dev);
+}
+
+static void rbuf_init(struct bcm_sysport_priv *priv)
+{
+       u32 reg;
+
+       reg = rbuf_readl(priv, RBUF_CONTROL);
+       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
+       rbuf_writel(priv, reg, RBUF_CONTROL);
+}
+
 static int bcm_sysport_open(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
        unsigned int i;
-       u32 reg;
        int ret;
 
        /* Reset UniMAC */
-       ret = umac_reset(priv);
-       if (ret) {
-               netdev_err(dev, "UniMAC reset failed\n");
-               return ret;
-       }
+       umac_reset(priv);
 
        /* Flush TX and RX FIFOs at TOPCTRL level */
        topctrl_flush(priv);
 
        /* Disable the UniMAC RX/TX */
-       umac_enable_set(priv, 0);
+       umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 0);
 
        /* Enable RBUF 2bytes alignment and Receive Status Block */
-       reg = rbuf_readl(priv, RBUF_CONTROL);
-       reg |= RBUF_4B_ALGN | RBUF_RSB_EN;
-       rbuf_writel(priv, reg, RBUF_CONTROL);
+       rbuf_init(priv);
 
        /* Set maximum frame length */
        umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
@@ -1366,7 +1458,7 @@ static int bcm_sysport_open(struct net_device *dev)
                ret = bcm_sysport_init_tx_ring(priv, i);
                if (ret) {
                        netdev_err(dev, "failed to initialize TX ring %d\n",
-                                       i);
+                                  i);
                        goto out_free_tx_ring;
                }
        }
@@ -1394,19 +1486,10 @@ static int bcm_sysport_open(struct net_device *dev)
        if (ret)
                goto out_clear_rx_int;
 
-       /* Enable NAPI */
-       napi_enable(&priv->napi);
-
        /* Turn on UniMAC TX/RX */
-       umac_enable_set(priv, 1);
-
-       phy_start(priv->phydev);
+       umac_enable_set(priv, CMD_RX_EN | CMD_TX_EN, 1);
 
-       /* Enable TX interrupts for the 32 TXQs */
-       intrl2_1_mask_clear(priv, 0xffffffff);
-
-       /* Last call before we start the real business */
-       netif_tx_start_all_queues(dev);
+       bcm_sysport_netif_start(dev);
 
        return 0;
 
@@ -1425,12 +1508,9 @@ out_phy_disconnect:
        return ret;
 }
 
-static int bcm_sysport_stop(struct net_device *dev)
+static void bcm_sysport_netif_stop(struct net_device *dev)
 {
        struct bcm_sysport_priv *priv = netdev_priv(dev);
-       unsigned int i;
-       u32 reg;
-       int ret;
 
        /* stop all software from updating hardware */
        netif_tx_stop_all_queues(dev);
@@ -1442,11 +1522,18 @@ static int bcm_sysport_stop(struct net_device *dev)
        intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
        intrl2_1_mask_set(priv, 0xffffffff);
        intrl2_1_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
+}
+
+static int bcm_sysport_stop(struct net_device *dev)
+{
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       int ret;
+
+       bcm_sysport_netif_stop(dev);
 
        /* Disable UniMAC RX */
-       reg = umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_RX_EN;
-       umac_writel(priv, reg, UMAC_CMD);
+       umac_enable_set(priv, CMD_RX_EN, 0);
 
        ret = tdma_enable_set(priv, 0);
        if (ret) {
@@ -1464,9 +1551,7 @@ static int bcm_sysport_stop(struct net_device *dev)
        }
 
        /* Disable UniMAC TX */
-       reg = umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_TX_EN;
-       umac_writel(priv, reg, UMAC_CMD);
+       umac_enable_set(priv, CMD_TX_EN, 0);
 
        /* Free RX/TX rings SW structures */
        for (i = 0; i < dev->num_tx_queues; i++)
@@ -1492,6 +1577,8 @@ static struct ethtool_ops bcm_sysport_ethtool_ops = {
        .get_strings            = bcm_sysport_get_strings,
        .get_ethtool_stats      = bcm_sysport_get_stats,
        .get_sset_count         = bcm_sysport_get_sset_count,
+       .get_wol                = bcm_sysport_get_wol,
+       .set_wol                = bcm_sysport_set_wol,
 };
 
 static const struct net_device_ops bcm_sysport_netdev_ops = {
@@ -1533,6 +1620,7 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
+       priv->wol_irq = platform_get_irq(pdev, 2);
        if (priv->irq0 <= 0 || priv->irq1 <= 0) {
                dev_err(&pdev->dev, "invalid interrupts\n");
                ret = -EINVAL;
@@ -1585,16 +1673,17 @@ static int bcm_sysport_probe(struct platform_device *pdev)
        dev->hw_features |= NETIF_F_RXCSUM | NETIF_F_HIGHDMA |
                                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
 
+       /* Request the WOL interrupt and advertise wakeup if available */
+       priv->wol_irq_disabled = 1;
+       ret = devm_request_irq(&pdev->dev, priv->wol_irq,
+                              bcm_sysport_wol_isr, 0, dev->name, priv);
+       if (!ret)
+               device_set_wakeup_capable(&pdev->dev, 1);
+
        /* Set the needed headroom once and for all */
        BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
        dev->needed_headroom += sizeof(struct bcm_tsb);
 
-       /* We are interfaced to a switch which handles the multicast
-        * filtering for us, so we do not support programming any
-        * multicast hash table in this Ethernet MAC.
-        */
-       dev->flags &= ~IFF_MULTICAST;
-
        /* libphy will adjust the link state accordingly */
        netif_carrier_off(dev);
 
@@ -1606,10 +1695,10 @@ static int bcm_sysport_probe(struct platform_device *pdev)
 
        priv->rev = topctrl_readl(priv, REV_CNTL) & REV_MASK;
        dev_info(&pdev->dev,
-               "Broadcom SYSTEMPORT" REV_FMT
-               " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
-               (priv->rev >> 8) & 0xff, priv->rev & 0xff,
-               priv->base, priv->irq0, priv->irq1, txq, rxq);
+                "Broadcom SYSTEMPORT" REV_FMT
+                " at 0x%p (irqs: %d, %d, TXQs: %d, RXQs: %d)\n",
+                (priv->rev >> 8) & 0xff, priv->rev & 0xff,
+                priv->base, priv->irq0, priv->irq1, txq, rxq);
 
        return 0;
 err:
@@ -1631,6 +1720,208 @@ static int bcm_sysport_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int bcm_sysport_suspend_to_wol(struct bcm_sysport_priv *priv)
+{
+       struct net_device *ndev = priv->netdev;
+       unsigned int timeout = 1000;
+       u32 reg;
+
+       /* Password has already been programmed */
+       reg = umac_readl(priv, UMAC_MPD_CTRL);
+       reg |= MPD_EN;
+       reg &= ~PSW_EN;
+       if (priv->wolopts & WAKE_MAGICSECURE)
+               reg |= PSW_EN;
+       umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Make sure RBUF entered WoL mode as a result */
+       do {
+               reg = rbuf_readl(priv, RBUF_STATUS);
+               if (reg & RBUF_WOL_MODE)
+                       break;
+
+               udelay(10);
+       } while (timeout-- > 0);
+
+       /* Do not leave the UniMAC RBUF matching only MPD packets */
+       if (!timeout) {
+               reg = umac_readl(priv, UMAC_MPD_CTRL);
+               reg &= ~MPD_EN;
+               umac_writel(priv, reg, UMAC_MPD_CTRL);
+               netif_err(priv, wol, ndev, "failed to enter WOL mode\n");
+               return -ETIMEDOUT;
+       }
+
+       /* UniMAC receive needs to be turned on */
+       umac_enable_set(priv, CMD_RX_EN, 1);
+
+       /* Enable the interrupt wake-up source */
+       intrl2_0_mask_clear(priv, INTRL2_0_MPD);
+
+       netif_dbg(priv, wol, ndev, "entered WOL mode\n");
+
+       return 0;
+}
+
+static int bcm_sysport_suspend(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       int ret = 0;
+       u32 reg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       bcm_sysport_netif_stop(dev);
+
+       phy_suspend(priv->phydev);
+
+       netif_device_detach(dev);
+
+       /* Disable UniMAC RX */
+       umac_enable_set(priv, CMD_RX_EN, 0);
+
+       ret = rdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "RDMA timeout!\n");
+               return ret;
+       }
+
+       /* Disable RXCHK if enabled */
+       if (priv->rx_chk_en) {
+               reg = rxchk_readl(priv, RXCHK_CONTROL);
+               reg &= ~RXCHK_EN;
+               rxchk_writel(priv, reg, RXCHK_CONTROL);
+       }
+
+       /* Flush RX pipe */
+       if (!priv->wolopts)
+               topctrl_writel(priv, RX_FLUSH, RX_FLUSH_CNTL);
+
+       ret = tdma_enable_set(priv, 0);
+       if (ret) {
+               netdev_err(dev, "TDMA timeout!\n");
+               return ret;
+       }
+
+       /* Wait for a packet boundary */
+       usleep_range(2000, 3000);
+
+       umac_enable_set(priv, CMD_TX_EN, 0);
+
+       topctrl_writel(priv, TX_FLUSH, TX_FLUSH_CNTL);
+
+       /* Free RX/TX rings SW structures */
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       bcm_sysport_fini_rx_ring(priv);
+
+       /* Get prepared for Wake-on-LAN */
+       if (device_may_wakeup(d) && priv->wolopts)
+               ret = bcm_sysport_suspend_to_wol(priv);
+
+       return ret;
+}
+
+static int bcm_sysport_resume(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcm_sysport_priv *priv = netdev_priv(dev);
+       unsigned int i;
+       u32 reg;
+       int ret;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* We may have been suspended and never received a WOL event that
+        * would turn off MPD detection; take care of that now
+        */
+       bcm_sysport_resume_from_wol(priv);
+
+       /* Initialize both hardware and software ring */
+       for (i = 0; i < dev->num_tx_queues; i++) {
+               ret = bcm_sysport_init_tx_ring(priv, i);
+               if (ret) {
+                       netdev_err(dev, "failed to initialize TX ring %d\n",
+                                  i);
+                       goto out_free_tx_rings;
+               }
+       }
+
+       /* Initialize linked-list */
+       tdma_writel(priv, TDMA_LL_RAM_INIT_BUSY, TDMA_STATUS);
+
+       /* Initialize RX ring */
+       ret = bcm_sysport_init_rx_ring(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize RX ring\n");
+               goto out_free_rx_ring;
+       }
+
+       netif_device_attach(dev);
+
+       /* Enable RX interrupt and TX ring full interrupt */
+       intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
+
+       /* RX pipe enable */
+       topctrl_writel(priv, 0, RX_FLUSH_CNTL);
+
+       ret = rdma_enable_set(priv, 1);
+       if (ret) {
+               netdev_err(dev, "failed to enable RDMA\n");
+               goto out_free_rx_ring;
+       }
+
+       /* Re-enable RXCHK if it was enabled before suspend */
+       if (priv->rx_chk_en) {
+               reg = rxchk_readl(priv, RXCHK_CONTROL);
+               reg |= RXCHK_EN;
+               rxchk_writel(priv, reg, RXCHK_CONTROL);
+       }
+
+       rbuf_init(priv);
+
+       /* Set maximum frame length */
+       umac_writel(priv, UMAC_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
+
+       /* Set MAC address */
+       umac_set_hw_addr(priv, dev->dev_addr);
+
+       umac_enable_set(priv, CMD_RX_EN, 1);
+
+       /* TX pipe enable */
+       topctrl_writel(priv, 0, TX_FLUSH_CNTL);
+
+       umac_enable_set(priv, CMD_TX_EN, 1);
+
+       ret = tdma_enable_set(priv, 1);
+       if (ret) {
+               netdev_err(dev, "TDMA timeout!\n");
+               goto out_free_rx_ring;
+       }
+
+       phy_resume(priv->phydev);
+
+       bcm_sysport_netif_start(dev);
+
+       return 0;
+
+out_free_rx_ring:
+       bcm_sysport_fini_rx_ring(priv);
+out_free_tx_rings:
+       for (i = 0; i < dev->num_tx_queues; i++)
+               bcm_sysport_fini_tx_ring(priv, i);
+       return ret;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(bcm_sysport_pm_ops,
+               bcm_sysport_suspend, bcm_sysport_resume);
+
 static const struct of_device_id bcm_sysport_of_match[] = {
        { .compatible = "brcm,systemport-v1.00" },
        { .compatible = "brcm,systemport" },
@@ -1644,6 +1935,7 @@ static struct platform_driver bcm_sysport_driver = {
                .name = "brcm-systemport",
                .owner = THIS_MODULE,
                .of_match_table = bcm_sysport_of_match,
+               .pm = &bcm_sysport_pm_ops,
        },
 };
 module_platform_driver(bcm_sysport_driver);
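
For reference, the SIMPLE_DEV_PM_OPS() line above roughly expands to the
following when CONFIG_PM_SLEEP is enabled, which is why the suspend/resume
callbacks can live inside the #ifdef block (simplified sketch):

    static const struct dev_pm_ops bcm_sysport_pm_ops = {
            .suspend  = bcm_sysport_suspend,
            .resume   = bcm_sysport_resume,
            .freeze   = bcm_sysport_suspend,
            .thaw     = bcm_sysport_resume,
            .poweroff = bcm_sysport_suspend,
            .restore  = bcm_sysport_resume,
    };
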
index 281c082..b08dab8 100644
@@ -246,6 +246,15 @@ struct bcm_rsb {
 #define  MIB_RX_CNT_RST                        (1 << 0)
 #define  MIB_RUNT_CNT_RST              (1 << 1)
 #define  MIB_TX_CNT_RST                        (1 << 2)
+
+#define UMAC_MPD_CTRL                  0x620
+#define  MPD_EN                                (1 << 0)
+#define  MSEQ_LEN_SHIFT                        16
+#define  MSEQ_LEN_MASK                 0xff
+#define  PSW_EN                                (1 << 27)
+
+#define UMAC_PSW_MS                    0x624
+#define UMAC_PSW_LS                    0x628
 #define UMAC_MDF_CTRL                  0x650
 #define UMAC_MDF_ADDR                  0x654
 
@@ -642,6 +651,7 @@ struct bcm_sysport_priv {
        struct platform_device  *pdev;
        int                     irq0;
        int                     irq1;
+       int                     wol_irq;
 
        /* Transmit rings */
        struct bcm_sysport_tx_ring tx_rings[TDMA_NUM_RINGS];
@@ -664,10 +674,12 @@ struct bcm_sysport_priv {
        int                     old_duplex;
 
        /* Misc fields */
-       unsigned int            rx_csum_en:1;
+       unsigned int            rx_chk_en:1;
        unsigned int            tsb_en:1;
        unsigned int            crc_fwd:1;
        u16                     rev;
+       u32                     wolopts;
+       unsigned int            wol_irq_disabled:1;
 
        /* MIB related fields */
        struct bcm_sysport_mib  mib;
index 67d2b00..e64c963 100644
@@ -1,6 +1,7 @@
-/* bnx2.c: Broadcom NX2 network driver.
+/* bnx2.c: QLogic NX2 network driver.
  *
- * Copyright (c) 2004-2013 Broadcom Corporation
+ * Copyright (c) 2004-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #define TX_TIMEOUT  (5*HZ)
 
 static char version[] =
-       "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
+       "QLogic NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
-MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
+MODULE_DESCRIPTION("QLogic NetXtreme II BCM5706/5708/5709/5716 Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 MODULE_FIRMWARE(FW_MIPS_FILE_06);
index e341bc3..28df35d 100644
@@ -1,6 +1,7 @@
-/* bnx2.h: Broadcom NX2 network driver.
+/* bnx2.h: QLogic NX2 network driver.
  *
- * Copyright (c) 2004-2013 Broadcom Corporation
+ * Copyright (c) 2004-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 940eb91..7db79c2 100644
@@ -1,6 +1,7 @@
-/* bnx2_fw.h: Broadcom NX2 network driver.
+/* bnx2_fw.h: QLogic NX2 network driver.
  *
  * Copyright (c) 2004, 2005, 2006, 2007 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 4cab09d..d777fae 100644
@@ -346,6 +346,7 @@ struct sw_tx_bd {
        u8              flags;
 /* Set on the first BD descriptor when there is a split BD */
 #define BNX2X_TSO_SPLIT_BD             (1<<0)
+#define BNX2X_HAS_SECOND_PBD           (1<<1)
 };
 
 struct sw_rx_page {
@@ -1482,6 +1483,7 @@ struct bnx2x {
        union pf_vf_bulletin   *pf2vf_bulletin;
        dma_addr_t              pf2vf_bulletin_mapping;
 
+       union pf_vf_bulletin            shadow_bulletin;
        struct pf_vf_bulletin_content   old_bulletin;
 
        u16 requested_nr_virtfn;
@@ -1507,8 +1509,10 @@ struct bnx2x {
 /* TCP with Timestamp Option (32) + IPv6 (40) */
 #define ETH_MAX_TPA_HEADER_SIZE                72
 
-       /* Max supported alignment is 256 (8 shift) */
-#define BNX2X_RX_ALIGN_SHIFT           min(8, L1_CACHE_SHIFT)
+       /* Max supported alignment is 256 (8 shift)
+        * minimal alignment shift 6 is optimal for 57xxx HW performance
+        */
+#define BNX2X_RX_ALIGN_SHIFT           max(6, min(8, L1_CACHE_SHIFT))
 
        /* FW uses 2 Cache lines Alignment for start packet and size
         *
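
Worked examples of the new alignment clamp above, for common cache-line
sizes:

    /* BNX2X_RX_ALIGN_SHIFT = max(6, min(8, L1_CACHE_SHIFT))
     *
     *   L1_CACHE_SHIFT = 5 (32 B lines):  max(6, min(8, 5)) = 6  (was 5)
     *   L1_CACHE_SHIFT = 6 (64 B lines):  max(6, min(8, 6)) = 6  (unchanged)
     *   L1_CACHE_SHIFT = 8 (256 B lines): max(6, min(8, 8)) = 8  (unchanged)
     */
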
@@ -1928,6 +1932,8 @@ struct bnx2x {
        struct semaphore                        stats_sema;
 
        u8                                      phys_port_id[ETH_ALEN];
+
+       struct bnx2x_link_report_data           vf_link_vars;
 };
 
 /* Tx queues may be less or equal to Rx queues */
index 47c5814..4e6c82e 100644
@@ -227,6 +227,12 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
        --nbd;
        bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
 
+       if (tx_buf->flags & BNX2X_HAS_SECOND_PBD) {
+               /* Skip second parse bd... */
+               --nbd;
+               bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
+       }
+
        /* TSO headers+data bds share a common mapping. See bnx2x_tx_split() */
        if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
                tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd;
@@ -797,7 +803,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
 
                return;
        }
-       bnx2x_frag_free(fp, new_data);
+       if (new_data)
+               bnx2x_frag_free(fp, new_data);
 drop:
        /* drop the packet and keep the buffer in the bin */
        DP(NETIF_MSG_RX_STATUS,
@@ -1185,29 +1192,38 @@ u16 bnx2x_get_mf_speed(struct bnx2x *bp)
 static void bnx2x_fill_report_data(struct bnx2x *bp,
                                   struct bnx2x_link_report_data *data)
 {
-       u16 line_speed = bnx2x_get_mf_speed(bp);
-
        memset(data, 0, sizeof(*data));
 
-       /* Fill the report data: effective line speed */
-       data->line_speed = line_speed;
-
-       /* Link is down */
-       if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
-               __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
-                         &data->link_report_flags);
-
-       /* Full DUPLEX */
-       if (bp->link_vars.duplex == DUPLEX_FULL)
-               __set_bit(BNX2X_LINK_REPORT_FD, &data->link_report_flags);
-
-       /* Rx Flow Control is ON */
-       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
-               __set_bit(BNX2X_LINK_REPORT_RX_FC_ON, &data->link_report_flags);
-
-       /* Tx Flow Control is ON */
-       if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
-               __set_bit(BNX2X_LINK_REPORT_TX_FC_ON, &data->link_report_flags);
+       if (IS_PF(bp)) {
+               /* Fill the report data: effective line speed */
+               data->line_speed = bnx2x_get_mf_speed(bp);
+
+               /* Link is down */
+               if (!bp->link_vars.link_up || (bp->flags & MF_FUNC_DIS))
+                       __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                 &data->link_report_flags);
+
+               if (!BNX2X_NUM_ETH_QUEUES(bp))
+                       __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                 &data->link_report_flags);
+
+               /* Full DUPLEX */
+               if (bp->link_vars.duplex == DUPLEX_FULL)
+                       __set_bit(BNX2X_LINK_REPORT_FD,
+                                 &data->link_report_flags);
+
+               /* Rx Flow Control is ON */
+               if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX)
+                       __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+                                 &data->link_report_flags);
+
+               /* Tx Flow Control is ON */
+               if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
+                       __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+                                 &data->link_report_flags);
+       } else { /* VF */
+               *data = bp->vf_link_vars;
+       }
 }
 
 /**
@@ -1261,6 +1277,10 @@ void __bnx2x_link_report(struct bnx2x *bp)
         */
        memcpy(&bp->last_reported_link, &cur_data, sizeof(cur_data));
 
+       /* propagate status to VFs */
+       if (IS_PF(bp))
+               bnx2x_iov_link_update(bp);
+
        if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
                     &cur_data.link_report_flags)) {
                netif_carrier_off(bp->dev);
@@ -3888,6 +3908,9 @@ netdev_tx_t bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
                        /* set encapsulation flag in start BD */
                        SET_FLAG(tx_start_bd->general_data,
                                 ETH_TX_START_BD_TUNNEL_EXIST, 1);
+
+                       tx_buf->flags |= BNX2X_HAS_SECOND_PBD;
+
                        nbd++;
                } else if (xmit_type & XMIT_CSUM) {
                        /* Set PBD in checksum offload case w/o encapsulation */
index 51a952c..fb26bc4 100644
@@ -2303,8 +2303,8 @@ static int bnx2x_set_admin_app_up(struct bnx2x *bp, u8 idtype, u16 idval, u8 up)
        return 0;
 }
 
-static u8 bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
-                                u16 idval, u8 up)
+static int bnx2x_dcbnl_set_app_up(struct net_device *netdev, u8 idtype,
+                                 u16 idval, u8 up)
 {
        struct bnx2x *bp = netdev_priv(netdev);
 
index bd0600c..92fee84 100644
@@ -216,6 +216,43 @@ static int bnx2x_get_port_type(struct bnx2x *bp)
        return port_type;
 }
 
+static int bnx2x_get_vf_settings(struct net_device *dev,
+                                struct ethtool_cmd *cmd)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+
+       if (bp->state == BNX2X_STATE_OPEN) {
+               if (test_bit(BNX2X_LINK_REPORT_FD,
+                            &bp->vf_link_vars.link_report_flags))
+                       cmd->duplex = DUPLEX_FULL;
+               else
+                       cmd->duplex = DUPLEX_HALF;
+
+               ethtool_cmd_speed_set(cmd, bp->vf_link_vars.line_speed);
+       } else {
+               cmd->duplex = DUPLEX_UNKNOWN;
+               ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
+       }
+
+       cmd->port               = PORT_OTHER;
+       cmd->phy_address        = 0;
+       cmd->transceiver        = XCVR_INTERNAL;
+       cmd->autoneg            = AUTONEG_DISABLE;
+       cmd->maxtxpkt           = 0;
+       cmd->maxrxpkt           = 0;
+
+       DP(BNX2X_MSG_ETHTOOL, "ethtool_cmd: cmd %d\n"
+          "  supported 0x%x  advertising 0x%x  speed %u\n"
+          "  duplex %d  port %d  phy_address %d  transceiver %d\n"
+          "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
+          cmd->cmd, cmd->supported, cmd->advertising,
+          ethtool_cmd_speed(cmd),
+          cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
+          cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
+
+       return 0;
+}
+
 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
 {
        struct bnx2x *bp = netdev_priv(dev);
@@ -379,6 +416,7 @@ static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
                        break;
                case PORT_FIBRE:
                case PORT_DA:
+               case PORT_NONE:
                        if (!(bp->port.supported[0] & SUPPORTED_FIBRE ||
                              bp->port.supported[1] & SUPPORTED_FIBRE)) {
                                DP(BNX2X_MSG_ETHTOOL,
@@ -1110,6 +1148,10 @@ static u32 bnx2x_get_link(struct net_device *dev)
        if (bp->flags & MF_FUNC_DIS || (bp->state != BNX2X_STATE_OPEN))
                return 0;
 
+       if (IS_VF(bp))
+               return !test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                &bp->vf_link_vars.link_report_flags);
+
        return bp->link_vars.link_up;
 }
 
@@ -3484,8 +3526,7 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
 };
 
 static const struct ethtool_ops bnx2x_vf_ethtool_ops = {
-       .get_settings           = bnx2x_get_settings,
-       .set_settings           = bnx2x_set_settings,
+       .get_settings           = bnx2x_get_vf_settings,
        .get_drvinfo            = bnx2x_get_drvinfo,
        .get_msglevel           = bnx2x_get_msglevel,
        .set_msglevel           = bnx2x_set_msglevel,
index 2887034..3871ec4 100644
@@ -2698,6 +2698,14 @@ void bnx2x__link_status_update(struct bnx2x *bp)
                bp->link_vars.duplex = DUPLEX_FULL;
                bp->link_vars.flow_ctrl = BNX2X_FLOW_CTRL_NONE;
                __bnx2x_link_report(bp);
+
+               bnx2x_sample_bulletin(bp);
+
+               /* If the bulletin board did not have an update for the link
+                * status, __bnx2x_link_report will report the current status,
+                * but it will NOT send a duplicate report if the status was
+                * already reported while sampling the bulletin board.
+                */
                bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
        }
 }
@@ -12424,6 +12432,7 @@ static const struct net_device_ops bnx2x_netdev_ops = {
        .ndo_busy_poll          = bnx2x_low_latency_recv,
 #endif
        .ndo_get_phys_port_id   = bnx2x_get_phys_port_id,
+       .ndo_set_vf_link_state  = bnx2x_set_vf_link_state,
 };
 
 static int bnx2x_set_coherency_mask(struct bnx2x *bp)
@@ -12937,7 +12946,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
         * without the default SB.
         * For VFs there is no default SB, so we return (index+1).
         */
-       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control);
+       pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
 
        index = control & PCI_MSIX_FLAGS_QSIZE;
 
index eda8583..662310c 100644 (file)
 #include <linux/crc32.h>
 #include <linux/if_vlan.h>
 
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+                           struct bnx2x_virtf **vf,
+                           struct pf_vf_bulletin_content **bulletin,
+                           bool test_queue);
+
 /* General service functions */
 static void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
                                         u16 pf_id)
@@ -597,8 +602,7 @@ int bnx2x_vf_mcast(struct bnx2x *bp, struct bnx2x_virtf *vf,
        rc = bnx2x_config_mcast(bp, &mcast, BNX2X_MCAST_CMD_DEL);
        if (rc) {
                BNX2X_ERR("Failed to remove multicasts\n");
-               if (mc)
-                       kfree(mc);
+               kfree(mc);
                return rc;
        }
 
@@ -1328,6 +1332,8 @@ int bnx2x_iov_init_one(struct bnx2x *bp, int int_mode_param,
        /* Prepare the VFs event synchronization mechanism */
        mutex_init(&bp->vfdb->event_mutex);
 
+       mutex_init(&bp->vfdb->bulletin_mutex);
+
        return 0;
 failed:
        DP(BNX2X_MSG_IOV, "Failed err=%d\n", err);
@@ -1473,6 +1479,107 @@ static void bnx2x_vfq_init(struct bnx2x *bp, struct bnx2x_virtf *vf,
           vf->abs_vfid, q->sp_obj.func_id, q->cid);
 }
 
+static int bnx2x_max_speed_cap(struct bnx2x *bp)
+{
+       u32 supported = bp->port.supported[bnx2x_get_link_cfg_idx(bp)];
+
+       if (supported &
+           (SUPPORTED_20000baseMLD2_Full | SUPPORTED_20000baseKR2_Full))
+               return 20000;
+
+       return 10000; /* assume lowest supported speed is 10G */
+}
+
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx)
+{
+       struct bnx2x_link_report_data *state = &bp->last_reported_link;
+       struct pf_vf_bulletin_content *bulletin;
+       struct bnx2x_virtf *vf;
+       bool update = true;
+       int rc = 0;
+
+       /* sanity and init */
+       rc = bnx2x_vf_op_prep(bp, idx, &vf, &bulletin, false);
+       if (rc)
+               return rc;
+
+       mutex_lock(&bp->vfdb->bulletin_mutex);
+
+       if (vf->link_cfg == IFLA_VF_LINK_STATE_AUTO) {
+               bulletin->valid_bitmap |= 1 << LINK_VALID;
+
+               bulletin->link_speed = state->line_speed;
+               bulletin->link_flags = 0;
+               if (test_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+               if (test_bit(BNX2X_LINK_REPORT_FD,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_FULL_DUPLEX;
+               if (test_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_RX_FC_ON;
+               if (test_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+                            &state->link_report_flags))
+                       bulletin->link_flags |= VFPF_LINK_REPORT_TX_FC_ON;
+       } else if (vf->link_cfg == IFLA_VF_LINK_STATE_DISABLE &&
+                  !(bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+               bulletin->valid_bitmap |= 1 << LINK_VALID;
+               bulletin->link_flags |= VFPF_LINK_REPORT_LINK_DOWN;
+       } else if (vf->link_cfg == IFLA_VF_LINK_STATE_ENABLE &&
+                  (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)) {
+               bulletin->valid_bitmap |= 1 << LINK_VALID;
+               bulletin->link_speed = bnx2x_max_speed_cap(bp);
+               bulletin->link_flags &= ~VFPF_LINK_REPORT_LINK_DOWN;
+       } else {
+               update = false;
+       }
+
+       if (update) {
+               DP(NETIF_MSG_LINK | BNX2X_MSG_IOV,
+                  "vf %d mode %u speed %d flags %x\n", idx,
+                  vf->link_cfg, bulletin->link_speed, bulletin->link_flags);
+
+               /* Post update on VF's bulletin board */
+               rc = bnx2x_post_vf_bulletin(bp, idx);
+               if (rc) {
+                       BNX2X_ERR("failed to update VF[%d] bulletin\n", idx);
+                       goto out;
+               }
+       }
+
+out:
+       mutex_unlock(&bp->vfdb->bulletin_mutex);
+       return rc;
+}
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int idx, int link_state)
+{
+       struct bnx2x *bp = netdev_priv(dev);
+       struct bnx2x_virtf *vf = BP_VF(bp, idx);
+
+       if (!vf)
+               return -EINVAL;
+
+       if (vf->link_cfg == link_state)
+               return 0; /* nothing to do */
+
+       vf->link_cfg = link_state;
+
+       return bnx2x_iov_link_update_vf(bp, idx);
+}
+
+void bnx2x_iov_link_update(struct bnx2x *bp)
+{
+       int vfid;
+
+       if (!IS_SRIOV(bp))
+               return;
+
+       for_each_vf(bp, vfid)
+               bnx2x_iov_link_update_vf(bp, vfid);
+}
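
Taken together, the three routines above give the PF full control over each
VF's perceived link state. A sketch of how the new ndo is reached, assuming
the standard IFLA_VF_LINK_STATE_* values from <uapi/linux/if_link.h>; the
wrapper name is illustrative only:

	/* Illustrative caller: force VF 'idx' down, as "ip link set <pf>
	 * vf <idx> state disable" would via the rtnetlink path.
	 */
	static int example_vf_link_down(struct net_device *dev, int idx)
	{
		if (!dev->netdev_ops->ndo_set_vf_link_state)
			return -EOPNOTSUPP;
		return dev->netdev_ops->ndo_set_vf_link_state(dev, idx,
						IFLA_VF_LINK_STATE_DISABLE);
	}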
+
 /* called by bnx2x_nic_load */
 int bnx2x_iov_nic_init(struct bnx2x *bp)
 {
@@ -2510,22 +2617,23 @@ void bnx2x_disable_sriov(struct bnx2x *bp)
        pci_disable_sriov(bp->pdev);
 }
 
-static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
-                            struct bnx2x_virtf **vf,
-                            struct pf_vf_bulletin_content **bulletin)
+static int bnx2x_vf_op_prep(struct bnx2x *bp, int vfidx,
+                           struct bnx2x_virtf **vf,
+                           struct pf_vf_bulletin_content **bulletin,
+                           bool test_queue)
 {
        if (bp->state != BNX2X_STATE_OPEN) {
-               BNX2X_ERR("vf ndo called though PF is down\n");
+               BNX2X_ERR("PF is down - can't utilize iov-related functionality\n");
                return -EINVAL;
        }
 
        if (!IS_SRIOV(bp)) {
-               BNX2X_ERR("vf ndo called though sriov is disabled\n");
+               BNX2X_ERR("sriov is disabled - can't utilize iov-related functionality\n");
                return -EINVAL;
        }
 
        if (vfidx >= BNX2X_NR_VIRTFN(bp)) {
-               BNX2X_ERR("vf ndo called for uninitialized VF. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
+               BNX2X_ERR("VF is uninitialized - can't utilize iov-related functionality. vfidx was %d BNX2X_NR_VIRTFN was %d\n",
                          vfidx, BNX2X_NR_VIRTFN(bp));
                return -EINVAL;
        }
@@ -2535,19 +2643,18 @@ static int bnx2x_vf_ndo_prep(struct bnx2x *bp, int vfidx,
        *bulletin = BP_VF_BULLETIN(bp, vfidx);
 
        if (!*vf) {
-               BNX2X_ERR("vf ndo called but vf struct is null. vfidx was %d\n",
-                         vfidx);
+               BNX2X_ERR("Unable to get VF structure for vfidx %d\n", vfidx);
                return -EINVAL;
        }
 
-       if (!(*vf)->vfqs) {
-               BNX2X_ERR("vf ndo called but vfqs struct is null. Was ndo invoked before dynamically enabling SR-IOV? vfidx was %d\n",
+       if (test_queue && !(*vf)->vfqs) {
+               BNX2X_ERR("vfqs struct is null. Was this invoked before dynamically enabling SR-IOV? vfidx was %d\n",
                          vfidx);
                return -EINVAL;
        }
 
        if (!*bulletin) {
-               BNX2X_ERR("vf ndo called but Bulletin Board struct is null. vfidx was %d\n",
+               BNX2X_ERR("Bulletin Board struct is null for vfidx %d\n",
                          vfidx);
                return -EINVAL;
        }
@@ -2566,9 +2673,10 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
        int rc;
 
        /* sanity and init */
-       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
+       rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
        if (rc)
                return rc;
+
        mac_obj = &bnx2x_leading_vfq(vf, mac_obj);
        vlan_obj = &bnx2x_leading_vfq(vf, vlan_obj);
        if (!mac_obj || !vlan_obj) {
@@ -2591,6 +2699,7 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                                                 VLAN_HLEN);
                }
        } else {
+               mutex_lock(&bp->vfdb->bulletin_mutex);
                /* mac */
                if (bulletin->valid_bitmap & (1 << MAC_ADDR_VALID))
                        /* mac configured by ndo so it's in the bulletin board */
@@ -2606,6 +2715,8 @@ int bnx2x_get_vf_config(struct net_device *dev, int vfidx,
                else
                        /* function has not been loaded yet. Show vlans as 0s */
                        memset(&ivi->vlan, 0, VLAN_HLEN);
+
+               mutex_unlock(&bp->vfdb->bulletin_mutex);
        }
 
        return 0;
@@ -2635,15 +2746,18 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
        struct bnx2x_virtf *vf = NULL;
        struct pf_vf_bulletin_content *bulletin = NULL;
 
-       /* sanity and init */
-       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
-       if (rc)
-               return rc;
        if (!is_valid_ether_addr(mac)) {
                BNX2X_ERR("mac address invalid\n");
                return -EINVAL;
        }
 
+       /* sanity and init */
+       rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+       if (rc)
+               return rc;
+
+       mutex_lock(&bp->vfdb->bulletin_mutex);
+
        /* update PF's copy of the VF's bulletin. Will no longer accept mac
         * configuration requests from vf unless they match this mac
         */
@@ -2652,6 +2766,10 @@ int bnx2x_set_vf_mac(struct net_device *dev, int vfidx, u8 *mac)
 
        /* Post update on VF's bulletin board */
        rc = bnx2x_post_vf_bulletin(bp, vfidx);
+
+       /* release lock before checking return code */
+       mutex_unlock(&bp->vfdb->bulletin_mutex);
+
        if (rc) {
                BNX2X_ERR("failed to update VF[%d] bulletin\n", vfidx);
                return rc;
@@ -2716,11 +2834,6 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
        unsigned long accept_flags;
        int rc;
 
-       /* sanity and init */
-       rc = bnx2x_vf_ndo_prep(bp, vfidx, &vf, &bulletin);
-       if (rc)
-               return rc;
-
        if (vlan > 4095) {
                BNX2X_ERR("illegal vlan value %d\n", vlan);
                return -EINVAL;
@@ -2729,18 +2842,27 @@ int bnx2x_set_vf_vlan(struct net_device *dev, int vfidx, u16 vlan, u8 qos)
        DP(BNX2X_MSG_IOV, "configuring VF %d with VLAN %d qos %d\n",
           vfidx, vlan, 0);
 
+       /* sanity and init */
+       rc = bnx2x_vf_op_prep(bp, vfidx, &vf, &bulletin, true);
+       if (rc)
+               return rc;
+
        /* update PF's copy of the VF's bulletin. No point in posting the vlan
         * to the VF since it doesn't have anything to do with it. But it is
         * useful to store it here in case the VF is not up yet, so we can
         * configure the vlan later when it is. Treat vlan id 0 as removing
         * the host tag.
         */
+       mutex_lock(&bp->vfdb->bulletin_mutex);
+
        if (vlan > 0)
                bulletin->valid_bitmap |= 1 << VLAN_VALID;
        else
                bulletin->valid_bitmap &= ~(1 << VLAN_VALID);
        bulletin->vlan = vlan;
 
+       mutex_unlock(&bp->vfdb->bulletin_mutex);
+
        /* is vf initialized and queue set up? */
        if (vf->state != VF_ENABLED ||
            bnx2x_get_q_logical_state(bp, &bnx2x_leading_vfq(vf, sp_obj)) !=
@@ -2850,10 +2972,9 @@ out:
  * entire bulletin board excluding the crc field itself. Use the length field
  * as the Bulletin Board was posted by a PF with possibly a different version
  * from the vf which will sample it. Therefore, the length is computed by the
- * PF and the used blindly by the VF.
+ * PF and then used blindly by the VF.
  */
-u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
-                         struct pf_vf_bulletin_content *bulletin)
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin)
 {
        return crc32(BULLETIN_CRC_SEED,
                 ((u8 *)bulletin) + sizeof(bulletin->crc),
@@ -2863,47 +2984,74 @@ u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
 /* Check for new posts on the bulletin board */
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp)
 {
-       struct pf_vf_bulletin_content bulletin = bp->pf2vf_bulletin->content;
+       struct pf_vf_bulletin_content *bulletin;
        int attempts;
 
-       /* bulletin board hasn't changed since last sample */
-       if (bp->old_bulletin.version == bulletin.version)
-               return PFVF_BULLETIN_UNCHANGED;
+       /* Sampling the structure mid-post may yield corrupted data;
+        * validate the crc to ensure coherency.
+        */
+       for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
+               u32 crc;
 
-       /* validate crc of new bulletin board */
-       if (bp->old_bulletin.version != bp->pf2vf_bulletin->content.version) {
-               /* sampling structure in mid post may result with corrupted data
-                * validate crc to ensure coherency.
-                */
-               for (attempts = 0; attempts < BULLETIN_ATTEMPTS; attempts++) {
-                       bulletin = bp->pf2vf_bulletin->content;
-                       if (bulletin.crc == bnx2x_crc_vf_bulletin(bp,
-                                                                 &bulletin))
-                               break;
-                       BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
-                                 bulletin.crc,
-                                 bnx2x_crc_vf_bulletin(bp, &bulletin));
-               }
-               if (attempts >= BULLETIN_ATTEMPTS) {
-                       BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
-                                 attempts);
-                       return PFVF_BULLETIN_CRC_ERR;
-               }
+               /* sample the bulletin board */
+               memcpy(&bp->shadow_bulletin, bp->pf2vf_bulletin,
+                      sizeof(union pf_vf_bulletin));
+
+               crc = bnx2x_crc_vf_bulletin(&bp->shadow_bulletin.content);
+
+               if (bp->shadow_bulletin.content.crc == crc)
+                       break;
+
+               BNX2X_ERR("bad crc on bulletin board. Contained %x computed %x\n",
+                         bp->shadow_bulletin.content.crc, crc);
+       }
+
+       if (attempts >= BULLETIN_ATTEMPTS) {
+               BNX2X_ERR("pf to vf bulletin board crc was wrong %d consecutive times. Aborting\n",
+                         attempts);
+               return PFVF_BULLETIN_CRC_ERR;
        }
+       bulletin = &bp->shadow_bulletin.content;
+
+       /* bulletin board hasn't changed since last sample */
+       if (bp->old_bulletin.version == bulletin->version)
+               return PFVF_BULLETIN_UNCHANGED;
 
        /* the mac address in bulletin board is valid and is new */
-       if (bulletin.valid_bitmap & 1 << MAC_ADDR_VALID &&
-           !ether_addr_equal(bulletin.mac, bp->old_bulletin.mac)) {
+       if (bulletin->valid_bitmap & 1 << MAC_ADDR_VALID &&
+           !ether_addr_equal(bulletin->mac, bp->old_bulletin.mac)) {
                /* update new mac to net device */
-               memcpy(bp->dev->dev_addr, bulletin.mac, ETH_ALEN);
+               memcpy(bp->dev->dev_addr, bulletin->mac, ETH_ALEN);
+       }
+
+       if (bulletin->valid_bitmap & (1 << LINK_VALID)) {
+               DP(BNX2X_MSG_IOV, "link update speed %d flags %x\n",
+                  bulletin->link_speed, bulletin->link_flags);
+
+               bp->vf_link_vars.line_speed = bulletin->link_speed;
+               bp->vf_link_vars.link_report_flags = 0;
+               /* Link is down */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_LINK_DOWN)
+                       __set_bit(BNX2X_LINK_REPORT_LINK_DOWN,
+                                 &bp->vf_link_vars.link_report_flags);
+               /* Full DUPLEX */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_FULL_DUPLEX)
+                       __set_bit(BNX2X_LINK_REPORT_FD,
+                                 &bp->vf_link_vars.link_report_flags);
+               /* Rx Flow Control is ON */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_RX_FC_ON)
+                       __set_bit(BNX2X_LINK_REPORT_RX_FC_ON,
+                                 &bp->vf_link_vars.link_report_flags);
+               /* Tx Flow Control is ON */
+               if (bulletin->link_flags & VFPF_LINK_REPORT_TX_FC_ON)
+                       __set_bit(BNX2X_LINK_REPORT_TX_FC_ON,
+                                 &bp->vf_link_vars.link_report_flags);
+               __bnx2x_link_report(bp);
        }
 
-       /* the vlan in bulletin board is valid and is new */
-       if (bulletin.valid_bitmap & 1 << VLAN_VALID)
-               memcpy(&bulletin.vlan, &bp->old_bulletin.vlan, VLAN_HLEN);
-
        /* copy new bulletin board to bp */
-       bp->old_bulletin = bulletin;
+       memcpy(&bp->old_bulletin, bulletin,
+              sizeof(struct pf_vf_bulletin_content));
 
        return PFVF_BULLETIN_UPDATED;
 }
@@ -2948,6 +3096,8 @@ int bnx2x_vf_pci_alloc(struct bnx2x *bp)
        if (!bp->pf2vf_bulletin)
                goto alloc_mem_err;
 
+       bnx2x_vf_bulletin_finalize(&bp->pf2vf_bulletin->content, true);
+
        return 0;
 
 alloc_mem_err:
index 96c575e..ca1055f 100644 (file)
@@ -126,7 +126,11 @@ struct bnx2x_virtf {
 #define VF_CACHE_LINE          0x0010
 #define VF_CFG_VLAN            0x0020
 #define VF_CFG_STATS_COALESCE  0x0040
-
+#define VF_CFG_EXT_BULLETIN    0x0080
+       u8 link_cfg;            /* IFLA_VF_LINK_STATE_AUTO
+                                * IFLA_VF_LINK_STATE_ENABLE
+                                * IFLA_VF_LINK_STATE_DISABLE
+                                */
        u8 state;
 #define VF_FREE                0       /* VF ready to be acquired holds no resc */
 #define VF_ACQUIRED    1       /* VF acquired, but not initialized */
@@ -295,22 +299,22 @@ struct bnx2x_vfdb {
 #define BP_VFDB(bp)            ((bp)->vfdb)
        /* vf array */
        struct bnx2x_virtf      *vfs;
-#define BP_VF(bp, idx)         (&((bp)->vfdb->vfs[(idx)]))
-#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[(idx)].var)
+#define BP_VF(bp, idx)         (&((bp)->vfdb->vfs[idx]))
+#define bnx2x_vf(bp, idx, var) ((bp)->vfdb->vfs[idx].var)
 
        /* queue array - for all vfs */
        struct bnx2x_vf_queue *vfqs;
 
        /* vf HW contexts */
        struct hw_dma           context[BNX2X_VF_CIDS/ILT_PAGE_CIDS];
-#define        BP_VF_CXT_PAGE(bp, i)   (&(bp)->vfdb->context[(i)])
+#define        BP_VF_CXT_PAGE(bp, i)   (&(bp)->vfdb->context[i])
 
        /* SR-IOV information */
        struct bnx2x_sriov      sriov;
        struct hw_dma           mbx_dma;
 #define BP_VF_MBX_DMA(bp)      (&((bp)->vfdb->mbx_dma))
        struct bnx2x_vf_mbx     mbxs[BNX2X_MAX_NUM_OF_VFS];
-#define BP_VF_MBX(bp, vfid)    (&((bp)->vfdb->mbxs[(vfid)]))
+#define BP_VF_MBX(bp, vfid)    (&((bp)->vfdb->mbxs[vfid]))
 
        struct hw_dma           bulletin_dma;
 #define BP_VF_BULLETIN_DMA(bp) (&((bp)->vfdb->bulletin_dma))
@@ -336,6 +340,9 @@ struct bnx2x_vfdb {
        /* sp_rtnl synchronization */
        struct mutex                    event_mutex;
        u64                             event_occur;
+
+       /* bulletin board update synchronization */
+       struct mutex                    bulletin_mutex;
 };
 
 /* queue access */
@@ -467,9 +474,10 @@ void bnx2x_vf_handle_flr_event(struct bnx2x *bp);
 
 bool bnx2x_tlv_supported(u16 tlvtype);
 
-u32 bnx2x_crc_vf_bulletin(struct bnx2x *bp,
-                         struct pf_vf_bulletin_content *bulletin);
+u32 bnx2x_crc_vf_bulletin(struct pf_vf_bulletin_content *bulletin);
 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf);
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+                               bool support_long);
 
 enum sample_bulletin_result bnx2x_sample_bulletin(struct bnx2x *bp);
 
@@ -520,6 +528,11 @@ void bnx2x_iov_task(struct work_struct *work);
 
 void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag);
 
+void bnx2x_iov_link_update(struct bnx2x *bp);
+int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx);
+
+int bnx2x_set_vf_link_state(struct net_device *dev, int vf, int link_state);
+
 #else /* CONFIG_BNX2X_SRIOV */
 
 static inline void bnx2x_iov_set_queue_sp_obj(struct bnx2x *bp, int vf_cid,
@@ -579,6 +592,14 @@ static inline void bnx2x_iov_channel_down(struct bnx2x *bp) {}
 
 static inline void bnx2x_iov_task(struct work_struct *work) {}
 static inline void bnx2x_schedule_iov_task(struct bnx2x *bp, enum bnx2x_iov_flag flag) {}
+static inline void bnx2x_iov_link_update(struct bnx2x *bp) {}
+static inline int bnx2x_iov_link_update_vf(struct bnx2x *bp, int idx) {return 0; }
+
+static inline int bnx2x_set_vf_link_state(struct net_device *dev, int vf,
+                                         int link_state) {return 0; }
+struct pf_vf_bulletin_content;
+static inline void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+                                             bool support_long) {}
 
 #endif /* CONFIG_BNX2X_SRIOV */
 #endif /* bnx2x_sriov.h */
index d712d0d..54e0427 100644 (file)
@@ -251,6 +251,9 @@ int bnx2x_vfpf_acquire(struct bnx2x *bp, u8 tx_count, u8 rx_count)
        bnx2x_add_tlv(bp, req, req->first_tlv.tl.length,
                      CHANNEL_TLV_PHYS_PORT_ID, sizeof(struct channel_tlv));
 
+       /* Signal support for bulletin boards longer than the legacy length */
+       req->vfdev_info.caps |= VF_CAP_SUPPORT_EXT_BULLETIN;
+
        /* add list termination tlv */
        bnx2x_add_tlv(bp, req,
                      req->first_tlv.tl.length + sizeof(struct channel_tlv),
@@ -1232,6 +1235,41 @@ static void bnx2x_vf_mbx_acquire_resp(struct bnx2x *bp, struct bnx2x_virtf *vf,
        bnx2x_vf_mbx_resp_send_msg(bp, vf, vfop_status);
 }
 
+static bool bnx2x_vf_mbx_is_windows_vm(struct bnx2x *bp,
+                                      struct vfpf_acquire_tlv *acquire)
+{
+       /* Windows driver does one of three things:
+        * 1. Old driver doesn't have bulletin board address set.
+        * 2. 'Middle' driver sends mc_num == 32.
+        * 3. New driver sets the OS field.
+        */
+       if (!acquire->bulletin_addr ||
+           acquire->resc_request.num_mc_filters == 32 ||
+           ((acquire->vfdev_info.vf_os & VF_OS_MASK) ==
+            VF_OS_WINDOWS))
+               return true;
+
+       return false;
+}
+
+static int bnx2x_vf_mbx_acquire_chk_dorq(struct bnx2x *bp,
+                                        struct bnx2x_virtf *vf,
+                                        struct bnx2x_vf_mbx *mbx)
+{
+       /* Linux drivers which correctly set the doorbell size also
+        * send a physical port request
+        */
+       if (bnx2x_search_tlv_list(bp, &mbx->msg->req,
+                                 CHANNEL_TLV_PHYS_PORT_ID))
+               return 0;
+
+       /* Issue does not exist in windows VMs */
+       if (bnx2x_vf_mbx_is_windows_vm(bp, &mbx->msg->req.acquire))
+               return 0;
+
+       return -EOPNOTSUPP;
+}
+
 static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
                                 struct bnx2x_vf_mbx *mbx)
 {
@@ -1247,12 +1285,32 @@ static void bnx2x_vf_mbx_acquire(struct bnx2x *bp, struct bnx2x_virtf *vf,
           acquire->resc_request.num_vlan_filters,
           acquire->resc_request.num_mc_filters);
 
+       /* Prevent VFs with old drivers from loading, since they calculate
+        * CIDs incorrectly, requiring a VF-flr [VM reboot] to recover
+        * while being upgraded.
+        */
+       rc = bnx2x_vf_mbx_acquire_chk_dorq(bp, vf, mbx);
+       if (rc) {
+               DP(BNX2X_MSG_IOV,
+                  "VF [%d] - Can't support acquire request due to doorbell mismatch. Please update VM driver\n",
+                  vf->abs_vfid);
+               goto out;
+       }
+
        /* acquire the resources */
        rc = bnx2x_vf_acquire(bp, vf, &acquire->resc_request);
 
        /* store address of vf's bulletin board */
        vf->bulletin_map = acquire->bulletin_addr;
+       if (acquire->vfdev_info.caps & VF_CAP_SUPPORT_EXT_BULLETIN) {
+               DP(BNX2X_MSG_IOV, "VF[%d] supports long bulletin boards\n",
+                  vf->abs_vfid);
+               vf->cfg_flags |= VF_CFG_EXT_BULLETIN;
+       } else {
+               vf->cfg_flags &= ~VF_CFG_EXT_BULLETIN;
+       }
 
+out:
        /* response */
        bnx2x_vf_mbx_acquire_resp(bp, vf, mbx, rc);
 }
@@ -1273,6 +1331,10 @@ static void bnx2x_vf_mbx_init_vf(struct bnx2x *bp, struct bnx2x_virtf *vf,
        if (init->flags & VFPF_INIT_FLG_STATS_COALESCE)
                vf->cfg_flags |= VF_CFG_STATS_COALESCE;
 
+       /* Update VF's view of link state */
+       if (vf->cfg_flags & VF_CFG_EXT_BULLETIN)
+               bnx2x_iov_link_update_vf(bp, vf->index);
+
        /* response */
        bnx2x_vf_mbx_resp(bp, vf, rc);
 }
@@ -2007,6 +2069,17 @@ void bnx2x_vf_mbx(struct bnx2x *bp)
        }
 }
 
+void bnx2x_vf_bulletin_finalize(struct pf_vf_bulletin_content *bulletin,
+                               bool support_long)
+{
+       /* Older VFs contain a bug where they can't check CRC for bulletin
+        * boards of length greater than legacy size.
+        * boards of length greater than the legacy size.
+       bulletin->length = support_long ? BULLETIN_CONTENT_SIZE :
+                                         BULLETIN_CONTENT_LEGACY_SIZE;
+       bulletin->crc = bnx2x_crc_vf_bulletin(bulletin);
+}
+
 /* propagate local bulletin board to vf */
 int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
 {
@@ -2023,8 +2096,9 @@ int bnx2x_post_vf_bulletin(struct bnx2x *bp, int vf)
 
        /* increment bulletin board version and compute crc */
        bulletin->version++;
-       bulletin->length = BULLETIN_CONTENT_SIZE;
-       bulletin->crc = bnx2x_crc_vf_bulletin(bp, bulletin);
+       bnx2x_vf_bulletin_finalize(bulletin,
+                                  (bnx2x_vf(bp, vf, cfg_flags) &
+                                   VF_CFG_EXT_BULLETIN) ? true : false);
 
        /* propagate bulletin board via dmae to vm memory */
        rc = bnx2x_copy32_vf_dmae(bp, false, pf_addr,
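
Since the PF now writes the length it actually used, a VF can validate either
bulletin layout from the posted data alone. A minimal VF-side sketch under
that assumption (the helper name is hypothetical; crc32() and
BULLETIN_CRC_SEED as in this file):

	static bool example_bulletin_crc_ok(struct pf_vf_bulletin_content *b)
	{
		/* The CRC covers everything after the crc field itself, using
		 * the PF-posted length, exactly as bnx2x_crc_vf_bulletin() does.
		 */
		return b->crc == crc32(BULLETIN_CRC_SEED,
				       (u8 *)b + sizeof(b->crc),
				       b->length - sizeof(b->crc));
	}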
index e21e706..15670c4 100644 (file)
@@ -65,6 +65,7 @@ struct hw_sb_info {
 #define VFPF_RX_MASK_ACCEPT_ALL_MULTICAST      0x00000008
 #define VFPF_RX_MASK_ACCEPT_BROADCAST          0x00000010
 #define BULLETIN_CONTENT_SIZE          (sizeof(struct pf_vf_bulletin_content))
+#define BULLETIN_CONTENT_LEGACY_SIZE   (32)
 #define BULLETIN_ATTEMPTS      5 /* crc failures before throwing in the towel */
 #define BULLETIN_CRC_SEED      0
 
@@ -117,7 +118,15 @@ struct vfpf_acquire_tlv {
                /* the following fields are for debug purposes */
                u8  vf_id;              /* ME register value */
                u8  vf_os;              /* e.g. Linux, W2K8 */
-               u8 padding[2];
+#define VF_OS_SUBVERSION_MASK  (0x1f)
+#define VF_OS_MASK             (0xe0)
+#define VF_OS_SHIFT            (5)
+#define VF_OS_UNDEFINED                (0 << VF_OS_SHIFT)
+#define VF_OS_WINDOWS          (1 << VF_OS_SHIFT)
+
+               u8 padding;
+               u8 caps;
+#define VF_CAP_SUPPORT_EXT_BULLETIN    (1 << 0)
        } vfdev_info;
 
        struct vf_pf_resc_request resc_request;
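
For clarity, a small sketch of the vf_os encoding added above: the low five
bits carry a driver subversion, the top three identify the OS (the values
below are illustrative):

	u8 vf_os = VF_OS_WINDOWS | (2 & VF_OS_SUBVERSION_MASK); /* Windows, subversion 2 */
	bool is_windows = (vf_os & VF_OS_MASK) == VF_OS_WINDOWS; /* check used by the PF */
	u8 subversion = vf_os & VF_OS_SUBVERSION_MASK;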
@@ -393,11 +402,23 @@ struct pf_vf_bulletin_content {
                                         * to attempt to send messages on the
                                         * channel after this bit is set
                                         */
+#define LINK_VALID             3       /* alert the VF that a new link status
+                                        * update is available for it
+                                        */
        u8 mac[ETH_ALEN];
        u8 mac_padding[2];
 
        u16 vlan;
        u8 vlan_padding[6];
+
+       u16 link_speed;                  /* Effective line speed */
+       u8 link_speed_padding[6];
+       u32 link_flags;                  /* VFPF_LINK_REPORT_XXX flags */
+#define VFPF_LINK_REPORT_LINK_DOWN      (1 << 0)
+#define VFPF_LINK_REPORT_FULL_DUPLEX    (1 << 1)
+#define VFPF_LINK_REPORT_RX_FC_ON       (1 << 2)
+#define VFPF_LINK_REPORT_TX_FC_ON       (1 << 3)
+       u8 link_flags_padding[4];
 };
 
 union pf_vf_bulletin {
index 8244e2b..27861a6 100644 (file)
@@ -1,13 +1,15 @@
-/* cnic.c: Broadcom CNIC core network driver.
+/* cnic.c: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation.
  *
  * Original skeleton written by: John(Zongxi) Chen (zongxi@broadcom.com)
- * Modified and maintained by: Michael Chan <mchan@broadcom.com>
+ * Previously modified and maintained by: Michael Chan <mchan@broadcom.com>
+ * Maintained By: Dept-HSGLinuxNICDev@qlogic.com
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
 #define CNIC_MODULE_NAME       "cnic"
 
 static char version[] =
-       "Broadcom NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
+       "QLogic NetXtreme II CNIC Driver " CNIC_MODULE_NAME " v" CNIC_MODULE_VERSION " (" CNIC_MODULE_RELDATE ")\n";
 
 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com> and John(Zongxi) "
              "Chen (zongxi@broadcom.com");
-MODULE_DESCRIPTION("Broadcom NetXtreme II CNIC Driver");
+MODULE_DESCRIPTION("QLogic NetXtreme II CNIC Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(CNIC_MODULE_VERSION);
 
index d535ae4..4baea81 100644 (file)
@@ -1,6 +1,7 @@
-/* cnic.h: Broadcom CNIC core network driver.
+/* cnic.h: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index dcbca69..b384997 100644 (file)
@@ -1,7 +1,8 @@
 
-/* cnic.c: Broadcom CNIC core network driver.
+/* cnic.c: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 5f4d557..8bb36c1 100644 (file)
@@ -1,6 +1,7 @@
-/* cnic_if.h: Broadcom CNIC core network driver.
+/* cnic_if.h: QLogic CNIC core network driver.
  *
  * Copyright (c) 2006-2014 Broadcom Corporation
+ * Copyright (c) 2014 QLogic Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
index 31f55a9..9b6885e 100644 (file)
@@ -1,2 +1,2 @@
 obj-$(CONFIG_BCMGENET) += genet.o
-genet-objs := bcmgenet.o bcmmii.o
+genet-objs := bcmgenet.o bcmmii.o bcmgenet_wol.o
index 5ba1cfb..ce455ae 100644 (file)
@@ -6,15 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 #define pr_fmt(fmt)                            "bcmgenet: " fmt
                                TOTAL_DESC * DMA_DESC_SIZE)
 
 static inline void dmadesc_set_length_status(struct bcmgenet_priv *priv,
-                                               void __iomem *d, u32 value)
+                                            void __iomem *d, u32 value)
 {
        __raw_writel(value, d + DMA_DESC_LENGTH_STATUS);
 }
 
 static inline u32 dmadesc_get_length_status(struct bcmgenet_priv *priv,
-                                               void __iomem *d)
+                                           void __iomem *d)
 {
        return __raw_readl(d + DMA_DESC_LENGTH_STATUS);
 }
@@ -98,7 +89,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
 
        /* Register writes to GISB bus can take a couple hundred nanoseconds
         * and are done for each packet, save these expensive writes unless
-        * the platform is explicitely configured for 64-bits/LPAE.
+        * the platform is explicitly configured for 64-bits/LPAE.
         */
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (priv->hw_params->flags & GENET_HAS_40BITS)
@@ -108,7 +99,7 @@ static inline void dmadesc_set_addr(struct bcmgenet_priv *priv,
 
 /* Combined address + length/status setter */
 static inline void dmadesc_set(struct bcmgenet_priv *priv,
-                               void __iomem *d, dma_addr_t addr, u32 val)
+                              void __iomem *d, dma_addr_t addr, u32 val)
 {
        dmadesc_set_length_status(priv, d, val);
        dmadesc_set_addr(priv, d, addr);
@@ -123,7 +114,7 @@ static inline dma_addr_t dmadesc_get_addr(struct bcmgenet_priv *priv,
 
        /* Register writes to GISB bus can take a couple hundred nanoseconds
         * and are done for each packet, save these expensive writes unless
-        * the platform is explicitely configured for 64-bits/LPAE.
+        * the platform is explicitly configured for 64-bits/LPAE.
         */
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (priv->hw_params->flags & GENET_HAS_40BITS)
@@ -242,7 +233,7 @@ static inline struct bcmgenet_priv *dev_to_priv(struct device *dev)
 }
 
 static inline u32 bcmgenet_tdma_readl(struct bcmgenet_priv *priv,
-                                       enum dma_reg r)
+                                     enum dma_reg r)
 {
        return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
@@ -256,7 +247,7 @@ static inline void bcmgenet_tdma_writel(struct bcmgenet_priv *priv,
 }
 
 static inline u32 bcmgenet_rdma_readl(struct bcmgenet_priv *priv,
-                                       enum dma_reg r)
+                                     enum dma_reg r)
 {
        return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
                        DMA_RINGS_SIZE + bcmgenet_dma_regs[r]);
@@ -333,8 +324,8 @@ static const u8 genet_dma_ring_regs_v123[] = {
 static const u8 *genet_dma_ring_regs;
 
 static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
-                                               unsigned int ring,
-                                               enum dma_ring_reg r)
+                                          unsigned int ring,
+                                          enum dma_ring_reg r)
 {
        return __raw_readl(priv->base + GENET_TDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
@@ -342,9 +333,8 @@ static inline u32 bcmgenet_tdma_ring_readl(struct bcmgenet_priv *priv,
 }
 
 static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
-                                               unsigned int ring,
-                                               u32 val,
-                                               enum dma_ring_reg r)
+                                            unsigned int ring, u32 val,
+                                            enum dma_ring_reg r)
 {
        __raw_writel(val, priv->base + GENET_TDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
@@ -352,8 +342,8 @@ static inline void bcmgenet_tdma_ring_writel(struct bcmgenet_priv *priv,
 }
 
 static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
-                                               unsigned int ring,
-                                               enum dma_ring_reg r)
+                                          unsigned int ring,
+                                          enum dma_ring_reg r)
 {
        return __raw_readl(priv->base + GENET_RDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
@@ -361,9 +351,8 @@ static inline u32 bcmgenet_rdma_ring_readl(struct bcmgenet_priv *priv,
 }
 
 static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
-                                               unsigned int ring,
-                                               u32 val,
-                                               enum dma_ring_reg r)
+                                            unsigned int ring, u32 val,
+                                            enum dma_ring_reg r)
 {
        __raw_writel(val, priv->base + GENET_RDMA_REG_OFF +
                        (DMA_RING_SIZE * ring) +
@@ -371,7 +360,7 @@ static inline void bcmgenet_rdma_ring_writel(struct bcmgenet_priv *priv,
 }
 
 static int bcmgenet_get_settings(struct net_device *dev,
-               struct ethtool_cmd *cmd)
+                                struct ethtool_cmd *cmd)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -385,7 +374,7 @@ static int bcmgenet_get_settings(struct net_device *dev,
 }
 
 static int bcmgenet_set_settings(struct net_device *dev,
-               struct ethtool_cmd *cmd)
+                                struct ethtool_cmd *cmd)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
 
@@ -458,7 +447,7 @@ static int bcmgenet_set_tx_csum(struct net_device *dev,
 }
 
 static int bcmgenet_set_features(struct net_device *dev,
-               netdev_features_t features)
+                                netdev_features_t features)
 {
        netdev_features_t changed = features ^ dev->features;
        netdev_features_t wanted = dev->wanted_features;
@@ -625,12 +614,11 @@ static const struct bcmgenet_stats bcmgenet_gstrings_stats[] = {
 #define BCMGENET_STATS_LEN     ARRAY_SIZE(bcmgenet_gstrings_stats)
 
 static void bcmgenet_get_drvinfo(struct net_device *dev,
-               struct ethtool_drvinfo *info)
+                                struct ethtool_drvinfo *info)
 {
        strlcpy(info->driver, "bcmgenet", sizeof(info->driver));
        strlcpy(info->version, "v2.0", sizeof(info->version));
        info->n_stats = BCMGENET_STATS_LEN;
-
 }
 
 static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
@@ -643,8 +631,8 @@ static int bcmgenet_get_sset_count(struct net_device *dev, int string_set)
        }
 }
 
-static void bcmgenet_get_strings(struct net_device *dev,
-                               u32 stringset, u8 *data)
+static void bcmgenet_get_strings(struct net_device *dev, u32 stringset,
+                                u8 *data)
 {
        int i;
 
@@ -652,8 +640,8 @@ static void bcmgenet_get_strings(struct net_device *dev,
        case ETH_SS_STATS:
                for (i = 0; i < BCMGENET_STATS_LEN; i++) {
                        memcpy(data + i * ETH_GSTRING_LEN,
-                               bcmgenet_gstrings_stats[i].stat_string,
-                               ETH_GSTRING_LEN);
+                              bcmgenet_gstrings_stats[i].stat_string,
+                              ETH_GSTRING_LEN);
                }
                break;
        }
@@ -678,8 +666,8 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
                case BCMGENET_STAT_RUNT:
                        if (s->type != BCMGENET_STAT_MIB_RX)
                                offset = BCMGENET_STAT_OFFSET;
-                       val = bcmgenet_umac_readl(priv, UMAC_MIB_START +
-                                                               j + offset);
+                       val = bcmgenet_umac_readl(priv,
+                                                 UMAC_MIB_START + j + offset);
                        break;
                case BCMGENET_STAT_MISC:
                        val = bcmgenet_umac_readl(priv, s->reg_offset);
@@ -696,8 +684,8 @@ static void bcmgenet_update_mib_counters(struct bcmgenet_priv *priv)
 }
 
 static void bcmgenet_get_ethtool_stats(struct net_device *dev,
-                                       struct ethtool_stats *stats,
-                                       u64 *data)
+                                      struct ethtool_stats *stats,
+                                      u64 *data)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int i;
@@ -730,6 +718,8 @@ static struct ethtool_ops bcmgenet_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_msglevel           = bcmgenet_get_msglevel,
        .set_msglevel           = bcmgenet_set_msglevel,
+       .get_wol                = bcmgenet_get_wol,
+       .set_wol                = bcmgenet_set_wol,
 };
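
With .get_wol/.set_wol wired up, the ethtool core passes the driver a
struct ethtool_wolinfo. A hedged sketch of what a .get_wol implementation
typically reports, assuming Magic Packet capability (flags from
<linux/ethtool.h>; this body is illustrative, not bcmgenet's actual one):

	static void example_get_wol(struct net_device *dev,
				    struct ethtool_wolinfo *wol)
	{
		wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;	/* HW-capable modes */
		wol->wolopts = 0;				/* currently armed modes */
		memset(wol->sopass, 0, sizeof(wol->sopass));	/* SecureOn password */
	}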
 
 /* Power down the unimac, based on mode. */
@@ -743,6 +733,10 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
                phy_detach(priv->phydev);
                break;
 
+       case GENET_POWER_WOL_MAGIC:
+               bcmgenet_wol_power_down_cfg(priv, mode);
+               break;
+
        case GENET_POWER_PASSIVE:
                /* Power down LED */
                bcmgenet_mii_reset(priv->dev);
@@ -759,7 +753,7 @@ static void bcmgenet_power_down(struct bcmgenet_priv *priv,
 }
 
 static void bcmgenet_power_up(struct bcmgenet_priv *priv,
-                               enum bcmgenet_power_mode mode)
+                             enum bcmgenet_power_mode mode)
 {
        u32 reg;
 
@@ -777,6 +771,9 @@ static void bcmgenet_power_up(struct bcmgenet_priv *priv,
                /* enable APD */
                reg |= EXT_PWR_DN_EN_LD;
                break;
+       case GENET_POWER_WOL_MAGIC:
+               bcmgenet_wol_power_up_cfg(priv, mode);
+               return;
        default:
                break;
        }
@@ -841,37 +838,37 @@ static inline void bcmgenet_tx_ring16_int_disable(struct bcmgenet_priv *priv,
                                                  struct bcmgenet_tx_ring *ring)
 {
        bcmgenet_intrl2_0_writel(priv,
-                       UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
-                       INTRL2_CPU_MASK_SET);
+                                UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+                                INTRL2_CPU_MASK_SET);
 }
 
 static inline void bcmgenet_tx_ring16_int_enable(struct bcmgenet_priv *priv,
                                                 struct bcmgenet_tx_ring *ring)
 {
        bcmgenet_intrl2_0_writel(priv,
-                       UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
-                       INTRL2_CPU_MASK_CLEAR);
+                                UMAC_IRQ_TXDMA_BDONE | UMAC_IRQ_TXDMA_PDONE,
+                                INTRL2_CPU_MASK_CLEAR);
 }
 
 static inline void bcmgenet_tx_ring_int_enable(struct bcmgenet_priv *priv,
-                                               struct bcmgenet_tx_ring *ring)
+                                              struct bcmgenet_tx_ring *ring)
 {
-       bcmgenet_intrl2_1_writel(priv,
-                       (1 << ring->index), INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+                                INTRL2_CPU_MASK_CLEAR);
        priv->int1_mask &= ~(1 << ring->index);
 }
 
 static inline void bcmgenet_tx_ring_int_disable(struct bcmgenet_priv *priv,
                                                struct bcmgenet_tx_ring *ring)
 {
-       bcmgenet_intrl2_1_writel(priv,
-                       (1 << ring->index), INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_1_writel(priv, (1 << ring->index),
+                                INTRL2_CPU_MASK_SET);
        priv->int1_mask |= (1 << ring->index);
 }
 
 /* Unlocked version of the reclaim routine */
 static void __bcmgenet_tx_reclaim(struct net_device *dev,
-                               struct bcmgenet_tx_ring *ring)
+                                 struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int last_tx_cn, last_c_index, num_tx_bds;
@@ -879,7 +876,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
        struct netdev_queue *txq;
        unsigned int c_index;
 
-       /* Compute how many buffers are transmited since last xmit call */
+       /* Compute how many buffers are transmitted since last xmit call */
        c_index = bcmgenet_tdma_ring_readl(priv, ring->index, TDMA_CONS_INDEX);
        txq = netdev_get_tx_queue(dev, ring->queue);
 
@@ -894,9 +891,9 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                last_tx_cn = num_tx_bds - last_c_index + c_index;
 
        netif_dbg(priv, tx_done, dev,
-                       "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
-                       __func__, ring->index,
-                       c_index, last_tx_cn, last_c_index);
+                 "%s ring=%d index=%d last_tx_cn=%d last_index=%d\n",
+                 __func__, ring->index,
+                 c_index, last_tx_cn, last_c_index);
 
        /* Reclaim transmitted buffers */
        while (last_tx_cn-- > 0) {
@@ -904,17 +901,17 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
                if (tx_cb_ptr->skb) {
                        dev->stats.tx_bytes += tx_cb_ptr->skb->len;
                        dma_unmap_single(&dev->dev,
-                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
-                                       tx_cb_ptr->skb->len,
-                                       DMA_TO_DEVICE);
+                                        dma_unmap_addr(tx_cb_ptr, dma_addr),
+                                        tx_cb_ptr->skb->len,
+                                        DMA_TO_DEVICE);
                        bcmgenet_free_cb(tx_cb_ptr);
                } else if (dma_unmap_addr(tx_cb_ptr, dma_addr)) {
                        dev->stats.tx_bytes +=
                                dma_unmap_len(tx_cb_ptr, dma_len);
                        dma_unmap_page(&dev->dev,
-                                       dma_unmap_addr(tx_cb_ptr, dma_addr),
-                                       dma_unmap_len(tx_cb_ptr, dma_len),
-                                       DMA_TO_DEVICE);
+                                      dma_unmap_addr(tx_cb_ptr, dma_addr),
+                                      dma_unmap_len(tx_cb_ptr, dma_len),
+                                      DMA_TO_DEVICE);
                        dma_unmap_addr_set(tx_cb_ptr, dma_addr, 0);
                }
                dev->stats.tx_packets++;
@@ -934,7 +931,7 @@ static void __bcmgenet_tx_reclaim(struct net_device *dev,
 }
 
 static void bcmgenet_tx_reclaim(struct net_device *dev,
-               struct bcmgenet_tx_ring *ring)
+                               struct bcmgenet_tx_ring *ring)
 {
        unsigned long flags;
 
@@ -1008,11 +1005,11 @@ static int bcmgenet_xmit_single(struct net_device *dev,
        return 0;
 }
 
-/* Transmit a SKB fragement */
+/* Transmit a SKB fragment */
 static int bcmgenet_xmit_frag(struct net_device *dev,
-                               skb_frag_t *frag,
-                               u16 dma_desc_flags,
-                               struct bcmgenet_tx_ring *ring)
+                             skb_frag_t *frag,
+                             u16 dma_desc_flags,
+                             struct bcmgenet_tx_ring *ring)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        struct device *kdev = &priv->pdev->dev;
@@ -1027,11 +1024,11 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
        tx_cb_ptr->skb = NULL;
 
        mapping = skb_frag_dma_map(kdev, frag, 0,
-               skb_frag_size(frag), DMA_TO_DEVICE);
+                                  skb_frag_size(frag), DMA_TO_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
        if (ret) {
                netif_err(priv, tx_err, dev, "%s: Tx DMA map failed\n",
-                               __func__);
+                         __func__);
                return ret;
        }
 
@@ -1039,8 +1036,8 @@ static int bcmgenet_xmit_frag(struct net_device *dev,
        dma_unmap_len_set(tx_cb_ptr, dma_len, frag->size);
 
        dmadesc_set(priv, tx_cb_ptr->bd_addr, mapping,
-                       (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
-                       (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
+                   (frag->size << DMA_BUFLENGTH_SHIFT) | dma_desc_flags |
+                   (priv->hw_params->qtag_mask << DMA_TX_QTAG_SHIFT));
 
 
        ring->free_bds -= 1;
@@ -1103,8 +1100,9 @@ static int bcmgenet_put_tx_csum(struct net_device *dev, struct sk_buff *skb)
                        tx_csum_info |= STATUS_TX_CSUM_LV;
                        if (ip_proto == IPPROTO_UDP && ip_ver == ETH_P_IP)
                                tx_csum_info |= STATUS_TX_CSUM_PROTO_UDP;
-               } else
+               } else {
                        tx_csum_info = 0;
+               }
 
                status->tx_csum_info = tx_csum_info;
        }
@@ -1144,11 +1142,16 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        if (ring->free_bds <= nr_frags + 1) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "%s: tx ring %d full when queue %d awake\n",
-                               __func__, index, ring->queue);
+                          __func__, index, ring->queue);
                ret = NETDEV_TX_BUSY;
                goto out;
        }
 
+       if (skb_padto(skb, ETH_ZLEN)) {
+               ret = NETDEV_TX_OK;
+               goto out;
+       }
+
        /* set the SKB transmit checksum */
        if (priv->desc_64b_en) {
                ret = bcmgenet_put_tx_csum(dev, skb);
@@ -1172,8 +1175,9 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
        /* xmit fragment */
        for (i = 0; i < nr_frags; i++) {
                ret = bcmgenet_xmit_frag(dev,
-                               &skb_shinfo(skb)->frags[i],
-                               (i == nr_frags - 1) ? DMA_EOP : 0, ring);
+                                        &skb_shinfo(skb)->frags[i],
+                                        (i == nr_frags - 1) ? DMA_EOP : 0,
+                                        ring);
                if (ret) {
                        ret = NETDEV_TX_OK;
                        goto out;
@@ -1186,7 +1190,7 @@ static netdev_tx_t bcmgenet_xmit(struct sk_buff *skb, struct net_device *dev)
         * producer index, now write it down to the hardware
         */
        bcmgenet_tdma_ring_writel(priv, ring->index,
-                       ring->prod_index, TDMA_PROD_INDEX);
+                                 ring->prod_index, TDMA_PROD_INDEX);
 
        if (ring->free_bds <= (MAX_SKB_FRAGS + 1)) {
                netif_tx_stop_queue(txq);
@@ -1200,16 +1204,14 @@ out:
 }
 
 
-static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
-                               struct enet_cb *cb)
+static int bcmgenet_rx_refill(struct bcmgenet_priv *priv, struct enet_cb *cb)
 {
        struct device *kdev = &priv->pdev->dev;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int ret;
 
-       skb = netdev_alloc_skb(priv->dev,
-                               priv->rx_buf_len + SKB_ALIGNMENT);
+       skb = netdev_alloc_skb(priv->dev, priv->rx_buf_len + SKB_ALIGNMENT);
        if (!skb)
                return -ENOMEM;
 
@@ -1217,12 +1219,12 @@ static int bcmgenet_rx_refill(struct bcmgenet_priv *priv,
        WARN_ON(cb->skb != NULL);
        cb->skb = skb;
        mapping = dma_map_single(kdev, skb->data,
-                       priv->rx_buf_len, DMA_FROM_DEVICE);
+                                priv->rx_buf_len, DMA_FROM_DEVICE);
        ret = dma_mapping_error(kdev, mapping);
        if (ret) {
                bcmgenet_free_cb(cb);
                netif_err(priv, rx_err, priv->dev,
-                               "%s DMA map failed\n", __func__);
+                         "%s DMA map failed\n", __func__);
                return ret;
        }
 
@@ -1257,8 +1259,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
        unsigned int p_index;
        unsigned int chksum_ok = 0;
 
-       p_index = bcmgenet_rdma_ring_readl(priv,
-                       DESC_INDEX, RDMA_PROD_INDEX);
+       p_index = bcmgenet_rdma_ring_readl(priv, DESC_INDEX, RDMA_PROD_INDEX);
        p_index &= DMA_P_INDEX_MASK;
 
        if (p_index < priv->rx_c_index)
@@ -1268,11 +1269,10 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                rxpkttoprocess = p_index - priv->rx_c_index;
 
        netif_dbg(priv, rx_status, dev,
-               "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
+                 "RDMA: rxpkttoprocess=%d\n", rxpkttoprocess);
 
        while ((rxpktprocessed < rxpkttoprocess) &&
-                       (rxpktprocessed < budget)) {
-
+              (rxpktprocessed < budget)) {
                /* Unmap the packet contents such that we can use the
                 * RSV from the 64 bytes descriptor when enabled and save
                 * a 32-bits register read
@@ -1280,15 +1280,17 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                cb = &priv->rx_cbs[priv->rx_read_ptr];
                skb = cb->skb;
                dma_unmap_single(&dev->dev, dma_unmap_addr(cb, dma_addr),
-                               priv->rx_buf_len, DMA_FROM_DEVICE);
+                                priv->rx_buf_len, DMA_FROM_DEVICE);
 
                if (!priv->desc_64b_en) {
-                       dma_length_status = dmadesc_get_length_status(priv,
-                                                       priv->rx_bds +
-                                                       (priv->rx_read_ptr *
-                                                        DMA_DESC_SIZE));
+                       dma_length_status =
+                               dmadesc_get_length_status(priv,
+                                                         priv->rx_bds +
+                                                         (priv->rx_read_ptr *
+                                                          DMA_DESC_SIZE));
                } else {
                        struct status_64 *status;
+
                        status = (struct status_64 *)skb->data;
                        dma_length_status = status->length_status;
                }
@@ -1300,9 +1302,9 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                len = dma_length_status >> DMA_BUFLENGTH_SHIFT;
 
                netif_dbg(priv, rx_status, dev,
-                       "%s: p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
-                       __func__, p_index, priv->rx_c_index, priv->rx_read_ptr,
-                       dma_length_status);
+                         "%s:p_ind=%d c_ind=%d read_ptr=%d len_stat=0x%08x\n",
+                         __func__, p_index, priv->rx_c_index,
+                         priv->rx_read_ptr, dma_length_status);
 
                rxpktprocessed++;
 
@@ -1318,7 +1320,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
 
                if (unlikely(!(dma_flag & DMA_EOP) || !(dma_flag & DMA_SOP))) {
                        netif_err(priv, rx_status, dev,
-                                       "Droping fragmented packet!\n");
+                                 "dropping fragmented packet!\n");
                        dev->stats.rx_dropped++;
                        dev->stats.rx_errors++;
                        dev_kfree_skb_any(cb->skb);
@@ -1332,7 +1334,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                                                DMA_RX_LG |
                                                DMA_RX_RXER))) {
                        netif_err(priv, rx_status, dev, "dma_flag=0x%x\n",
-                                               (unsigned int)dma_flag);
+                                 (unsigned int)dma_flag);
                        if (dma_flag & DMA_RX_CRC_ERROR)
                                dev->stats.rx_crc_errors++;
                        if (dma_flag & DMA_RX_OV)
@@ -1351,7 +1353,7 @@ static unsigned int bcmgenet_desc_rx(struct bcmgenet_priv *priv,
                } /* error packet */
 
                chksum_ok = (dma_flag & priv->dma_rx_chk_bit) &&
-                               priv->desc_rxchk_en;
+                            priv->desc_rxchk_en;
 
                skb_put(skb, len);
                if (priv->desc_64b_en) {
@@ -1408,17 +1410,9 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
                if (cb->skb)
                        continue;
 
-               /* set the DMA descriptor length once and for all
-                * it will only change if we support dynamically sizing
-                * priv->rx_buf_len, but we do not
-                */
-               dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
-                               priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
-
                ret = bcmgenet_rx_refill(priv, cb);
                if (ret)
                        break;
-
        }
 
        return ret;
@@ -1434,8 +1428,8 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
 
                if (dma_unmap_addr(cb, dma_addr)) {
                        dma_unmap_single(&priv->dev->dev,
-                                       dma_unmap_addr(cb, dma_addr),
-                                       priv->rx_buf_len, DMA_FROM_DEVICE);
+                                        dma_unmap_addr(cb, dma_addr),
+                                        priv->rx_buf_len, DMA_FROM_DEVICE);
                        dma_unmap_addr_set(cb, dma_addr, 0);
                }
 
@@ -1444,6 +1438,24 @@ static void bcmgenet_free_rx_buffers(struct bcmgenet_priv *priv)
        }
 }
 
+static void umac_enable_set(struct bcmgenet_priv *priv, u32 mask, bool enable)
+{
+       u32 reg;
+
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       if (enable)
+               reg |= mask;
+       else
+               reg &= ~mask;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       /* UniMAC stops on a packet boundary; wait for a full-size packet
+        * to be processed
+        */
+       if (!enable)
+               usleep_range(1000, 2000);
+}
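
For reference, this helper centralizes the read-modify-write of UMAC_CMD that used to be open-coded at every enable/disable site; a typical call site (as used further down in this patch) looks like:

	/* quiesce the MAC around a register update, then restart it */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
	/* ... reprogram UMAC registers ... */
	umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);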
+
 static int reset_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -1469,13 +1481,24 @@ static int reset_umac(struct bcmgenet_priv *priv)
 
        if (timeout == 1000) {
                dev_err(kdev,
-                       "timeout waiting for MAC to come out of resetn\n");
+                       "timeout waiting for MAC to come out of reset\n");
                return -ETIMEDOUT;
        }
 
        return 0;
 }
 
+static void bcmgenet_intr_disable(struct bcmgenet_priv *priv)
+{
+       /* Mask all interrupts */
+       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
+       bcmgenet_intrl2_1_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
+       bcmgenet_intrl2_1_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+}
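
The writes above follow the usual set/clear register-pair idiom; assuming write-1 semantics (MASK_SET masks a source, MASK_CLEAR unmasks it, CPU_CLEAR acks a latched status bit), re-enabling a single source would look like this sketch (the helper name is hypothetical):

	static void bcmgenet_intr_enable_src(struct bcmgenet_priv *priv, u32 src)
	{
		bcmgenet_intrl2_0_writel(priv, src, INTRL2_CPU_CLEAR);      /* ack stale status */
		bcmgenet_intrl2_0_writel(priv, src, INTRL2_CPU_MASK_CLEAR); /* unmask */
	}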
+
 static int init_umac(struct bcmgenet_priv *priv)
 {
        struct device *kdev = &priv->pdev->dev;
@@ -1491,7 +1514,8 @@ static int init_umac(struct bcmgenet_priv *priv)
        bcmgenet_umac_writel(priv, 0, UMAC_CMD);
        /* clear tx/rx counter */
        bcmgenet_umac_writel(priv,
-               MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT, UMAC_MIB_CTRL);
+                            MIB_RESET_RX | MIB_RESET_TX | MIB_RESET_RUNT,
+                            UMAC_MIB_CTRL);
        bcmgenet_umac_writel(priv, 0, UMAC_MIB_CTRL);
 
        bcmgenet_umac_writel(priv, ENET_MAX_MTU_SIZE, UMAC_MAX_FRAME_LEN);
@@ -1504,21 +1528,18 @@ static int init_umac(struct bcmgenet_priv *priv)
        if (!GENET_IS_V1(priv) && !GENET_IS_V2(priv))
                bcmgenet_rbuf_writel(priv, 1, RBUF_TBUF_SIZE_CTRL);
 
-       /* Mask all interrupts.*/
-       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_MASK_SET);
-       bcmgenet_intrl2_0_writel(priv, 0xFFFFFFFF, INTRL2_CPU_CLEAR);
-       bcmgenet_intrl2_0_writel(priv, 0, INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intr_disable(priv);
 
        cpu_mask_clear = UMAC_IRQ_RXDMA_BDONE;
 
        dev_dbg(kdev, "%s:Enabling RXDMA_BDONE interrupt\n", __func__);
 
-       /* Monitor cable plug/unpluged event for internal PHY */
-       if (phy_is_internal(priv->phydev))
+       /* Monitor cable plug/unplugged event for internal PHY */
+       if (phy_is_internal(priv->phydev)) {
                cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
-       else if (priv->ext_phy)
+       } else if (priv->ext_phy) {
                cpu_mask_clear |= (UMAC_IRQ_LINK_DOWN | UMAC_IRQ_LINK_UP);
-       else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
+       } else if (priv->phy_interface == PHY_INTERFACE_MODE_MOCA) {
                reg = bcmgenet_bp_mc_get(priv);
                reg |= BIT(priv->hw_params->bp_in_en_shift);
 
@@ -1534,8 +1555,7 @@ static int init_umac(struct bcmgenet_priv *priv)
        if (priv->hw_params->flags & GENET_HAS_MDIO_INTR)
                cpu_mask_clear |= UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR;
 
-       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear,
-               INTRL2_CPU_MASK_CLEAR);
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
 
        /* Enable rx/tx engine.*/
        dev_dbg(kdev, "done init umac\n");
@@ -1584,28 +1604,28 @@ static void bcmgenet_init_tx_ring(struct bcmgenet_priv *priv,
        bcmgenet_tdma_ring_writel(priv, index, 1, DMA_MBUF_DONE_THRESH);
        /* Disable rate control for now */
        bcmgenet_tdma_ring_writel(priv, index, flow_period_val,
-                       TDMA_FLOW_PERIOD);
+                                 TDMA_FLOW_PERIOD);
        /* Unclassified traffic goes to ring 16 */
        bcmgenet_tdma_ring_writel(priv, index,
-                       ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
-                       DMA_RING_BUF_SIZE);
+                                 ((size << DMA_RING_SIZE_SHIFT) |
+                                  RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
 
        first_bd = write_ptr;
 
        /* Set start and end address, read and write pointers */
        bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
-                       DMA_START_ADDR);
+                                 DMA_START_ADDR);
        bcmgenet_tdma_ring_writel(priv, index, first_bd * words_per_bd,
-                       TDMA_READ_PTR);
+                                 TDMA_READ_PTR);
        bcmgenet_tdma_ring_writel(priv, index, first_bd,
-                       TDMA_WRITE_PTR);
+                                 TDMA_WRITE_PTR);
        bcmgenet_tdma_ring_writel(priv, index, end_ptr * words_per_bd - 1,
-                       DMA_END_ADDR);
+                                 DMA_END_ADDR);
 }
 
 /* Initialize a RDMA ring */
 static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
-                                 unsigned int index, unsigned int size)
+                                unsigned int index, unsigned int size)
 {
        u32 words_per_bd = WORDS_PER_BD(priv);
        int ret;
@@ -1616,8 +1636,8 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
        priv->rx_bd_assign_index = 0;
        priv->rx_c_index = 0;
        priv->rx_read_ptr = 0;
-       priv->rx_cbs = kzalloc(priv->num_rx_bds * sizeof(struct enet_cb),
-                               GFP_KERNEL);
+       priv->rx_cbs = kcalloc(priv->num_rx_bds, sizeof(struct enet_cb),
+                              GFP_KERNEL);
        if (!priv->rx_cbs)
                return -ENOMEM;
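
The kzalloc(n * size) to kcalloc(n, size) conversions in this patch (here, for tx_cbs below, and for bus->irq in the MII code) are not purely cosmetic: kcalloc() also checks the multiplication for overflow. Roughly, as a sketch rather than the kernel's actual implementation:

	static inline void *kcalloc_sketch(size_t n, size_t size, gfp_t flags)
	{
		if (size != 0 && n > SIZE_MAX / size)
			return NULL;	/* n * size would overflow */
		return kzalloc(n * size, flags);
	}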
 
@@ -1631,14 +1651,15 @@ static int bcmgenet_init_rx_ring(struct bcmgenet_priv *priv,
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_PROD_INDEX);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_CONS_INDEX);
        bcmgenet_rdma_ring_writel(priv, index,
-               ((size << DMA_RING_SIZE_SHIFT) | RX_BUF_LENGTH),
-               DMA_RING_BUF_SIZE);
+                                 ((size << DMA_RING_SIZE_SHIFT) |
+                                  RX_BUF_LENGTH), DMA_RING_BUF_SIZE);
        bcmgenet_rdma_ring_writel(priv, index, 0, DMA_START_ADDR);
        bcmgenet_rdma_ring_writel(priv, index,
-               words_per_bd * size - 1, DMA_END_ADDR);
+                                 words_per_bd * size - 1, DMA_END_ADDR);
        bcmgenet_rdma_ring_writel(priv, index,
-                       (DMA_FC_THRESH_LO << DMA_XOFF_THRESHOLD_SHIFT) |
-                       DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
+                                 (DMA_FC_THRESH_LO <<
+                                  DMA_XOFF_THRESHOLD_SHIFT) |
+                                  DMA_FC_THRESH_HI, RDMA_XON_XOFF_THRESH);
        bcmgenet_rdma_ring_writel(priv, index, 0, RDMA_READ_PTR);
 
        return ret;
@@ -1685,10 +1706,10 @@ static void bcmgenet_init_multiq(struct net_device *dev)
                 * (ring 16)
                 */
                bcmgenet_init_tx_ring(priv, i, priv->hw_params->bds_cnt,
-                                       i * priv->hw_params->bds_cnt,
-                                       (i + 1) * priv->hw_params->bds_cnt);
+                                     i * priv->hw_params->bds_cnt,
+                                     (i + 1) * priv->hw_params->bds_cnt);
 
-               /* Configure ring as decriptor ring and setup priority */
+               /* Configure ring as descriptor ring and setup priority */
                ring_cfg |= 1 << i;
                dma_priority |= ((GENET_Q0_PRIORITY + i) <<
                                (GENET_MAX_MQ_CNT + 1) * i);
@@ -1754,11 +1775,11 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
        /* Init tDma */
        bcmgenet_tdma_writel(priv, DMA_MAX_BURST_LENGTH, DMA_SCB_BURST_SIZE);
 
-       /* Initialize commont TX ring structures */
+       /* Initialize common TX ring structures */
        priv->tx_bds = priv->base + priv->hw_params->tdma_offset;
        priv->num_tx_bds = TOTAL_DESC;
-       priv->tx_cbs = kzalloc(priv->num_tx_bds * sizeof(struct enet_cb),
-                               GFP_KERNEL);
+       priv->tx_cbs = kcalloc(priv->num_tx_bds, sizeof(struct enet_cb),
+                              GFP_KERNEL);
        if (!priv->tx_cbs) {
                bcmgenet_fini_dma(priv);
                return -ENOMEM;
@@ -1769,8 +1790,9 @@ static int bcmgenet_init_dma(struct bcmgenet_priv *priv)
 
        /* initialize special ring 16 */
        bcmgenet_init_tx_ring(priv, DESC_INDEX, GENET_DEFAULT_BD_CNT,
-                       priv->hw_params->tx_queues * priv->hw_params->bds_cnt,
-                       TOTAL_DESC);
+                             priv->hw_params->tx_queues *
+                             priv->hw_params->bds_cnt,
+                             TOTAL_DESC);
 
        return 0;
 }
@@ -1791,11 +1813,11 @@ static int bcmgenet_poll(struct napi_struct *napi, int budget)
        priv->rx_c_index += work_done;
        priv->rx_c_index &= DMA_C_INDEX_MASK;
        bcmgenet_rdma_ring_writel(priv, DESC_INDEX,
-                               priv->rx_c_index, RDMA_CONS_INDEX);
+                                 priv->rx_c_index, RDMA_CONS_INDEX);
        if (work_done < budget) {
                napi_complete(napi);
-               bcmgenet_intrl2_0_writel(priv,
-                       UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_CLEAR);
+               bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+                                        INTRL2_CPU_MASK_CLEAR);
        }
 
        return work_done;
@@ -1809,11 +1831,18 @@ static void bcmgenet_irq_task(struct work_struct *work)
 
        netif_dbg(priv, intr, priv->dev, "%s\n", __func__);
 
+       if (priv->irq0_stat & UMAC_IRQ_MPD_R) {
+               priv->irq0_stat &= ~UMAC_IRQ_MPD_R;
+               netif_dbg(priv, wol, priv->dev,
+                         "magic packet detected, waking up\n");
+               bcmgenet_power_up(priv, GENET_POWER_WOL_MAGIC);
+       }
+
        /* Link UP/DOWN event */
        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-               (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
+           (priv->irq0_stat & (UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN))) {
                phy_mac_interrupt(priv->phydev,
-                       priv->irq0_stat & UMAC_IRQ_LINK_UP);
+                                 priv->irq0_stat & UMAC_IRQ_LINK_UP);
                priv->irq0_stat &= ~(UMAC_IRQ_LINK_UP|UMAC_IRQ_LINK_DOWN);
        }
 }
@@ -1828,11 +1857,11 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
        priv->irq1_stat =
                bcmgenet_intrl2_1_readl(priv, INTRL2_CPU_STAT) &
                ~priv->int1_mask;
-       /* clear inerrupts*/
+       /* clear interrupts */
        bcmgenet_intrl2_1_writel(priv, priv->irq1_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
-               "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
+                 "%s: IRQ=0x%x\n", __func__, priv->irq1_stat);
        /* Check the MBDONE interrupts.
	 * a packet is done; reclaim descriptors
         */
@@ -1841,7 +1870,7 @@ static irqreturn_t bcmgenet_isr1(int irq, void *dev_id)
                for (index = 0; index < 16; index++) {
                        if (priv->irq1_stat & (1 << index))
                                bcmgenet_tx_reclaim(priv->dev,
-                                               &priv->tx_rings[index]);
+                                                   &priv->tx_rings[index]);
                }
        }
        return IRQ_HANDLED;
@@ -1856,11 +1885,11 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        priv->irq0_stat =
                bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_STAT) &
                ~bcmgenet_intrl2_0_readl(priv, INTRL2_CPU_MASK_STATUS);
-       /* clear inerrupts*/
+       /* clear interrupts */
        bcmgenet_intrl2_0_writel(priv, priv->irq0_stat, INTRL2_CPU_CLEAR);
 
        netif_dbg(priv, intr, priv->dev,
-               "IRQ=0x%x\n", priv->irq0_stat);
+                 "IRQ=0x%x\n", priv->irq0_stat);
 
        if (priv->irq0_stat & (UMAC_IRQ_RXDMA_BDONE | UMAC_IRQ_RXDMA_PDONE)) {
                /* We use NAPI (software interrupt throttling), if
@@ -1868,8 +1897,8 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
                 * Disable interrupt, will be enabled in the poll method.
                 */
                if (likely(napi_schedule_prep(&priv->napi))) {
-                       bcmgenet_intrl2_0_writel(priv,
-                               UMAC_IRQ_RXDMA_BDONE, INTRL2_CPU_MASK_SET);
+                       bcmgenet_intrl2_0_writel(priv, UMAC_IRQ_RXDMA_BDONE,
+                                                INTRL2_CPU_MASK_SET);
                        __napi_schedule(&priv->napi);
                }
        }
@@ -1890,7 +1919,7 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        }
 
        if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
-               priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
+           priv->irq0_stat & (UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR)) {
                priv->irq0_stat &= ~(UMAC_IRQ_MDIO_DONE | UMAC_IRQ_MDIO_ERROR);
                wake_up(&priv->wq);
        }
@@ -1898,6 +1927,15 @@ static irqreturn_t bcmgenet_isr0(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
+static irqreturn_t bcmgenet_wol_isr(int irq, void *dev_id)
+{
+       struct bcmgenet_priv *priv = dev_id;
+
+       pm_wakeup_event(&priv->pdev->dev, 0);
+
+       return IRQ_HANDLED;
+}
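
The WOL ISR above deliberately performs no register I/O:

	/* pm_wakeup_event() merely reports the wakeup to the PM core (and
	 * aborts a suspend still in flight); the MPD_R status itself is
	 * consumed by bcmgenet_irq_task() once the interface is running
	 * again.
	 */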
+
 static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 {
        u32 reg;
@@ -1913,7 +1951,7 @@ static void bcmgenet_umac_reset(struct bcmgenet_priv *priv)
 }
 
 static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
-                                 unsigned char *addr)
+                                unsigned char *addr)
 {
        bcmgenet_umac_writel(priv, (addr[0] << 24) | (addr[1] << 16) |
                        (addr[2] << 8) | addr[3], UMAC_MAC0);
@@ -1922,14 +1960,8 @@ static void bcmgenet_set_hw_addr(struct bcmgenet_priv *priv,
 
 static int bcmgenet_wol_resume(struct bcmgenet_priv *priv)
 {
-       int ret;
-
        /* From WOL-enabled suspend, switch to regular clock */
-       clk_disable(priv->clk_wol);
-       /* init umac registers to synchronize s/w with h/w */
-       ret = init_umac(priv);
-       if (ret)
-               return ret;
+       clk_disable_unprepare(priv->clk_wol);
 
        phy_init_hw(priv->phydev);
        /* Speed settings must be restored */
@@ -1974,6 +2006,23 @@ static void bcmgenet_enable_dma(struct bcmgenet_priv *priv, u32 dma_ctrl)
        bcmgenet_tdma_writel(priv, reg, DMA_CTRL);
 }
 
+static void bcmgenet_netif_start(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       /* Start the network engine */
+       napi_enable(&priv->napi);
+
+       umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, true);
+
+       if (phy_is_internal(priv->phydev))
+               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
+
+       netif_tx_start_all_queues(dev);
+
+       phy_start(priv->phydev);
+}
+
 static int bcmgenet_open(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
@@ -1995,18 +2044,14 @@ static int bcmgenet_open(struct net_device *dev)
                goto err_clk_disable;
 
        /* disable ethernet MAC while updating its registers */
+       umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+
+       /* Make sure we reflect the value of CRC_CMD_FWD */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg &= ~(CMD_TX_EN | CMD_RX_EN);
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
 
        bcmgenet_set_hw_addr(priv, dev->dev_addr);
 
-       if (priv->wol_enabled) {
-               ret = bcmgenet_wol_resume(priv);
-               if (ret)
-                       return ret;
-       }
-
        if (phy_is_internal(priv->phydev)) {
                reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
                reg |= EXT_ENERGY_DET_MASK;
@@ -2027,37 +2072,20 @@ static int bcmgenet_open(struct net_device *dev)
        bcmgenet_enable_dma(priv, dma_ctrl);
 
        ret = request_irq(priv->irq0, bcmgenet_isr0, IRQF_SHARED,
-                       dev->name, priv);
+                         dev->name, priv);
        if (ret < 0) {
                netdev_err(dev, "can't request IRQ %d\n", priv->irq0);
                goto err_fini_dma;
        }
 
        ret = request_irq(priv->irq1, bcmgenet_isr1, IRQF_SHARED,
-                               dev->name, priv);
+                         dev->name, priv);
        if (ret < 0) {
                netdev_err(dev, "can't request IRQ %d\n", priv->irq1);
                goto err_irq0;
        }
 
-       /* Start the network engine */
-       napi_enable(&priv->napi);
-
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg |= (CMD_TX_EN | CMD_RX_EN);
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-
-       /* Make sure we reflect the value of CRC_CMD_FWD */
-       priv->crc_fwd_en = !!(reg & CMD_CRC_FWD);
-
-       device_set_wakeup_capable(&dev->dev, 1);
-
-       if (phy_is_internal(priv->phydev))
-               bcmgenet_power_up(priv, GENET_POWER_PASSIVE);
-
-       netif_tx_start_all_queues(dev);
-
-       phy_start(priv->phydev);
+       bcmgenet_netif_start(dev);
 
        return 0;
 
@@ -2092,8 +2120,7 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        }
 
        if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev,
-                       "Timed out while disabling TX DMA\n");
+               netdev_warn(priv->dev, "Timed out while disabling TX DMA\n");
                ret = -ETIMEDOUT;
        }
 
@@ -2116,41 +2143,47 @@ static int bcmgenet_dma_teardown(struct bcmgenet_priv *priv)
        }
 
        if (timeout == DMA_TIMEOUT_VAL) {
-               netdev_warn(priv->dev,
-                       "Timed out while disabling RX DMA\n");
-                       ret = -ETIMEDOUT;
+               netdev_warn(priv->dev, "Timed out while disabling RX DMA\n");
+               ret = -ETIMEDOUT;
        }
 
        return ret;
 }
 
+static void bcmgenet_netif_stop(struct net_device *dev)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+
+       netif_tx_stop_all_queues(dev);
+       napi_disable(&priv->napi);
+       phy_stop(priv->phydev);
+
+       bcmgenet_intr_disable(priv);
+
+       /* Wait for pending work items to complete. Since interrupts are
+        * disabled, no new work will be scheduled.
+        */
+       cancel_work_sync(&priv->bcmgenet_irq_work);
+}
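
The stop sequence above is order-sensitive; spelled out:

	/* teardown order (sketch):
	 * 1. netif_tx_stop_all_queues() - the stack submits no new frames;
	 * 2. napi_disable()             - the RX poll loop is parked;
	 * 3. phy_stop()                 - the PHY state machine stops;
	 * 4. bcmgenet_intr_disable()    - no further IRQ work is queued;
	 * 5. cancel_work_sync()         - already-queued work has finished.
	 */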
+
 static int bcmgenet_close(struct net_device *dev)
 {
        struct bcmgenet_priv *priv = netdev_priv(dev);
        int ret;
-       u32 reg;
 
        netif_dbg(priv, ifdown, dev, "bcmgenet_close\n");
 
-       phy_stop(priv->phydev);
+       bcmgenet_netif_stop(dev);
 
        /* Disable MAC receive */
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_RX_EN;
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-
-       netif_tx_stop_all_queues(dev);
+       umac_enable_set(priv, CMD_RX_EN, false);
 
        ret = bcmgenet_dma_teardown(priv);
        if (ret)
                return ret;
 
        /* Disable MAC transmit. TX DMA must be disabled before this */
-       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
-       reg &= ~CMD_TX_EN;
-       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-
-       napi_disable(&priv->napi);
+       umac_enable_set(priv, CMD_TX_EN, false);
 
        /* tx reclaim */
        bcmgenet_tx_reclaim_all(dev);
@@ -2159,18 +2192,9 @@ static int bcmgenet_close(struct net_device *dev)
        free_irq(priv->irq0, priv);
        free_irq(priv->irq1, priv);
 
-       /* Wait for pending work items to complete - we are stopping
-        * the clock now. Since interrupts are disabled, no new work
-        * will be scheduled.
-        */
-       cancel_work_sync(&priv->bcmgenet_irq_work);
-
        if (phy_is_internal(priv->phydev))
                bcmgenet_power_down(priv, GENET_POWER_PASSIVE);
 
-       if (priv->wol_enabled)
-               clk_enable(priv->clk_wol);
-
        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
 
@@ -2199,12 +2223,11 @@ static inline void bcmgenet_set_mdf_addr(struct bcmgenet_priv *priv,
 {
        u32 reg;
 
-       bcmgenet_umac_writel(priv,
-                       addr[0] << 8 | addr[1], UMAC_MDF_ADDR + (*i * 4));
-       bcmgenet_umac_writel(priv,
-                       addr[2] << 24 | addr[3] << 16 |
-                       addr[4] << 8 | addr[5],
-                       UMAC_MDF_ADDR + ((*i + 1) * 4));
+       bcmgenet_umac_writel(priv, addr[0] << 8 | addr[1],
+                            UMAC_MDF_ADDR + (*i * 4));
+       bcmgenet_umac_writel(priv, addr[2] << 24 | addr[3] << 16 |
+                            addr[4] << 8 | addr[5],
+                            UMAC_MDF_ADDR + ((*i + 1) * 4));
        reg = bcmgenet_umac_readl(priv, UMAC_MDF_CTRL);
        reg |= (1 << (MAX_MC_COUNT - *mc));
        bcmgenet_umac_writel(priv, reg, UMAC_MDF_CTRL);
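
Each filter entry packs a 6-byte MAC address big-endian across two 32-bit registers. For a hypothetical address 00:11:22:33:44:55, the two writes above produce:

	/* hypothetical example: addr = 00:11:22:33:44:55
	 *   UMAC_MDF_ADDR + (*i * 4)       <- 0x00000011  (addr[0] << 8 | addr[1])
	 *   UMAC_MDF_ADDR + ((*i + 1) * 4) <- 0x22334455  (addr[2..5])
	 */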
@@ -2221,7 +2244,7 @@ static void bcmgenet_set_rx_mode(struct net_device *dev)
 
        netif_dbg(priv, hw, dev, "%s: %08X\n", __func__, dev->flags);
 
-       /* Promiscous mode */
+       /* Promiscuous mode */
        reg = bcmgenet_umac_readl(priv, UMAC_CMD);
        if (dev->flags & IFF_PROMISC) {
                reg |= CMD_PROMISC;
@@ -2401,7 +2424,7 @@ static void bcmgenet_set_hw_params(struct bcmgenet_priv *priv)
 
        /* Print the GENET core version */
        dev_info(&priv->pdev->dev, "GENET " GENET_VER_FMT,
-               major, (reg >> 16) & 0x0f, reg & 0xffff);
+                major, (reg >> 16) & 0x0f, reg & 0xffff);
 
 #ifdef CONFIG_PHYS_ADDR_T_64BIT
        if (!(params->flags & GENET_HAS_40BITS))
@@ -2457,6 +2480,7 @@ static int bcmgenet_probe(struct platform_device *pdev)
        priv = netdev_priv(dev);
        priv->irq0 = platform_get_irq(pdev, 0);
        priv->irq1 = platform_get_irq(pdev, 1);
+       priv->wol_irq = platform_get_irq(pdev, 2);
        if (!priv->irq0 || !priv->irq1) {
                dev_err(&pdev->dev, "can't find IRQs\n");
                err = -EINVAL;
@@ -2491,6 +2515,13 @@ static int bcmgenet_probe(struct platform_device *pdev)
        dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM |
                NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
 
+       /* Request the WOL interrupt and advertise suspend if available */
+       priv->wol_irq_disabled = true;
+       err = devm_request_irq(&pdev->dev, priv->wol_irq, bcmgenet_wol_isr, 0,
+                              dev->name, priv);
+       if (!err)
+               device_set_wakeup_capable(&pdev->dev, 1);
+
        /* Set the needed headroom to account for any possible
         * features enabling/disabling at runtime
         */
@@ -2535,14 +2566,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
        netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
        netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
 
-       err = register_netdev(dev);
-       if (err)
-               goto err_clk_disable;
+       /* libphy will determine the link state */
+       netif_carrier_off(dev);
 
        /* Turn off the main clock, WOL clock is handled separately */
        if (!IS_ERR(priv->clk))
                clk_disable_unprepare(priv->clk);
 
+       err = register_netdev(dev);
+       if (err)
+               goto err;
+
        return err;
 
 err_clk_disable:
@@ -2565,6 +2599,111 @@ static int bcmgenet_remove(struct platform_device *pdev)
        return 0;
 }
 
+#ifdef CONFIG_PM_SLEEP
+static int bcmgenet_suspend(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       int ret;
+
+       if (!netif_running(dev))
+               return 0;
+
+       bcmgenet_netif_stop(dev);
+
+       netif_device_detach(dev);
+
+       /* Disable MAC receive */
+       umac_enable_set(priv, CMD_RX_EN, false);
+
+       ret = bcmgenet_dma_teardown(priv);
+       if (ret)
+               return ret;
+
+       /* Disable MAC transmit. TX DMA must be disabled before this */
+       umac_enable_set(priv, CMD_TX_EN, false);
+
+       /* tx reclaim */
+       bcmgenet_tx_reclaim_all(dev);
+       bcmgenet_fini_dma(priv);
+
+       /* Prepare the device for Wake-on-LAN and switch to the slow clock */
+       if (device_may_wakeup(d) && priv->wolopts) {
+               bcmgenet_power_down(priv, GENET_POWER_WOL_MAGIC);
+               clk_prepare_enable(priv->clk_wol);
+       }
+
+       /* Turn off the clocks */
+       clk_disable_unprepare(priv->clk);
+
+       return 0;
+}
+
+static int bcmgenet_resume(struct device *d)
+{
+       struct net_device *dev = dev_get_drvdata(d);
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       unsigned long dma_ctrl;
+       int ret;
+       u32 reg;
+
+       if (!netif_running(dev))
+               return 0;
+
+       /* Turn on the clock */
+       ret = clk_prepare_enable(priv->clk);
+       if (ret)
+               return ret;
+
+       bcmgenet_umac_reset(priv);
+
+       ret = init_umac(priv);
+       if (ret)
+               goto out_clk_disable;
+
+       if (priv->wolopts)
+               ret = bcmgenet_wol_resume(priv);
+
+       if (ret)
+               goto out_clk_disable;
+
+       /* disable ethernet MAC while updating its registers */
+       umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
+
+       bcmgenet_set_hw_addr(priv, dev->dev_addr);
+
+       if (phy_is_internal(priv->phydev)) {
+               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+               reg |= EXT_ENERGY_DET_MASK;
+               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       }
+
+       /* Disable RX/TX DMA and flush TX queues */
+       dma_ctrl = bcmgenet_dma_disable(priv);
+
+       /* Reinitialize TDMA and RDMA and SW housekeeping */
+       ret = bcmgenet_init_dma(priv);
+       if (ret) {
+               netdev_err(dev, "failed to initialize DMA\n");
+               goto out_clk_disable;
+       }
+
+       /* Always enable ring 16 - descriptor ring */
+       bcmgenet_enable_dma(priv, dma_ctrl);
+
+       netif_device_attach(dev);
+
+       bcmgenet_netif_start(dev);
+
+       return 0;
+
+out_clk_disable:
+       clk_disable_unprepare(priv->clk);
+       return ret;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static SIMPLE_DEV_PM_OPS(bcmgenet_pm_ops, bcmgenet_suspend, bcmgenet_resume);
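
SIMPLE_DEV_PM_OPS() ties the two callbacks into a struct dev_pm_ops; approximately (a sketch; the real macro also routes the freeze/thaw/poweroff/restore hibernation hooks to the same pair via SET_SYSTEM_SLEEP_PM_OPS):

	static const struct dev_pm_ops bcmgenet_pm_ops = {
		.suspend = bcmgenet_suspend,
		.resume  = bcmgenet_resume,
		/* hibernation hooks map to the same two functions */
	};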
 
 static struct platform_driver bcmgenet_driver = {
        .probe  = bcmgenet_probe,
@@ -2573,6 +2712,7 @@ static struct platform_driver bcmgenet_driver = {
                .name   = "bcmgenet",
                .owner  = THIS_MODULE,
                .of_match_table = bcmgenet_match,
+               .pm     = &bcmgenet_pm_ops,
        },
 };
 module_platform_driver(bcmgenet_driver);
index 0f11710..c862d06 100644
@@ -4,18 +4,8 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- *
-*/
+ */
+
 #ifndef __BCMGENET_H__
 #define __BCMGENET_H__
 
@@ -331,9 +321,9 @@ struct bcmgenet_mib_counters {
 #define  EXT_ENERGY_DET_MASK           (1 << 12)
 
 #define EXT_RGMII_OOB_CTRL             0x0C
-#define  RGMII_MODE_EN                 (1 << 0)
 #define  RGMII_LINK                    (1 << 4)
 #define  OOB_DISABLE                   (1 << 5)
+#define  RGMII_MODE_EN                 (1 << 6)
 #define  ID_MODE_DIS                   (1 << 16)
 
 #define EXT_GPHY_CTRL                  0x1C
@@ -456,6 +446,7 @@ struct enet_cb {
 enum bcmgenet_power_mode {
        GENET_POWER_CABLE_SENSE = 0,
        GENET_POWER_PASSIVE,
+       GENET_POWER_WOL_MAGIC,
 };
 
 struct bcmgenet_priv;
@@ -513,9 +504,9 @@ struct bcmgenet_tx_ring {
        unsigned int    cb_ptr;         /* Tx ring initial CB ptr */
        unsigned int    end_ptr;        /* Tx ring end CB ptr */
        void (*int_enable)(struct bcmgenet_priv *priv,
-                               struct bcmgenet_tx_ring *);
+                          struct bcmgenet_tx_ring *);
        void (*int_disable)(struct bcmgenet_priv *priv,
-                               struct bcmgenet_tx_ring *);
+                           struct bcmgenet_tx_ring *);
 };
 
 /* device context */
@@ -569,6 +560,8 @@ struct bcmgenet_priv {
        int irq1;
        unsigned int irq0_stat;
        unsigned int irq1_stat;
+       int wol_irq;
+       bool wol_irq_disabled;
 
        /* HW descriptors/checksum variables */
        bool desc_64b_en;
@@ -583,7 +576,6 @@ struct bcmgenet_priv {
        struct platform_device *pdev;
 
        /* WOL */
-       unsigned long wol_enabled;
        struct clk *clk_wol;
        u32 wolopts;
 
@@ -625,4 +617,12 @@ int bcmgenet_mii_config(struct net_device *dev);
 void bcmgenet_mii_exit(struct net_device *dev);
 void bcmgenet_mii_reset(struct net_device *dev);
 
+/* Wake-on-LAN routines */
+void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
+int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
+int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+                               enum bcmgenet_power_mode mode);
+void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+                              enum bcmgenet_power_mode mode);
+
 #endif /* __BCMGENET_H__ */
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c b/drivers/net/ethernet/broadcom/genet/bcmgenet_wol.c
new file mode 100644
index 0000000..b82b7e4
--- /dev/null
@@ -0,0 +1,206 @@
+/*
+ * Broadcom GENET (Gigabit Ethernet) Wake-on-LAN support
+ *
+ * Copyright (c) 2014 Broadcom Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt)                            "bcmgenet_wol: " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <linux/clk.h>
+#include <linux/version.h>
+#include <linux/platform_device.h>
+#include <net/arp.h>
+
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+#include <linux/inetdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/phy.h>
+
+#include "bcmgenet.h"
+
+/* ethtool function - get WOL (Wake-on-LAN) settings. Only Magic Packet
+ * Detection is supported through ethtool.
+ */
+void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       u32 reg;
+
+       wol->supported = WAKE_MAGIC | WAKE_MAGICSECURE;
+       wol->wolopts = priv->wolopts;
+       memset(wol->sopass, 0, sizeof(wol->sopass));
+
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_MS);
+               put_unaligned_be16(reg, &wol->sopass[0]);
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_PW_LS);
+               put_unaligned_be32(reg, &wol->sopass[2]);
+       }
+}
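
The 6-byte SecureOn password straddles two registers: the two most significant bytes live in UMAC_MPD_PW_MS and the remaining four in UMAC_MPD_PW_LS. For a hypothetical password aa:bb:cc:dd:ee:ff:

	/* hypothetical password aa:bb:cc:dd:ee:ff
	 *   UMAC_MPD_PW_MS = 0x0000aabb  (sopass[0..1], big-endian)
	 *   UMAC_MPD_PW_LS = 0xccddeeff  (sopass[2..5], big-endian)
	 */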
+
+/* ethtool function - set WOL (Wake on LAN) settings.
+ * Only for magic packet detection mode.
+ */
+int bcmgenet_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+       struct bcmgenet_priv *priv = netdev_priv(dev);
+       struct device *kdev = &priv->pdev->dev;
+       u32 reg;
+
+       if (!device_can_wakeup(kdev))
+               return -ENOTSUPP;
+
+       if (wol->wolopts & ~(WAKE_MAGIC | WAKE_MAGICSECURE))
+               return -EINVAL;
+
+       if (wol->wolopts & WAKE_MAGICSECURE) {
+               bcmgenet_umac_writel(priv, get_unaligned_be16(&wol->sopass[0]),
+                                    UMAC_MPD_PW_MS);
+               bcmgenet_umac_writel(priv, get_unaligned_be32(&wol->sopass[2]),
+                                    UMAC_MPD_PW_LS);
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+               reg |= MPD_PW_EN;
+               bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+       }
+
+       /* Flag the device and relevant IRQ as wakeup capable */
+       if (wol->wolopts) {
+               device_set_wakeup_enable(kdev, 1);
+               enable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = false;
+       } else {
+               device_set_wakeup_enable(kdev, 0);
+               /* Avoid unbalanced disable_irq_wake calls */
+               if (!priv->wol_irq_disabled)
+                       disable_irq_wake(priv->wol_irq);
+               priv->wol_irq_disabled = true;
+       }
+
+       priv->wolopts = wol->wolopts;
+
+       return 0;
+}
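
These two handlers are reached through the driver's struct ethtool_ops; the ops table itself is outside this hunk, but the wiring would look like this sketch:

	static const struct ethtool_ops bcmgenet_ethtool_ops_sketch = {
		.get_wol = bcmgenet_get_wol,
		.set_wol = bcmgenet_set_wol,
		/* remaining ops elided */
	};

From userspace this corresponds to e.g. "ethtool -s eth0 wol g" for plain magic-packet wake, or "wol s" together with "sopass aa:bb:cc:dd:ee:ff" for SecureOn.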
+
+static int bcmgenet_poll_wol_status(struct bcmgenet_priv *priv)
+{
+       struct net_device *dev = priv->dev;
+       int retries = 0;
+
+       while (!(bcmgenet_rbuf_readl(priv, RBUF_STATUS)
+               & RBUF_STATUS_WOL)) {
+               retries++;
+               if (retries > 5) {
+                       netdev_crit(dev, "polling wol mode timeout\n");
+                       return -ETIMEDOUT;
+               }
+               mdelay(1);
+       }
+
+       return retries;
+}
+
+int bcmgenet_wol_power_down_cfg(struct bcmgenet_priv *priv,
+                               enum bcmgenet_power_mode mode)
+{
+       struct net_device *dev = priv->dev;
+       u32 cpu_mask_clear;
+       int retries = 0;
+       u32 reg;
+
+       if (mode != GENET_POWER_WOL_MAGIC) {
+               netif_err(priv, wol, dev, "unsupported mode: %d\n", mode);
+               return -EINVAL;
+       }
+
+       /* disable RX */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       mdelay(10);
+
+       reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+       reg |= MPD_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Do not leave UniMAC in MPD-only mode if it never became WOL-ready */
+       retries = bcmgenet_poll_wol_status(priv);
+       if (retries < 0) {
+               reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+               reg &= ~MPD_EN;
+               bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+               return retries;
+       }
+
+       netif_dbg(priv, wol, dev, "MPD WOL-ready status set after %d msec\n",
+                 retries);
+
+       /* Enable CRC forward */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       priv->crc_fwd_en = 1;
+       reg |= CMD_CRC_FWD;
+
+       /* Receiver must be enabled for WOL MP detection */
+       reg |= CMD_RX_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+
+       if (priv->hw_params->flags & GENET_HAS_EXT) {
+               reg = bcmgenet_ext_readl(priv, EXT_EXT_PWR_MGMT);
+               reg &= ~EXT_ENERGY_DET_MASK;
+               bcmgenet_ext_writel(priv, reg, EXT_EXT_PWR_MGMT);
+       }
+
+       /* Enable the MPD interrupt */
+       cpu_mask_clear = UMAC_IRQ_MPD_R;
+
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_clear, INTRL2_CPU_MASK_CLEAR);
+
+       return 0;
+}
+
+void bcmgenet_wol_power_up_cfg(struct bcmgenet_priv *priv,
+                              enum bcmgenet_power_mode mode)
+{
+       u32 cpu_mask_set;
+       u32 reg;
+
+       if (mode != GENET_POWER_WOL_MAGIC) {
+               netif_err(priv, wol, priv->dev, "invalid mode: %d\n", mode);
+               return;
+       }
+
+       reg = bcmgenet_umac_readl(priv, UMAC_MPD_CTRL);
+       reg &= ~MPD_EN;
+       bcmgenet_umac_writel(priv, reg, UMAC_MPD_CTRL);
+
+       /* Disable CRC Forward */
+       reg = bcmgenet_umac_readl(priv, UMAC_CMD);
+       reg &= ~CMD_CRC_FWD;
+       bcmgenet_umac_writel(priv, reg, UMAC_CMD);
+       priv->crc_fwd_en = 0;
+
+       /* Stop monitoring magic packet IRQ */
+       cpu_mask_set = UMAC_IRQ_MPD_R;
+
+       bcmgenet_intrl2_0_writel(priv, cpu_mask_set, INTRL2_CPU_MASK_SET);
+}
index add8d85..1896161 100644
@@ -6,15 +6,6 @@
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
  * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
  */
 
 
@@ -44,15 +35,15 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
        u32 reg;
 
        bcmgenet_umac_writel(priv, (MDIO_RD | (phy_id << MDIO_PMD_SHIFT) |
-                       (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
+                            (location << MDIO_REG_SHIFT)), UMAC_MDIO_CMD);
        /* Start MDIO transaction*/
        reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
        reg |= MDIO_START_BUSY;
        bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
        wait_event_timeout(priv->wq,
-                       !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
-                               & MDIO_START_BUSY),
-                       HZ / 100);
+                          !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD)
+                          & MDIO_START_BUSY),
+                          HZ / 100);
        ret = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
 
        if (ret & MDIO_READ_FAIL)
@@ -63,22 +54,22 @@ static int bcmgenet_mii_read(struct mii_bus *bus, int phy_id, int location)
 
 /* write a value to the MII */
 static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
-                       int location, u16 val)
+                             int location, u16 val)
 {
        struct net_device *dev = bus->priv;
        struct bcmgenet_priv *priv = netdev_priv(dev);
        u32 reg;
 
        bcmgenet_umac_writel(priv, (MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
-                       (location << MDIO_REG_SHIFT) | (0xffff & val)),
-                       UMAC_MDIO_CMD);
+                            (location << MDIO_REG_SHIFT) | (0xffff & val)),
+                            UMAC_MDIO_CMD);
        reg = bcmgenet_umac_readl(priv, UMAC_MDIO_CMD);
        reg |= MDIO_START_BUSY;
        bcmgenet_umac_writel(priv, reg, UMAC_MDIO_CMD);
        wait_event_timeout(priv->wq,
-                       !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
-                               MDIO_START_BUSY),
-                       HZ / 100);
+                          !(bcmgenet_umac_readl(priv, UMAC_MDIO_CMD) &
+                          MDIO_START_BUSY),
+                          HZ / 100);
 
        return 0;
 }
@@ -136,17 +127,18 @@ static void bcmgenet_mii_setup(struct net_device *dev)
                /* pause capability */
                if (!phydev->pause)
                        cmd_bits |= CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE;
+       }
 
+       if (status_changed) {
                reg = bcmgenet_umac_readl(priv, UMAC_CMD);
                reg &= ~((CMD_SPEED_MASK << CMD_SPEED_SHIFT) |
                               CMD_HD_EN |
                               CMD_RX_PAUSE_IGNORE | CMD_TX_PAUSE_IGNORE);
                reg |= cmd_bits;
                bcmgenet_umac_writel(priv, reg, UMAC_CMD);
-       }
 
-       if (status_changed)
                phy_print_status(phydev);
+       }
 }
 
 void bcmgenet_mii_reset(struct net_device *dev)
@@ -247,7 +239,7 @@ int bcmgenet_mii_config(struct net_device *dev)
                phy_name = "external MII";
                phydev->supported &= PHY_BASIC_FEATURES;
                bcmgenet_sys_writel(priv,
-                               PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
+                                   PORT_MODE_EXT_EPHY, SYS_PORT_CTRL);
                break;
 
        case PHY_INTERFACE_MODE_REVMII:
@@ -283,7 +275,7 @@ int bcmgenet_mii_config(struct net_device *dev)
                reg |= RGMII_MODE_EN | id_mode_dis;
                bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
                bcmgenet_sys_writel(priv,
-                               PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
+                                   PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
                break;
        default:
                dev_err(kdev, "unknown phy mode: %d\n", priv->phy_interface);
@@ -362,7 +354,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
                priv->mii_bus->irq[phydev->addr] = PHY_POLL;
 
        pr_info("attached PHY at address %d [%s]\n",
-                       phydev->addr, phydev->drv->name);
+               phydev->addr, phydev->drv->name);
 
        return 0;
 }
@@ -387,9 +379,9 @@ static int bcmgenet_mii_alloc(struct bcmgenet_priv *priv)
        bus->read = bcmgenet_mii_read;
        bus->write = bcmgenet_mii_write;
        snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d",
-                       priv->pdev->name, priv->pdev->id);
+                priv->pdev->name, priv->pdev->id);
 
-       bus->irq = kzalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
        if (!bus->irq) {
                mdiobus_free(priv->mii_bus);
                return -ENOMEM;
index df2792d..8afa579 100644
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
        return 0;
 }
 
-#define NVRAM_CMD_TIMEOUT 100
+#define NVRAM_CMD_TIMEOUT 5000
 
 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 {
@@ -3232,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
 
        tw32(NVRAM_CMD, nvram_cmd);
        for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
-               udelay(10);
+               usleep_range(10, 40);
                if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
                        udelay(10);
                        break;
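
The two changes above scale the worst-case wait budget, not just the loop count, and stop busy-waiting while doing so:

	/* worst-case polling budget, approximately:
	 *   before: 100 iterations  * udelay(10)           ~=   1 ms, busy-wait
	 *   after : 5000 iterations * usleep_range(10, 40) ~= 50-200 ms, sleeping
	 */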
@@ -7854,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
                netif_wake_queue(tp->dev);
        }
 
-       segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
-       if (IS_ERR(segs))
+       segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
+       if (IS_ERR(segs) || !segs)
                goto tg3_tso_bug_end;
 
        do {
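
skb_gso_segment() can return an ERR_PTR on failure, but may also return NULL (nothing to segment), so both cases must be rejected before walking the returned list; the general consumer pattern is:

	segs = skb_gso_segment(skb, features);
	if (IS_ERR_OR_NULL(segs))
		goto drop;	/* hard error, or nothing to segment */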
index 6a68e8d..6f72771 100644
@@ -68,10 +68,8 @@ bfa_cb_image_get_chunk(enum bfi_asic_gen asic_gen, u32 off)
        switch (asic_gen) {
        case BFI_ASIC_GEN_CT:
                return (bfi_image_ct_cna + off);
-               break;
        case BFI_ASIC_GEN_CT2:
                return (bfi_image_ct2_cna + off);
-               break;
        default:
                return NULL;
        }
@@ -83,10 +81,8 @@ bfa_cb_image_get_size(enum bfi_asic_gen asic_gen)
        switch (asic_gen) {
        case BFI_ASIC_GEN_CT:
                return bfi_image_ct_cna_size;
-               break;
        case BFI_ASIC_GEN_CT2:
                return bfi_image_ct2_cna_size;
-               break;
        default:
                return 0;
        }
index e9daa07..ca5d779 100644
@@ -52,6 +52,9 @@
                                        | MACB_BIT(TXERR))
 #define MACB_TX_INT_FLAGS      (MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
 
+#define MACB_MAX_TX_LEN                ((unsigned int)((1 << MACB_TX_FRMLEN_SIZE) - 1))
+#define GEM_MAX_TX_LEN         ((unsigned int)((1 << GEM_TX_FRMLEN_SIZE) - 1))
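
Each buffer descriptor carries its buffer length in a fixed-width bit field, so the largest chunk one descriptor can describe is (1 << FRMLEN_SIZE) - 1 bytes. With a hypothetical 11-bit field:

	/* hypothetical 11-bit length field:
	 *   (1 << 11) - 1 = 2047 bytes per descriptor; longer buffers
	 *   (e.g. jumbo-frame fragments) must be split across several
	 *   descriptors, which is what macb_tx_map() below does.
	 */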
+
 /*
  * Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
@@ -264,7 +267,8 @@ static void macb_handle_link_change(struct net_device *dev)
                                reg |= MACB_BIT(FD);
                        if (phydev->speed == SPEED_100)
                                reg |= MACB_BIT(SPD);
-                       if (phydev->speed == SPEED_1000)
+                       if (phydev->speed == SPEED_1000 &&
+                           bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
                                reg |= GEM_BIT(GBE);
 
                        macb_or_gem_writel(bp, NCFGR, reg);
@@ -337,7 +341,7 @@ static int macb_mii_probe(struct net_device *dev)
        }
 
        /* mask with MAC supported features */
-       if (macb_is_gem(bp))
+       if (macb_is_gem(bp) && bp->caps & MACB_CAPS_GIGABIT_MODE_AVAILABLE)
                phydev->supported &= PHY_GBIT_FEATURES;
        else
                phydev->supported &= PHY_BASIC_FEATURES;
@@ -467,6 +471,24 @@ static int macb_halt_tx(struct macb *bp)
        return -ETIMEDOUT;
 }
 
+static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
+{
+       if (tx_skb->mapping) {
+               if (tx_skb->mapped_as_page)
+                       dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
+                                      tx_skb->size, DMA_TO_DEVICE);
+               else
+                       dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
+                                        tx_skb->size, DMA_TO_DEVICE);
+               tx_skb->mapping = 0;
+       }
+
+       if (tx_skb->skb) {
+               dev_kfree_skb_any(tx_skb->skb);
+               tx_skb->skb = NULL;
+       }
+}
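
The mapped_as_page flag exists because the DMA API requires the unmap call to match the original map call:

	/* pairing rule (sketch):
	 *   dma_map_single()   <->  dma_unmap_single()  (linear skb data)
	 *   skb_frag_dma_map() <->  dma_unmap_page()    (paged fragments)
	 * tx_skb->mapped_as_page records which mapping was used.
	 */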
+
 static void macb_tx_error_task(struct work_struct *work)
 {
        struct macb     *bp = container_of(work, struct macb, tx_error_task);
@@ -504,10 +526,23 @@ static void macb_tx_error_task(struct work_struct *work)
                skb = tx_skb->skb;
 
                if (ctrl & MACB_BIT(TX_USED)) {
-                       netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-                                   macb_tx_ring_wrap(tail), skb->data);
-                       bp->stats.tx_packets++;
-                       bp->stats.tx_bytes += skb->len;
+                       /* skb is set for the last buffer of the frame */
+                       while (!skb) {
+                               macb_tx_unmap(bp, tx_skb);
+                               tail++;
+                               tx_skb = macb_tx_skb(bp, tail);
+                               skb = tx_skb->skb;
+                       }
+
+                       /* ctrl still refers to the first buffer descriptor
+                        * since it's the only one written back by the hardware
+                        */
+                       if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
+                               netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+                                           macb_tx_ring_wrap(tail), skb->data);
+                               bp->stats.tx_packets++;
+                               bp->stats.tx_bytes += skb->len;
+                       }
                } else {
                        /*
                         * "Buffers exhausted mid-frame" errors may only happen
@@ -521,10 +556,7 @@ static void macb_tx_error_task(struct work_struct *work)
                        desc->ctrl = ctrl | MACB_BIT(TX_USED);
                }
 
-               dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-                                DMA_TO_DEVICE);
-               tx_skb->skb = NULL;
-               dev_kfree_skb(skb);
+               macb_tx_unmap(bp, tx_skb);
        }
 
        /* Make descriptor updates visible to hardware */
@@ -572,20 +604,35 @@ static void macb_tx_interrupt(struct macb *bp)
 
                ctrl = desc->ctrl;
 
+               /* TX_USED bit is only set by hardware on the very first buffer
+                * descriptor of the transmitted frame.
+                */
                if (!(ctrl & MACB_BIT(TX_USED)))
                        break;
 
-               tx_skb = macb_tx_skb(bp, tail);
-               skb = tx_skb->skb;
+               /* Process all buffers of the current transmitted frame */
+               for (;; tail++) {
+                       tx_skb = macb_tx_skb(bp, tail);
+                       skb = tx_skb->skb;
+
+                       /* First, update TX stats if needed */
+                       if (skb) {
+                               netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+                                           macb_tx_ring_wrap(tail), skb->data);
+                               bp->stats.tx_packets++;
+                               bp->stats.tx_bytes += skb->len;
+                       }
 
-               netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-                       macb_tx_ring_wrap(tail), skb->data);
-               dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
-                                DMA_TO_DEVICE);
-               bp->stats.tx_packets++;
-               bp->stats.tx_bytes += skb->len;
-               tx_skb->skb = NULL;
-               dev_kfree_skb_irq(skb);
+                       /* Now we can safely release resources */
+                       macb_tx_unmap(bp, tx_skb);
+
+                       /* skb is set only for the last buffer of the frame.
+                        * WARNING: at this point skb has been freed by
+                        * macb_tx_unmap().
+                        */
+                       if (skb)
+                               break;
+               }
        }
 
        bp->tx_tail = tail;
@@ -718,6 +765,10 @@ static int gem_rx(struct macb *bp, int budget)
 
                skb->protocol = eth_type_trans(skb, bp->dev);
                skb_checksum_none_assert(skb);
+               if (bp->dev->features & NETIF_F_RXCSUM &&
+                   !(bp->dev->flags & IFF_PROMISC) &&
+                   GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK)
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
 
                bp->stats.rx_packets++;
                bp->stats.rx_bytes += skb->len;
@@ -1001,15 +1052,145 @@ static void macb_poll_controller(struct net_device *dev)
 }
 #endif
 
-static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
+                                                    unsigned int len)
+{
+       return (len + bp->max_tx_length - 1) / bp->max_tx_length;
+}
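
This is plain ceiling division, equivalent to DIV_ROUND_UP(len, bp->max_tx_length). With a hypothetical max_tx_length of 2047, a 4000-byte chunk costs two descriptors:

	/* hypothetical example: max_tx_length = 2047, len = 4000
	 *   (4000 + 2047 - 1) / 2047 = 2 descriptors
	 */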
+
+static unsigned int macb_tx_map(struct macb *bp,
+                               struct sk_buff *skb)
 {
-       struct macb *bp = netdev_priv(dev);
        dma_addr_t mapping;
-       unsigned int len, entry;
+       unsigned int len, entry, i, tx_head = bp->tx_head;
+       struct macb_tx_skb *tx_skb = NULL;
        struct macb_dma_desc *desc;
-       struct macb_tx_skb *tx_skb;
+       unsigned int offset, size, count = 0;
+       unsigned int f, nr_frags = skb_shinfo(skb)->nr_frags;
+       unsigned int eof = 1;
        u32 ctrl;
+
+       /* First, map non-paged data */
+       len = skb_headlen(skb);
+       offset = 0;
+       while (len) {
+               size = min(len, bp->max_tx_length);
+               entry = macb_tx_ring_wrap(tx_head);
+               tx_skb = &bp->tx_skb[entry];
+
+               mapping = dma_map_single(&bp->pdev->dev,
+                                        skb->data + offset,
+                                        size, DMA_TO_DEVICE);
+               if (dma_mapping_error(&bp->pdev->dev, mapping))
+                       goto dma_error;
+
+               /* Save info to properly release resources */
+               tx_skb->skb = NULL;
+               tx_skb->mapping = mapping;
+               tx_skb->size = size;
+               tx_skb->mapped_as_page = false;
+
+               len -= size;
+               offset += size;
+               count++;
+               tx_head++;
+       }
+
+       /* Then, map paged data from fragments */
+       for (f = 0; f < nr_frags; f++) {
+               const skb_frag_t *frag = &skb_shinfo(skb)->frags[f];
+
+               len = skb_frag_size(frag);
+               offset = 0;
+               while (len) {
+                       size = min(len, bp->max_tx_length);
+                       entry = macb_tx_ring_wrap(tx_head);
+                       tx_skb = &bp->tx_skb[entry];
+
+                       mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
+                                                  offset, size, DMA_TO_DEVICE);
+                       if (dma_mapping_error(&bp->pdev->dev, mapping))
+                               goto dma_error;
+
+                       /* Save info to properly release resources */
+                       tx_skb->skb = NULL;
+                       tx_skb->mapping = mapping;
+                       tx_skb->size = size;
+                       tx_skb->mapped_as_page = true;
+
+                       len -= size;
+                       offset += size;
+                       count++;
+                       tx_head++;
+               }
+       }
+
+       /* Should never happen */
+       if (unlikely(tx_skb == NULL)) {
+               netdev_err(bp->dev, "BUG! empty skb!\n");
+               return 0;
+       }
+
+       /* This is the last buffer of the frame: save socket buffer */
+       tx_skb->skb = skb;
+
+       /* Update TX ring: update buffer descriptors in reverse order
+        * to avoid race condition
+        */
+
+       /* Set 'TX_USED' bit in buffer descriptor at tx_head position
+        * to set the end of TX queue
+        */
+       i = tx_head;
+       entry = macb_tx_ring_wrap(i);
+       ctrl = MACB_BIT(TX_USED);
+       desc = &bp->tx_ring[entry];
+       desc->ctrl = ctrl;
+
+       do {
+               i--;
+               entry = macb_tx_ring_wrap(i);
+               tx_skb = &bp->tx_skb[entry];
+               desc = &bp->tx_ring[entry];
+
+               ctrl = (u32)tx_skb->size;
+               if (eof) {
+                       ctrl |= MACB_BIT(TX_LAST);
+                       eof = 0;
+               }
+               if (unlikely(entry == (TX_RING_SIZE - 1)))
+                       ctrl |= MACB_BIT(TX_WRAP);
+
+               /* Set TX buffer descriptor */
+               desc->addr = tx_skb->mapping;
+               /* desc->addr must be visible to the hardware before clearing
+                * the 'TX_USED' bit in desc->ctrl.
+                */
+               wmb();
+               desc->ctrl = ctrl;
+       } while (i != bp->tx_head);
+
+       bp->tx_head = tx_head;
+
+       return count;
+
+dma_error:
+       netdev_err(bp->dev, "TX DMA map failed\n");
+
+       for (i = bp->tx_head; i != tx_head; i++) {
+               tx_skb = macb_tx_skb(bp, i);
+
+               macb_tx_unmap(bp, tx_skb);
+       }
+
+       return 0;
+}
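
The dma_error unwind above relies on macb_tx_unmap(), which falls
outside this hunk.  A minimal sketch of what that helper is assumed to
do, keyed off the mapped_as_page flag recorded during mapping
(illustrative, not part of the patch):

	static void macb_tx_unmap(struct macb *bp, struct macb_tx_skb *tx_skb)
	{
		if (tx_skb->mapping) {
			if (tx_skb->mapped_as_page)
				dma_unmap_page(&bp->pdev->dev, tx_skb->mapping,
					       tx_skb->size, DMA_TO_DEVICE);
			else
				dma_unmap_single(&bp->pdev->dev, tx_skb->mapping,
						 tx_skb->size, DMA_TO_DEVICE);
			tx_skb->mapping = 0;
		}

		if (tx_skb->skb) {
			dev_kfree_skb_any(tx_skb->skb);
			tx_skb->skb = NULL;
		}
	}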
+
+static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+       struct macb *bp = netdev_priv(dev);
        unsigned long flags;
+       unsigned int count, nr_frags, frag_size, f;
 
 #if defined(DEBUG) && defined(VERBOSE_DEBUG)
        netdev_vdbg(bp->dev,
@@ -1020,44 +1201,34 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
                       skb->data, 16, true);
 #endif
 
-       len = skb->len;
+       /* Count how many TX buffer descriptors are needed to send this
+        * socket buffer: skb fragments of jumbo frames may need to be
+        * split into several buffer descriptors.
+        */
+       count = macb_count_tx_descriptors(bp, skb_headlen(skb));
+       nr_frags = skb_shinfo(skb)->nr_frags;
+       for (f = 0; f < nr_frags; f++) {
+               frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
+               count += macb_count_tx_descriptors(bp, frag_size);
+       }
+
        spin_lock_irqsave(&bp->lock, flags);
 
        /* This is a hard error, log it. */
-       if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+       if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < count) {
                netif_stop_queue(dev);
                spin_unlock_irqrestore(&bp->lock, flags);
-               netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
                netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
                           bp->tx_head, bp->tx_tail);
                return NETDEV_TX_BUSY;
        }
 
-       entry = macb_tx_ring_wrap(bp->tx_head);
-       netdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
-       mapping = dma_map_single(&bp->pdev->dev, skb->data,
-                                len, DMA_TO_DEVICE);
-       if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+       /* Map socket buffer for DMA transfer */
+       if (!macb_tx_map(bp, skb)) {
                dev_kfree_skb_any(skb);
                goto unlock;
        }
 
-       bp->tx_head++;
-       tx_skb = &bp->tx_skb[entry];
-       tx_skb->skb = skb;
-       tx_skb->mapping = mapping;
-       netdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
-                  skb->data, (unsigned long)mapping);
-
-       ctrl = MACB_BF(TX_FRMLEN, len);
-       ctrl |= MACB_BIT(TX_LAST);
-       if (entry == (TX_RING_SIZE - 1))
-               ctrl |= MACB_BIT(TX_WRAP);
-
-       desc = &bp->tx_ring[entry];
-       desc->addr = mapping;
-       desc->ctrl = ctrl;
-
        /* Make newly initialized descriptor visible to hardware */
        wmb();
 
@@ -1342,7 +1513,7 @@ static u32 macb_dbw(struct macb *bp)
 /*
  * Configure the receive DMA engine
  * - use the correct receive buffer size
- * - set the possibility to use INCR16 bursts
+ * - set the best burst length for DMA operations
  *   (if not supported by the FIFO, it will fall back to the default)
  * - set both rx/tx packet buffers to full memory size
  * These are configurable parameters for GEM.
@@ -1354,24 +1525,20 @@ static void macb_configure_dma(struct macb *bp)
        if (macb_is_gem(bp)) {
                dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
                dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
-               dmacfg |= GEM_BF(FBLDO, 16);
+               if (bp->dma_burst_length)
+                       dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
                dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
                dmacfg &= ~GEM_BIT(ENDIA);
+               if (bp->dev->features & NETIF_F_HW_CSUM)
+                       dmacfg |= GEM_BIT(TXCOEN);
+               else
+                       dmacfg &= ~GEM_BIT(TXCOEN);
+               netdev_dbg(bp->dev, "Cadence configure DMA with 0x%08x\n",
+                          dmacfg);
                gem_writel(bp, DMACFG, dmacfg);
        }
 }
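
The switch from OR-ing GEM_BF(FBLDO, 16) to GEM_BFINS() is deliberate:
OR can only set bits, while BFINS clears the field before inserting the
new value, so a smaller burst length (e.g. sama5d4's 4) cannot be
polluted by bits left over in the register.  A sketch of how the macro
pair is assumed to expand, following the MACB_BF pattern used
throughout macb.h:

	#define GEM_BF(name, value)					\
		(((value) & ((1 << GEM_##name##_SIZE) - 1))		\
		 << GEM_##name##_OFFSET)
	#define GEM_BFINS(name, value, old)				\
		(((old) & ~(((1 << GEM_##name##_SIZE) - 1)		\
			    << GEM_##name##_OFFSET))			\
		 | GEM_BF(name, value))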
 
-/*
- * Configure peripheral capacities according to integration options used
- */
-static void macb_configure_caps(struct macb *bp)
-{
-       if (macb_is_gem(bp)) {
-               if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
-                       bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
-       }
-}
-
 static void macb_init_hw(struct macb *bp)
 {
        u32 config;
@@ -1386,6 +1553,8 @@ static void macb_init_hw(struct macb *bp)
        config |= MACB_BIT(BIG);                /* Receive oversized frames */
        if (bp->dev->flags & IFF_PROMISC)
                config |= MACB_BIT(CAF);        /* Copy All Frames */
+       else if (macb_is_gem(bp) && bp->dev->features & NETIF_F_RXCSUM)
+               config |= GEM_BIT(RXCOEN);
        if (!(bp->dev->flags & IFF_BROADCAST))
                config |= MACB_BIT(NBC);        /* No BroadCast */
        config |= macb_dbw(bp);
@@ -1394,7 +1563,6 @@ static void macb_init_hw(struct macb *bp)
        bp->duplex = DUPLEX_HALF;
 
        macb_configure_dma(bp);
-       macb_configure_caps(bp);
 
        /* Initialize TX and RX buffers */
        macb_writel(bp, RBQP, bp->rx_ring_dma);
@@ -1500,13 +1668,22 @@ void macb_set_rx_mode(struct net_device *dev)
 
        cfg = macb_readl(bp, NCFGR);
 
-       if (dev->flags & IFF_PROMISC)
+       if (dev->flags & IFF_PROMISC) {
                /* Enable promiscuous mode */
                cfg |= MACB_BIT(CAF);
-       else if (dev->flags & (~IFF_PROMISC))
-                /* Disable promiscuous mode */
+
+               /* Disable RX checksum offload */
+               if (macb_is_gem(bp))
+                       cfg &= ~GEM_BIT(RXCOEN);
+       } else {
+               /* Disable promiscuous mode */
                cfg &= ~MACB_BIT(CAF);
 
+               /* Enable RX checksum offload only if requested */
+               if (macb_is_gem(bp) && dev->features & NETIF_F_RXCSUM)
+                       cfg |= GEM_BIT(RXCOEN);
+       }
+
        if (dev->flags & IFF_ALLMULTI) {
                /* Enable all multicast mode */
                macb_or_gem_writel(bp, HRB, -1);
@@ -1767,6 +1944,40 @@ int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 }
 EXPORT_SYMBOL_GPL(macb_ioctl);
 
+static int macb_set_features(struct net_device *netdev,
+                            netdev_features_t features)
+{
+       struct macb *bp = netdev_priv(netdev);
+       netdev_features_t changed = features ^ netdev->features;
+
+       /* TX checksum offload */
+       if ((changed & NETIF_F_HW_CSUM) && macb_is_gem(bp)) {
+               u32 dmacfg;
+
+               dmacfg = gem_readl(bp, DMACFG);
+               if (features & NETIF_F_HW_CSUM)
+                       dmacfg |= GEM_BIT(TXCOEN);
+               else
+                       dmacfg &= ~GEM_BIT(TXCOEN);
+               gem_writel(bp, DMACFG, dmacfg);
+       }
+
+       /* RX checksum offload */
+       if ((changed & NETIF_F_RXCSUM) && macb_is_gem(bp)) {
+               u32 netcfg;
+
+               netcfg = gem_readl(bp, NCFGR);
+               if (features & NETIF_F_RXCSUM &&
+                   !(netdev->flags & IFF_PROMISC))
+                       netcfg |= GEM_BIT(RXCOEN);
+               else
+                       netcfg &= ~GEM_BIT(RXCOEN);
+               gem_writel(bp, NCFGR, netcfg);
+       }
+
+       return 0;
+}
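
Note that drivers do not call their ndo_set_features hook directly; the
networking core invokes it after renegotiating the feature set, e.g.
when the user toggles an offload through ethtool.  If the driver itself
changed dev->hw_features at runtime, it would ask the core to
re-evaluate, roughly (illustrative only, requires RTNL):

	netdev_update_features(netdev);	/* ends up in macb_set_features() */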
+
 static const struct net_device_ops macb_netdev_ops = {
        .ndo_open               = macb_open,
        .ndo_stop               = macb_close,
@@ -1780,20 +1991,77 @@ static const struct net_device_ops macb_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = macb_poll_controller,
 #endif
+       .ndo_set_features       = macb_set_features,
 };
 
 #if defined(CONFIG_OF)
+static struct macb_config pc302gem_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+};
+
+static struct macb_config sama5d3_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
+       .dma_burst_length = 16,
+};
+
+static struct macb_config sama5d4_config = {
+       .caps = 0,
+       .dma_burst_length = 4,
+};
+
 static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,at32ap7000-macb" },
        { .compatible = "cdns,at91sam9260-macb" },
        { .compatible = "cdns,macb" },
-       { .compatible = "cdns,pc302-gem" },
-       { .compatible = "cdns,gem" },
+       { .compatible = "cdns,pc302-gem", .data = &pc302gem_config },
+       { .compatible = "cdns,gem", .data = &pc302gem_config },
+       { .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+       { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
 #endif
 
+/*
+ * Configure peripheral capabilities according to the device tree
+ * and the integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+       u32 dcfg;
+       const struct of_device_id *match;
+       const struct macb_config *config;
+
+       if (bp->pdev->dev.of_node) {
+               match = of_match_node(macb_dt_ids, bp->pdev->dev.of_node);
+               if (match && match->data) {
+                       config = (const struct macb_config *)match->data;
+
+                       bp->caps = config->caps;
+                       /*
+                        * As we have access to the matching node, configure
+                        * DMA burst length as well
+                        */
+                       bp->dma_burst_length = config->dma_burst_length;
+               }
+       }
+
+       if (MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2)
+               bp->caps |= MACB_CAPS_MACB_IS_GEM;
+
+       if (macb_is_gem(bp)) {
+               dcfg = gem_readl(bp, DCFG1);
+               if (GEM_BFEXT(IRQCOR, dcfg) == 0)
+                       bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+               dcfg = gem_readl(bp, DCFG2);
+               if ((dcfg & (GEM_BIT(RX_PKT_BUFF) | GEM_BIT(TX_PKT_BUFF))) == 0)
+                       bp->caps |= MACB_CAPS_FIFO_MODE;
+       }
+
+       netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps);
+}
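
With this table-driven matching in place, supporting a further SoC
variant reduces to one config struct plus one of_device_id entry.  A
hypothetical example (the compatible string and the values below are
invented for illustration):

	static struct macb_config my_soc_config = {
		.caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE,
		.dma_burst_length = 8,
	};

	/* ... and in macb_dt_ids[], before the sentinel: */
	{ .compatible = "vendor,my-soc-gem", .data = &my_soc_config },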
+
 static int __init macb_probe(struct platform_device *pdev)
 {
        struct macb_platform_data *pdata;
@@ -1828,9 +2096,6 @@ static int __init macb_probe(struct platform_device *pdev)
 
        SET_NETDEV_DEV(dev, &pdev->dev);
 
-       /* TODO: Actually, we have some interesting features... */
-       dev->features |= 0;
-
        bp = netdev_priv(dev);
        bp->pdev = pdev;
        bp->dev = dev;
@@ -1897,19 +2162,33 @@ static int __init macb_probe(struct platform_device *pdev)
 
        dev->base_addr = regs->start;
 
+       /* set up capabilities */
+       macb_configure_caps(bp);
+
        /* set up the appropriate routines for the adapter type */
        if (macb_is_gem(bp)) {
+               bp->max_tx_length = GEM_MAX_TX_LEN;
                bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
                bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
                bp->macbgem_ops.mog_init_rings = gem_init_rings;
                bp->macbgem_ops.mog_rx = gem_rx;
        } else {
+               bp->max_tx_length = MACB_MAX_TX_LEN;
                bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
                bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
                bp->macbgem_ops.mog_init_rings = macb_init_rings;
                bp->macbgem_ops.mog_rx = macb_rx;
        }
 
+       /* Set features */
+       dev->hw_features = NETIF_F_SG;
+       /* Checksum offload is only available on gem with packet buffer */
+       if (macb_is_gem(bp) && !(bp->caps & MACB_CAPS_FIFO_MODE))
+               dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_RXCSUM;
+       if (bp->caps & MACB_CAPS_SG_DISABLED)
+               dev->hw_features &= ~NETIF_F_SG;
+       dev->features = dev->hw_features;
+
        /* Set MII management clock divider */
        config = macb_mdc_clk_div(bp);
        config |= macb_dbw(bp);
index 51c0244..517c09d 100644 (file)
 #define GEM_CLK_SIZE                           3
 #define GEM_DBW_OFFSET                         21
 #define GEM_DBW_SIZE                           2
+#define GEM_RXCOEN_OFFSET                      24
+#define GEM_RXCOEN_SIZE                                1
 
 /* Constants for data bus width. */
 #define GEM_DBW32                              0
 #define GEM_DBWDEF_OFFSET                      25
 #define GEM_DBWDEF_SIZE                                3
 
+/* Bitfields in DCFG2. */
+#define GEM_RX_PKT_BUFF_OFFSET                 20
+#define GEM_RX_PKT_BUFF_SIZE                   1
+#define GEM_TX_PKT_BUFF_OFFSET                 21
+#define GEM_TX_PKT_BUFF_SIZE                   1
+
 /* Constants for CLK */
 #define MACB_CLK_DIV8                          0
 #define MACB_CLK_DIV16                         1
 #define MACB_MAN_CODE                          2
 
 /* Capability mask bits */
-#define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x1
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x00000001
+#define MACB_CAPS_FIFO_MODE                    0x10000000
+#define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
+#define MACB_CAPS_SG_DISABLED                  0x40000000
+#define MACB_CAPS_MACB_IS_GEM                  0x80000000
 
 /* Bit manipulation macros */
 #define MACB_BIT(name)                                 \
@@ -442,6 +454,14 @@ struct macb_dma_desc {
 #define MACB_RX_BROADCAST_OFFSET               31
 #define MACB_RX_BROADCAST_SIZE                 1
 
+/* RX checksum offload disabled: bit 24 clear in NCFGR */
+#define GEM_RX_TYPEID_MATCH_OFFSET             22
+#define GEM_RX_TYPEID_MATCH_SIZE               2
+
+/* RX checksum offload enabled: bit 24 set in NCFGR */
+#define GEM_RX_CSUM_OFFSET                     22
+#define GEM_RX_CSUM_SIZE                       2
+
 #define MACB_TX_FRMLEN_OFFSET                  0
 #define MACB_TX_FRMLEN_SIZE                    11
 #define MACB_TX_LAST_OFFSET                    15
@@ -459,14 +479,32 @@ struct macb_dma_desc {
 #define MACB_TX_USED_OFFSET                    31
 #define MACB_TX_USED_SIZE                      1
 
+#define GEM_TX_FRMLEN_OFFSET                   0
+#define GEM_TX_FRMLEN_SIZE                     14
+
+/* Buffer descriptor constants */
+#define GEM_RX_CSUM_NONE                       0
+#define GEM_RX_CSUM_IP_ONLY                    1
+#define GEM_RX_CSUM_IP_TCP                     2
+#define GEM_RX_CSUM_IP_UDP                     3
+
+/* limit RX checksum offload to TCP and UDP packets */
+#define GEM_RX_CSUM_CHECKED_MASK               2
+
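
Bits 22-23 of the RX descriptor control word encode what the GEM
hardware actually verified; the TCP (2) and UDP (3) encodings both have
bit 1 set, which is why the CHECKED mask is 2.  A sketch of how an RX
handler is expected to consume these fields (the matching gem_rx()
change is not visible in this hunk):

	if (bp->dev->features & NETIF_F_RXCSUM &&
	    !(bp->dev->flags & IFF_PROMISC) &&
	    (GEM_BFEXT(RX_CSUM, ctrl) & GEM_RX_CSUM_CHECKED_MASK))
		skb->ip_summed = CHECKSUM_UNNECESSARY;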
 /**
  * struct macb_tx_skb - data about an skb which is being transmitted
- * @skb: skb currently being transmitted
- * @mapping: DMA address of the skb's data buffer
+ * @skb: skb currently being transmitted, only set for the last buffer
+ *       of the frame
+ * @mapping: DMA address of the skb's fragment buffer
+ * @size: size of the DMA mapped buffer
+ * @mapped_as_page: true when buffer was mapped with skb_frag_dma_map(),
+ *                  false when buffer was mapped with dma_map_single()
  */
 struct macb_tx_skb {
        struct sk_buff          *skb;
        dma_addr_t              mapping;
+       size_t                  size;
+       bool                    mapped_as_page;
 };
 
 /*
@@ -554,6 +592,11 @@ struct macb_or_gem_ops {
        int     (*mog_rx)(struct macb *bp, int budget);
 };
 
+struct macb_config {
+       u32                     caps;
+       unsigned int            dma_burst_length;
+};
+
 struct macb {
        void __iomem            *regs;
 
@@ -595,6 +638,7 @@ struct macb {
        unsigned int            duplex;
 
        u32                     caps;
+       unsigned int            dma_burst_length;
 
        phy_interface_t         phy_interface;
 
@@ -602,6 +646,7 @@ struct macb {
        struct sk_buff *skb;                    /* holds skb until xmit interrupt completes */
        dma_addr_t skb_physaddr;                /* phys addr from pci_map_single */
        int skb_length;                         /* saved skb length for pci_unmap_single */
+       unsigned int            max_tx_length;
 };
 
 extern const struct ethtool_ops macb_ethtool_ops;
@@ -615,7 +660,7 @@ void macb_get_hwaddr(struct macb *bp);
 
 static inline bool macb_is_gem(struct macb *bp)
 {
-       return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+       return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
 }
 
 #endif /* _MACB_H */
index 570222c..c3ce9df 100644 (file)
@@ -86,6 +86,17 @@ config CHELSIO_T4
          To compile this driver as a module choose M here; the module
          will be called cxgb4.
 
+config CHELSIO_T4_DCB
+       bool "Data Center Bridging (DCB) Support for Chelsio T4/T5 cards"
+       default n
+       depends on CHELSIO_T4 && DCB
+       ---help---
+         Enable DCB support through the rtnetlink interface.
+         Say Y here if you want to enable Data Center Bridging (DCB) support
+         in the driver.
+
+         If unsure, say N.
+
 config CHELSIO_T4VF
        tristate "Chelsio Communications T4/T5 Virtual Function Ethernet support"
        depends on PCI
index 4986674..1df65c9 100644 (file)
@@ -5,3 +5,4 @@
 obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
 
 cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
+cxgb4-$(CONFIG_CHELSIO_T4_DCB) += cxgb4_dcb.o
index f503dce..c9b922c 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -85,7 +85,8 @@ enum {
        MEMWIN1_BASE_T5  = 0x52000,
        MEMWIN2_APERTURE = 65536,
        MEMWIN2_BASE     = 0x30000,
-       MEMWIN2_BASE_T5  = 0x54000,
+       MEMWIN2_APERTURE_T5 = 131072,
+       MEMWIN2_BASE_T5  = 0x60000,
 };
 
 enum dev_master {
@@ -309,6 +310,9 @@ struct adapter_params {
 
        unsigned int ofldq_wr_cred;
        bool ulptx_memwrite_dsgl;          /* use of T5 DSGL allowed */
+
+       unsigned int max_ordird_qp;       /* Max read depth per RDMA QP */
+       unsigned int max_ird_adapter;     /* Max read depth per adapter */
 };
 
 #include "t4fw_api.h"
@@ -373,6 +377,8 @@ enum {
 struct adapter;
 struct sge_rspq;
 
+#include "cxgb4_dcb.h"
+
 struct port_info {
        struct adapter *adapter;
        u16    viid;
@@ -389,6 +395,9 @@ struct port_info {
        u8     rss_mode;
        struct link_config link_cfg;
        u16   *rss;
+#ifdef CONFIG_CHELSIO_T4_DCB
+       struct port_dcb_info dcb;     /* Data Center Bridging support */
+#endif
 };
 
 struct dentry;
@@ -603,6 +612,7 @@ struct l2t_data;
 struct adapter {
        void __iomem *regs;
        void __iomem *bar2;
+       u32 t4_bar0;
        struct pci_dev *pdev;
        struct device *pdev_dev;
        unsigned int mbox;
@@ -647,6 +657,7 @@ struct adapter {
        struct dentry *debugfs_root;
 
        spinlock_t stats_lock;
+       spinlock_t win0_lock ____cacheline_aligned_in_smp;
 };
 
 /* Defined bit width of user definable filter tuples
@@ -855,6 +866,7 @@ void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
 void *t4_alloc_mem(size_t size);
 
 void t4_free_sge_resources(struct adapter *adap);
+void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q);
 irq_handler_t t4_intr_handler(struct adapter *adap);
 netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
 int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
@@ -941,6 +953,7 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
 void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
                      unsigned int data_reg, u32 *vals, unsigned int nregs,
                      unsigned int start_idx);
+void t4_hw_pci_read_cfg4(struct adapter *adapter, int reg, u32 *val);
 
 struct fw_filter_wr;
 
@@ -952,8 +965,17 @@ int t4_wait_dev_ready(struct adapter *adap);
 int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
                  struct link_config *lc);
 int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
-int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
-                   __be32 *buf);
+
+#define T4_MEMORY_WRITE        0
+#define T4_MEMORY_READ         1
+int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr, u32 len,
+                __be32 *buf, int dir);
+static inline int t4_memory_write(struct adapter *adap, int mtype, u32 addr,
+                                 u32 len, __be32 *buf)
+{
+       return t4_memory_rw(adap, 0, mtype, addr, len, buf, 0);
+}
+
 int t4_seeprom_wp(struct adapter *adapter, bool enable);
 int get_vpd_params(struct adapter *adapter, struct vpd_params *p);
 int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
@@ -1007,6 +1029,10 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
                  unsigned int vf, unsigned int nparams, const u32 *params,
                  const u32 *val);
+int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+                         unsigned int pf, unsigned int vf,
+                         unsigned int nparams, const u32 *params,
+                         const u32 *val);
 int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
                unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
                unsigned int rxqi, unsigned int rxq, unsigned int tc,
@@ -1025,6 +1051,8 @@ int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
                  int idx, const u8 *addr, bool persist, bool add_smt);
 int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
                     bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+                       unsigned int viid, bool rx_en, bool tx_en, bool dcb_en);
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en);
 int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
@@ -1045,7 +1073,6 @@ int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
 int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
 void t4_db_full(struct adapter *adapter);
 void t4_db_dropped(struct adapter *adapter);
-int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len);
 int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
                         u32 addr, u32 val);
 void t4_sge_decode_idma_state(struct adapter *adapter, int state);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
new file mode 100644 (file)
index 0000000..0d3a9df
--- /dev/null
@@ -0,0 +1,971 @@
+/*
+ *  Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Anish Bhatt (anish@chelsio.com)
+ *            Casey Leedom (leedom@chelsio.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#include "cxgb4.h"
+
+/* Initialize a port's Data Center Bridging state.  Typically used after a
+ * Link Down event.
+ */
+void cxgb4_dcb_state_init(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       memset(dcb, 0, sizeof(struct port_dcb_info));
+       dcb->state = CXGB4_DCB_STATE_START;
+}
+
+/* Finite state machine for Data Center Bridging.
+ */
+void cxgb4_dcb_state_fsm(struct net_device *dev,
+                        enum cxgb4_dcb_state_input input)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+       struct adapter *adap = pi->adapter;
+
+       switch (input) {
+       case CXGB4_DCB_INPUT_FW_DISABLED: {
+               /* Firmware tells us it's not doing DCB */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_START: {
+                       /* we're going to use Host DCB */
+                       dcb->state = CXGB4_DCB_STATE_HOST;
+                       dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
+                       dcb->enabled = 1;
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_HOST: {
+                       /* we're already in Host DCB mode */
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       case CXGB4_DCB_INPUT_FW_ENABLED: {
+               /* Firmware tells us that it is doing DCB */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_START: {
+                       /* we're going to use Firmware DCB */
+                       dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
+                       dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_FW_INCOMPLETE:
+               case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+                       /* we're already in firmware DCB mode */
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       case CXGB4_DCB_INPUT_FW_INCOMPLETE: {
+               /* Firmware tells us that its DCB state is incomplete */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_FW_INCOMPLETE: {
+                       /* we're already incomplete */
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+                       /* We were successfully running with firmware DCB but
+                        * now it's telling us that it's in an "incomplete"
+                        * state.  We need to reset back to a ground state
+                        * of incomplete.
+                        */
+                       cxgb4_dcb_state_init(dev);
+                       dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
+                       dcb->supported = CXGB4_DCBX_FW_SUPPORT;
+                       linkwatch_fire_event(dev);
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       case CXGB4_DCB_INPUT_FW_ALLSYNCED: {
+               /* Firmware tells us that its DCB state is complete */
+               switch (dcb->state) {
+               case CXGB4_DCB_STATE_FW_INCOMPLETE: {
+                       dcb->state = CXGB4_DCB_STATE_FW_ALLSYNCED;
+                       dcb->enabled = 1;
+                       linkwatch_fire_event(dev);
+                       break;
+               }
+
+               case CXGB4_DCB_STATE_FW_ALLSYNCED: {
+                       /* we're already all sync'ed */
+                       break;
+               }
+
+               default:
+                       goto bad_state_transition;
+               }
+               break;
+       }
+
+       default:
+               goto bad_state_input;
+       }
+       return;
+
+bad_state_input:
+       dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: illegal input symbol %d\n",
+               input);
+       return;
+
+bad_state_transition:
+       dev_err(adap->pdev_dev, "cxgb4_dcb_state_fsm: bad state transition, state = %d, input = %d\n",
+               dcb->state, input);
+}
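
Putting the state machine together: against a DCBX-capable switch the
expected progression is START -> FW_INCOMPLETE -> FW_ALLSYNCED, driven
entirely by firmware messages.  Illustrative only:

	/* firmware announces it owns DCB negotiation */
	cxgb4_dcb_state_fsm(dev, CXGB4_DCB_INPUT_FW_ENABLED);
		/* START -> FW_INCOMPLETE */

	/* firmware reports all DCBX parameters synchronized */
	cxgb4_dcb_state_fsm(dev, CXGB4_DCB_INPUT_FW_ALLSYNCED);
		/* FW_INCOMPLETE -> FW_ALLSYNCED, dcb->enabled = 1 */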
+
+/* Handle a DCB/DCBX update message from the firmware.
+ */
+void cxgb4_dcb_handle_fw_update(struct adapter *adap,
+                               const struct fw_port_cmd *pcmd)
+{
+       const union fw_port_dcb *fwdcb = &pcmd->u.dcb;
+       int port = FW_PORT_CMD_PORTID_GET(be32_to_cpu(pcmd->op_to_portid));
+       struct net_device *dev = adap->port[port];
+       struct port_info *pi = netdev_priv(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+       int dcb_type = pcmd->u.dcb.pgid.type;
+
+       /* Handle Firmware DCB Control messages separately since they drive
+        * our state machine.
+        */
+       if (dcb_type == FW_PORT_DCB_TYPE_CONTROL) {
+               enum cxgb4_dcb_state_input input =
+                       ((pcmd->u.dcb.control.all_syncd_pkd &
+                         FW_PORT_CMD_ALL_SYNCD)
+                        ? CXGB4_DCB_STATE_FW_ALLSYNCED
+                        : CXGB4_DCB_STATE_FW_INCOMPLETE);
+
+               cxgb4_dcb_state_fsm(dev, input);
+               return;
+       }
+
+       /* It's weird, and almost certainly an error, to get Firmware DCB
+        * messages when we either haven't been told whether we're going to be
+        * doing Host or Firmware DCB; and even worse when we've been told
+        * that we're doing Host DCB!
+        */
+       if (dcb->state == CXGB4_DCB_STATE_START ||
+           dcb->state == CXGB4_DCB_STATE_HOST) {
+               dev_err(adap->pdev_dev, "Receiving Firmware DCB messages in State %d\n",
+                       dcb->state);
+               return;
+       }
+
+       /* Now handle the general Firmware DCB update messages ...
+        */
+       switch (dcb_type) {
+       case FW_PORT_DCB_TYPE_PGID:
+               dcb->pgid = be32_to_cpu(fwdcb->pgid.pgid);
+               dcb->msgs |= CXGB4_DCB_FW_PGID;
+               break;
+
+       case FW_PORT_DCB_TYPE_PGRATE:
+               dcb->pg_num_tcs_supported = fwdcb->pgrate.num_tcs_supported;
+               memcpy(dcb->pgrate, &fwdcb->pgrate.pgrate,
+                      sizeof(dcb->pgrate));
+               dcb->msgs |= CXGB4_DCB_FW_PGRATE;
+               break;
+
+       case FW_PORT_DCB_TYPE_PRIORATE:
+               memcpy(dcb->priorate, &fwdcb->priorate.strict_priorate,
+                      sizeof(dcb->priorate));
+               dcb->msgs |= CXGB4_DCB_FW_PRIORATE;
+               break;
+
+       case FW_PORT_DCB_TYPE_PFC:
+               dcb->pfcen = fwdcb->pfc.pfcen;
+               dcb->pfc_num_tcs_supported = fwdcb->pfc.max_pfc_tcs;
+               dcb->msgs |= CXGB4_DCB_FW_PFC;
+               break;
+
+       case FW_PORT_DCB_TYPE_APP_ID: {
+               const struct fw_port_app_priority *fwap = &fwdcb->app_priority;
+               int idx = fwap->idx;
+               struct app_priority *ap = &dcb->app_priority[idx];
+
+               struct dcb_app app = {
+                       .selector = fwap->sel_field,
+                       .protocol = be16_to_cpu(fwap->protocolid),
+                       .priority = fwap->user_prio_map,
+               };
+               int err;
+
+               err = dcb_setapp(dev, &app);
+               if (err)
+                       dev_err(adap->pdev_dev,
+                               "Failed DCB Set Application Priority: sel=%d, prot=%d, prio=%d, err=%d\n",
+                               app.selector, app.protocol, app.priority, -err);
+
+               ap->user_prio_map = fwap->user_prio_map;
+               ap->sel_field = fwap->sel_field;
+               ap->protocolid = be16_to_cpu(fwap->protocolid);
+               dcb->msgs |= CXGB4_DCB_FW_APP_ID;
+               break;
+       }
+
+       default:
+               dev_err(adap->pdev_dev, "Unknown DCB update type received %x\n",
+                       dcb_type);
+               break;
+       }
+}
+
+/* Data Center Bridging netlink operations.
+ */
+
+/* Get current DCB enabled/disabled state.
+ */
+static u8 cxgb4_getstate(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       return pi->dcb.enabled;
+}
+
+/* Set DCB enabled/disabled.
+ */
+static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       /* Firmware doesn't provide any mechanism to control the DCB state.
+        */
+       if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
+               return 1;
+
+       return 0;
+}
+
+static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
+                            u8 *prio_type, u8 *pgid, u8 *bw_per,
+                            u8 *up_tc_map, int local)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       *prio_type = *pgid = *bw_per = *up_tc_map = 0;
+
+       if (local)
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       else
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return;
+       }
+       *pgid = (be32_to_cpu(pcmd.u.dcb.pgid.pgid) >> (tc * 4)) & 0xf;
+
+       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return;
+       }
+
+       *bw_per = pcmd.u.dcb.pgrate.pgrate[*pgid];
+       *up_tc_map = (1 << tc);
+
+       /* prio_type is link strict */
+       *prio_type = 0x2;
+}
+
+static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
+                               u8 *prio_type, u8 *pgid, u8 *bw_per,
+                               u8 *up_tc_map)
+{
+       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1);
+}
+
+static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
+                               u8 *prio_type, u8 *pgid, u8 *bw_per,
+                               u8 *up_tc_map)
+{
+       return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0);
+}
+
+static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
+                               u8 prio_type, u8 pgid, u8 bw_per,
+                               u8 up_tc_map)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       u32 _pgid;
+       int err;
+
+       if (pgid == DCB_ATTR_VALUE_UNDEFINED)
+               return;
+       if (bw_per == DCB_ATTR_VALUE_UNDEFINED)
+               return;
+
+       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return;
+       }
+
+       _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+       _pgid &= ~(0xF << (tc * 4));
+       _pgid |= pgid << (tc * 4);
+       pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB write PGID failed with %d\n",
+                       -err);
+               return;
+       }
+
+       memset(&pcmd, 0, sizeof(struct fw_port_cmd));
+
+       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return;
+       }
+
+       pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS)
+               dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
+                       -err);
+}
+
+static void cxgb4_getpgbwgcfg(struct net_device *dev, int pgid, u8 *bw_per,
+                             int local)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       if (local)
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       else
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+       } else {
+               *bw_per = pcmd.u.dcb.pgrate.pgrate[pgid];
+       }
+}
+
+static void cxgb4_getpgbwgcfg_tx(struct net_device *dev, int pgid, u8 *bw_per)
+{
+       return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 1);
+}
+
+static void cxgb4_getpgbwgcfg_rx(struct net_device *dev, int pgid, u8 *bw_per)
+{
+       return cxgb4_getpgbwgcfg(dev, pgid, bw_per, 0);
+}
+
+static void cxgb4_setpgbwgcfg_tx(struct net_device *dev, int pgid,
+                                u8 bw_per)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return;
+       }
+
+       pcmd.u.dcb.pgrate.pgrate[pgid] = bw_per;
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+       if (err != FW_PORT_DCB_CFG_SUCCESS)
+               dev_err(adap->pdev_dev, "DCB write PGRATE failed with %d\n",
+                       -err);
+}
+
+/* Return whether the specified Traffic Class Priority has Priority Pause
+ * Frames enabled.
+ */
+static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       if (dcb->state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+           priority >= CXGB4_MAX_PRIORITY)
+               *pfccfg = 0;
+       else
+               *pfccfg = (pi->dcb.pfcen >> priority) & 1;
+}
+
+/* Enable/disable Priority Pause Frames for the specified Traffic Class
+ * Priority.
+ */
+static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int err;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED ||
+           priority >= CXGB4_MAX_PRIORITY)
+               return;
+
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       pcmd.u.dcb.pfc.type = FW_PORT_DCB_TYPE_PFC;
+       pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
+
+       if (pfccfg)
+               pcmd.u.dcb.pfc.pfcen |= (1 << priority);
+       else
+               pcmd.u.dcb.pfc.pfcen &= (~(1 << priority));
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB PFC write failed with %d\n", -err);
+               return;
+       }
+
+       pi->dcb.pfcen = pcmd.u.dcb.pfc.pfcen;
+}
+
+static u8 cxgb4_setall(struct net_device *dev)
+{
+       return 0;
+}
+
+/* Return DCB capabilities.
+ */
+static u8 cxgb4_getcap(struct net_device *dev, int cap_id, u8 *caps)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       switch (cap_id) {
+       case DCB_CAP_ATTR_PG:
+       case DCB_CAP_ATTR_PFC:
+               *caps = true;
+               break;
+
+       case DCB_CAP_ATTR_PG_TCS:
+               /* 8 priorities for PG represented by bitmap */
+               *caps = 0x80;
+               break;
+
+       case DCB_CAP_ATTR_PFC_TCS:
+               /* 8 priorities for PFC represented by bitmap */
+               *caps = 0x80;
+               break;
+
+       case DCB_CAP_ATTR_GSP:
+               *caps = true;
+               break;
+
+       case DCB_CAP_ATTR_UP2TC:
+       case DCB_CAP_ATTR_BCN:
+               *caps = false;
+               break;
+
+       case DCB_CAP_ATTR_DCBX:
+               *caps = pi->dcb.supported;
+               break;
+
+       default:
+               *caps = false;
+       }
+
+       return 0;
+}
+
+/* Return the number of Traffic Classes for the indicated Traffic Class ID.
+ */
+static int cxgb4_getnumtcs(struct net_device *dev, int tcs_id, u8 *num)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       switch (tcs_id) {
+       case DCB_NUMTCS_ATTR_PG:
+               if (pi->dcb.msgs & CXGB4_DCB_FW_PGRATE)
+                       *num = pi->dcb.pg_num_tcs_supported;
+               else
+                       *num = 0x8;
+               break;
+
+       case DCB_NUMTCS_ATTR_PFC:
+               *num = 0x8;
+               break;
+
+       default:
+               return -EINVAL;
+       }
+
+       return 0;
+}
+
+/* Set the number of Traffic Classes supported for the indicated Traffic Class
+ * ID.
+ */
+static int cxgb4_setnumtcs(struct net_device *dev, int tcs_id, u8 num)
+{
+       /* Setting the number of Traffic Classes isn't supported.
+        */
+       return -ENOSYS;
+}
+
+/* Return whether Priority Flow Control is enabled.  */
+static u8 cxgb4_getpfcstate(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return false;
+
+       return pi->dcb.pfcen != 0;
+}
+
+/* Enable/disable Priority Flow Control. */
+static void cxgb4_setpfcstate(struct net_device *dev, u8 state)
+{
+       /* We can't enable/disable Priority Flow Control but we also can't
+        * return an error ...
+        */
+}
+
+/* Return the Application User Priority Map associated with the specified
+ * Application ID.
+ */
+static int __cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+                         int peer)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return 0;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               struct fw_port_cmd pcmd;
+               int err;
+
+               if (peer)
+                       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+               else
+                       INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = i;
+
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB APP read failed with %d\n",
+                               -err);
+                       return err;
+               }
+               if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id)
+                       return pcmd.u.dcb.app_priority.user_prio_map;
+
+               /* exhausted app list */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+       }
+
+       return -EEXIST;
+}
+
+/* Return the Application User Priority Map associated with the specified
+ * Application ID.
+ */
+static int cxgb4_getapp(struct net_device *dev, u8 app_idtype, u16 app_id)
+{
+       return __cxgb4_getapp(dev, app_idtype, app_id, 0);
+}
+
+/* Write a new Application User Priority Map for the specified Application ID
+ */
+static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
+                       u8 app_prio)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i, err;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return -EINVAL;
+
+       /* DCB info gets thrown away on link up */
+       if (!netif_carrier_ok(dev))
+               return -ENOLINK;
+
+       if (app_idtype != DCB_APP_IDTYPE_ETHTYPE &&
+           app_idtype != DCB_APP_IDTYPE_PORTNUM)
+               return -EINVAL;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               INIT_PORT_DCB_READ_LOCAL_CMD(pcmd, pi->port_id);
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = i;
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+                               -err);
+                       return err;
+               }
+               if (be16_to_cpu(pcmd.u.dcb.app_priority.protocolid) == app_id) {
+                       /* overwrite existing app table */
+                       pcmd.u.dcb.app_priority.protocolid = 0;
+                       break;
+               }
+               /* find first empty slot */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+       }
+
+       if (i == CXGB4_MAX_DCBX_APP_SUPPORTED) {
+               /* no empty slots available */
+               dev_err(adap->pdev_dev, "DCB app table full\n");
+               return -EBUSY;
+       }
+
+       /* write out new app table entry */
+       INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
+       if (pi->dcb.state == CXGB4_DCB_STATE_HOST)
+               pcmd.op_to_portid |= cpu_to_be32(FW_PORT_CMD_APPLY);
+
+       pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+       pcmd.u.dcb.app_priority.protocolid = cpu_to_be16(app_id);
+       pcmd.u.dcb.app_priority.sel_field = app_idtype;
+       pcmd.u.dcb.app_priority.user_prio_map = app_prio;
+       pcmd.u.dcb.app_priority.idx = i;
+
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB app table write failed with %d\n",
+                       -err);
+               return err;
+       }
+
+       return 0;
+}
+
+/* Return whether IEEE Data Center Bridging has been negotiated.
+ */
+static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+       struct port_dcb_info *dcb = &pi->dcb;
+
+       return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
+               (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
+}
+
+/* Fill in the Application User Priority Map associated with the
+ * specified Application.
+ */
+static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
+{
+       int prio;
+
+       if (!cxgb4_ieee_negotiation_complete(dev))
+               return -EINVAL;
+       if (!(app->selector && app->protocol))
+               return -EINVAL;
+
+       prio = dcb_getapp(dev, app);
+       if (prio == 0) {
+               /* If app doesn't exist in dcb_app table, try firmware
+                * directly.
+                */
+               prio = __cxgb4_getapp(dev, app->selector, app->protocol, 0);
+       }
+
+       app->priority = prio;
+       return 0;
+}
+
+/* Write a new Application User Priority Map for the specified App id. */
+static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
+{
+       if (!cxgb4_ieee_negotiation_complete(dev))
+               return -EINVAL;
+       if (!(app->selector && app->protocol && app->priority))
+               return -EINVAL;
+
+       cxgb4_setapp(dev, app->selector, app->protocol, app->priority);
+       return dcb_setapp(dev, app);
+}
+
+/* Return our DCBX parameters.
+ */
+static u8 cxgb4_getdcbx(struct net_device *dev)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       /* This is already set by cxgb4_set_dcb_caps, so just return it */
+       return pi->dcb.supported;
+}
+
+/* Set our DCBX parameters.
+ */
+static u8 cxgb4_setdcbx(struct net_device *dev, u8 dcb_request)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       /* Filter out requests which exceed our capabilities.
+        */
+       if ((dcb_request & (CXGB4_DCBX_FW_SUPPORT | CXGB4_DCBX_HOST_SUPPORT))
+           != dcb_request)
+               return 1;
+
+       /* Can't set DCBX capabilities if DCBX isn't enabled. */
+       if (!pi->dcb.state)
+               return 1;
+
+       /* There's currently no mechanism to allow for the firmware DCBX
+        * negotiation to be changed from the Host Driver.  If the caller
+        * requests exactly the same parameters that we already have then
+        * we'll allow them to be successfully "set" ...
+        */
+       if (dcb_request != pi->dcb.supported)
+               return 1;
+
+       pi->dcb.supported = dcb_request;
+       return 0;
+}
+
+static int cxgb4_getpeer_app(struct net_device *dev,
+                            struct dcb_peer_app_info *info, u16 *app_count)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i, err = 0;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return 1;
+
+       info->willing = 0;
+       info->error = 0;
+
+       *app_count = 0;
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = *app_count;
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+                               -err);
+                       return err;
+               }
+
+               /* find first empty slot */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+       }
+       *app_count = i;
+       return err;
+}
+
+static int cxgb4_getpeerapp_tbl(struct net_device *dev, struct dcb_app *table)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       int i, err = 0;
+
+       if (pi->dcb.state != CXGB4_DCB_STATE_FW_ALLSYNCED)
+               return 1;
+
+       for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
+               INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+               pcmd.u.dcb.app_priority.type = FW_PORT_DCB_TYPE_APP_ID;
+               pcmd.u.dcb.app_priority.idx = i;
+               err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+
+               if (err != FW_PORT_DCB_CFG_SUCCESS) {
+                       dev_err(adap->pdev_dev, "DCB app table read failed with %d\n",
+                               -err);
+                       return err;
+               }
+
+               /* find first empty slot */
+               if (!pcmd.u.dcb.app_priority.protocolid)
+                       break;
+
+               table[i].selector = pcmd.u.dcb.app_priority.sel_field;
+               table[i].protocol =
+                       be16_to_cpu(pcmd.u.dcb.app_priority.protocolid);
+               table[i].priority = pcmd.u.dcb.app_priority.user_prio_map;
+       }
+       return err;
+}
+
+/* Return Priority Group information.
+ */
+static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
+{
+       struct fw_port_cmd pcmd;
+       struct port_info *pi = netdev2pinfo(dev);
+       struct adapter *adap = pi->adapter;
+       u32 pgid;
+       int i, err;
+
+       /* We're always "willing" -- the Switch Fabric always dictates the
+        * DCBX parameters to us.
+        */
+       pg->willing = true;
+
+       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgid.type = FW_PORT_DCB_TYPE_PGID;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGID failed with %d\n", -err);
+               return err;
+       }
+       pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
+
+       for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
+               pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF;
+
+       INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
+       pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
+       err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
+       if (err != FW_PORT_DCB_CFG_SUCCESS) {
+               dev_err(adap->pdev_dev, "DCB read PGRATE failed with %d\n",
+                       -err);
+               return err;
+       }
+
+       for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
+               pg->pg_bw[i] = pcmd.u.dcb.pgrate.pgrate[i];
+
+       return 0;
+}
+
+/* Return Priority Flow Control information.
+ */
+static int cxgb4_cee_peer_getpfc(struct net_device *dev, struct cee_pfc *pfc)
+{
+       struct port_info *pi = netdev2pinfo(dev);
+
+       cxgb4_getnumtcs(dev, DCB_NUMTCS_ATTR_PFC, &(pfc->tcs_supported));
+       pfc->pfc_en = pi->dcb.pfcen;
+
+       return 0;
+}
+
+const struct dcbnl_rtnl_ops cxgb4_dcb_ops = {
+       .ieee_getapp            = cxgb4_ieee_getapp,
+       .ieee_setapp            = cxgb4_ieee_setapp,
+
+       /* CEE std */
+       .getstate               = cxgb4_getstate,
+       .setstate               = cxgb4_setstate,
+       .getpgtccfgtx           = cxgb4_getpgtccfg_tx,
+       .getpgbwgcfgtx          = cxgb4_getpgbwgcfg_tx,
+       .getpgtccfgrx           = cxgb4_getpgtccfg_rx,
+       .getpgbwgcfgrx          = cxgb4_getpgbwgcfg_rx,
+       .setpgtccfgtx           = cxgb4_setpgtccfg_tx,
+       .setpgbwgcfgtx          = cxgb4_setpgbwgcfg_tx,
+       .setpfccfg              = cxgb4_setpfccfg,
+       .getpfccfg              = cxgb4_getpfccfg,
+       .setall                 = cxgb4_setall,
+       .getcap                 = cxgb4_getcap,
+       .getnumtcs              = cxgb4_getnumtcs,
+       .setnumtcs              = cxgb4_setnumtcs,
+       .getpfcstate            = cxgb4_getpfcstate,
+       .setpfcstate            = cxgb4_setpfcstate,
+       .getapp                 = cxgb4_getapp,
+       .setapp                 = cxgb4_setapp,
+
+       /* DCBX configuration */
+       .getdcbx                = cxgb4_getdcbx,
+       .setdcbx                = cxgb4_setdcbx,
+
+       /* peer apps */
+       .peer_getappinfo        = cxgb4_getpeer_app,
+       .peer_getapptable       = cxgb4_getpeerapp_tbl,
+
+       /* CEE peer */
+       .cee_peer_getpg         = cxgb4_cee_peer_getpg,
+       .cee_peer_getpfc        = cxgb4_cee_peer_getpfc,
+};
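
A minimal standalone sketch of the PGID unpacking performed by cxgb4_cee_peer_getpg() above: each priority owns a 4-bit Priority Group field in the 32-bit PGID word. CXGB4_MAX_PRIORITY matches the driver's value of 8; the sample PGID word is illustrative.

    #include <stdint.h>
    #include <stdio.h>

    #define CXGB4_MAX_PRIORITY 8        /* as defined in cxgb4_dcb.h below */

    int main(void)
    {
            uint32_t pgid = 0x76543210; /* illustrative firmware PGID word */
            uint8_t prio_pg[CXGB4_MAX_PRIORITY];
            int i;

            /* Priority i lives in bits [4*i+3 : 4*i] of the PGID word. */
            for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
                    prio_pg[i] = (pgid >> (i * 4)) & 0xF;

            for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
                    printf("priority %d -> PG %u\n", i, prio_pg[i]);
            return 0;
    }
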
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.h
new file mode 100644
index 0000000..1ec1d83
--- /dev/null
@@ -0,0 +1,141 @@
+/*
+ *  Copyright (C) 2013-2014 Chelsio Communications.  All rights reserved.
+ *
+ *  Written by Anish Bhatt (anish@chelsio.com)
+ *
+ *  This program is free software; you can redistribute it and/or modify it
+ *  under the terms and conditions of the GNU General Public License,
+ *  version 2, as published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope it will be useful, but WITHOUT
+ *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ *  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ *  more details.
+ *
+ *  The full GNU General Public License is included in this distribution in
+ *  the file called "COPYING".
+ *
+ */
+
+#ifndef __CXGB4_DCB_H
+#define __CXGB4_DCB_H
+
+#include <linux/netdevice.h>
+#include <linux/dcbnl.h>
+#include <net/dcbnl.h>
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+
+#define CXGB4_DCBX_FW_SUPPORT \
+       (DCB_CAP_DCBX_VER_CEE | \
+        DCB_CAP_DCBX_VER_IEEE | \
+        DCB_CAP_DCBX_LLD_MANAGED)
+#define CXGB4_DCBX_HOST_SUPPORT \
+       (DCB_CAP_DCBX_VER_CEE | \
+        DCB_CAP_DCBX_VER_IEEE | \
+        DCB_CAP_DCBX_HOST)
+
+#define CXGB4_MAX_PRIORITY      CXGB4_MAX_DCBX_APP_SUPPORTED
+#define CXGB4_MAX_TCS           CXGB4_MAX_DCBX_APP_SUPPORTED
+
+#define INIT_PORT_DCB_CMD(__pcmd, __port, __op, __action) \
+       do { \
+               memset(&(__pcmd), 0, sizeof(__pcmd)); \
+               (__pcmd).op_to_portid = \
+                       cpu_to_be32(FW_CMD_OP(FW_PORT_CMD) | \
+                                   FW_CMD_REQUEST | \
+                                   FW_CMD_##__op | \
+                                   FW_PORT_CMD_PORTID(__port)); \
+               (__pcmd).action_to_len16 = \
+                       cpu_to_be32(FW_PORT_CMD_ACTION(__action) | \
+                                   FW_LEN16(pcmd)); \
+       } while (0)
+
+#define INIT_PORT_DCB_READ_PEER_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_RECV)
+
+#define INIT_PORT_DCB_READ_LOCAL_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_TRANS)
+
+#define INIT_PORT_DCB_READ_SYNC_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, READ, FW_PORT_ACTION_DCB_READ_DET)
+
+#define INIT_PORT_DCB_WRITE_CMD(__pcmd, __port) \
+       INIT_PORT_DCB_CMD(__pcmd, __port, EXEC, FW_PORT_ACTION_L2_DCB_CFG)
+
+/* States we can be in for a port's Data Center Bridging.
+ */
+enum cxgb4_dcb_state {
+       CXGB4_DCB_STATE_START,          /* initial unknown state */
+       CXGB4_DCB_STATE_HOST,           /* we're using Host DCB (if at all) */
+       CXGB4_DCB_STATE_FW_INCOMPLETE,  /* using firmware DCB, incomplete */
+       CXGB4_DCB_STATE_FW_ALLSYNCED,   /* using firmware DCB, all sync'ed */
+};
+
+/* Data Center Bridging state input for the Finite State Machine.
+ */
+enum cxgb4_dcb_state_input {
+       /* Input from the firmware.
+        */
+       CXGB4_DCB_INPUT_FW_DISABLED,    /* firmware DCB disabled */
+       CXGB4_DCB_INPUT_FW_ENABLED,     /* firmware DCB enabled */
+       CXGB4_DCB_INPUT_FW_INCOMPLETE,  /* firmware reports incomplete DCB */
+       CXGB4_DCB_INPUT_FW_ALLSYNCED,   /* firmware reports all sync'ed */
+
+};
+
+/* Firmware DCB messages that we've received so far ...
+ */
+enum cxgb4_dcb_fw_msgs {
+       CXGB4_DCB_FW_PGID       = 0x01,
+       CXGB4_DCB_FW_PGRATE     = 0x02,
+       CXGB4_DCB_FW_PRIORATE   = 0x04,
+       CXGB4_DCB_FW_PFC        = 0x08,
+       CXGB4_DCB_FW_APP_ID     = 0x10,
+};
+
+#define CXGB4_MAX_DCBX_APP_SUPPORTED 8
+
+/* Data Center Bridging support.
+ */
+struct port_dcb_info {
+       enum cxgb4_dcb_state state;     /* DCB State Machine */
+       enum cxgb4_dcb_fw_msgs msgs;    /* DCB Firmware messages received */
+       unsigned int supported;         /* OS DCB capabilities supported */
+       bool enabled;                   /* OS Enabled state */
+
+       /* Cached copies of DCB information sent by the firmware (in Host
+        * Native Endian format).
+        */
+       u32     pgid;                   /* Priority Group[0..7] */
+       u8      pfcen;                  /* Priority Flow Control[0..7] */
+       u8      pg_num_tcs_supported;   /* max PG Traffic Classes */
+       u8      pfc_num_tcs_supported;  /* max PFC Traffic Classes */
+       u8      pgrate[8];              /* Priority Group Rate[0..7] */
+       u8      priorate[8];            /* Priority Rate[0..7] */
+       struct app_priority { /* Application Information */
+               u8      user_prio_map;  /* Priority Map bitfield */
+               u8      sel_field;      /* Protocol ID interpretation */
+               u16     protocolid;     /* Protocol ID */
+       } app_priority[CXGB4_MAX_DCBX_APP_SUPPORTED];
+};
+
+void cxgb4_dcb_state_init(struct net_device *);
+void cxgb4_dcb_state_fsm(struct net_device *, enum cxgb4_dcb_state_input);
+void cxgb4_dcb_handle_fw_update(struct adapter *, const struct fw_port_cmd *);
+void cxgb4_dcb_set_caps(struct adapter *, const struct fw_port_cmd *);
+extern const struct dcbnl_rtnl_ops cxgb4_dcb_ops;
+
+#define CXGB4_DCB_ENABLED true
+
+#else /* !CONFIG_CHELSIO_T4_DCB */
+
+static inline void cxgb4_dcb_state_init(struct net_device *dev)
+{
+}
+
+#define CXGB4_DCB_ENABLED false
+
+#endif /* !CONFIG_CHELSIO_T4_DCB */
+
+#endif /* __CXGB4_DCB_H */
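
The state and input enums above drive a small finite state machine. Below is a hedged sketch of the plain reading of those names; it is an illustration only, not a copy of the driver's real cxgb4_dcb_state_fsm().

    #include <stdio.h>

    enum state { START, HOST, FW_INCOMPLETE, FW_ALLSYNCED };
    enum input { IN_FW_DISABLED, IN_FW_ENABLED, IN_FW_INCOMPLETE,
                 IN_FW_ALLSYNCED };

    /* Firmware-owned DCB moves START -> FW_INCOMPLETE -> FW_ALLSYNCED;
     * a "disabled" report hands DCB back to the host. */
    static enum state step(enum state s, enum input in)
    {
            switch (in) {
            case IN_FW_DISABLED:   return HOST;
            case IN_FW_ENABLED:    return FW_INCOMPLETE;
            case IN_FW_INCOMPLETE: return FW_INCOMPLETE;
            case IN_FW_ALLSYNCED:  return FW_ALLSYNCED;
            }
            return s;
    }

    int main(void)
    {
            enum state s = START;

            s = step(s, IN_FW_ENABLED);
            s = step(s, IN_FW_ALLSYNCED);
            printf("final state %d (FW_ALLSYNCED = %d)\n", s, FW_ALLSYNCED);
            return 0;
    }
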
index 2f8d6b9..8b46534 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -67,6 +67,7 @@
 #include "t4_regs.h"
 #include "t4_msg.h"
 #include "t4fw_api.h"
+#include "cxgb4_dcb.h"
 #include "l2t.h"
 
 #include <../drivers/net/bonding/bonding.h>
@@ -223,6 +224,17 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x4008, -1),
        CH_DEVICE(0x4009, -1),
        CH_DEVICE(0x400a, -1),
+       CH_DEVICE(0x400d, -1),
+       CH_DEVICE(0x400e, -1),
+       CH_DEVICE(0x4080, -1),
+       CH_DEVICE(0x4081, -1),
+       CH_DEVICE(0x4082, -1),
+       CH_DEVICE(0x4083, -1),
+       CH_DEVICE(0x4084, -1),
+       CH_DEVICE(0x4085, -1),
+       CH_DEVICE(0x4086, -1),
+       CH_DEVICE(0x4087, -1),
+       CH_DEVICE(0x4088, -1),
        CH_DEVICE(0x4401, 4),
        CH_DEVICE(0x4402, 4),
        CH_DEVICE(0x4403, 4),
@@ -235,6 +247,15 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
        CH_DEVICE(0x440a, 4),
        CH_DEVICE(0x440d, 4),
        CH_DEVICE(0x440e, 4),
+       CH_DEVICE(0x4480, 4),
+       CH_DEVICE(0x4481, 4),
+       CH_DEVICE(0x4482, 4),
+       CH_DEVICE(0x4483, 4),
+       CH_DEVICE(0x4484, 4),
+       CH_DEVICE(0x4485, 4),
+       CH_DEVICE(0x4486, 4),
+       CH_DEVICE(0x4487, 4),
+       CH_DEVICE(0x4488, 4),
        CH_DEVICE(0x5001, 4),
        CH_DEVICE(0x5002, 4),
        CH_DEVICE(0x5003, 4),
@@ -391,6 +412,17 @@ module_param_array(num_vf, uint, NULL, 0644);
 MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
 #endif
 
+/* TX Queue select: determines which algorithm is used to pick the TX
+ * queue. Select between the kernel-provided function (select_queue=0)
+ * and the driver's cxgb_select_queue function (select_queue=1).
+ *
+ * Default: select_queue=0
+ */
+static int select_queue;
+module_param(select_queue, int, 0644);
+MODULE_PARM_DESC(select_queue,
+                "Select between kernel provided method of selecting or driver method of selecting TX queue. Default is kernel method.");
+
 /*
  * The filter TCAM has a fixed portion and a variable portion.  The fixed
  * portion can match on source/destination IP IPv4/IPv6 addresses and TCP/UDP
@@ -458,6 +490,42 @@ static void link_report(struct net_device *dev)
        }
 }
 
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Set up/tear down Data Center Bridging Priority mapping for a net device. */
+static void dcb_tx_queue_prio_enable(struct net_device *dev, int enable)
+{
+       struct port_info *pi = netdev_priv(dev);
+       struct adapter *adap = pi->adapter;
+       struct sge_eth_txq *txq = &adap->sge.ethtxq[pi->first_qset];
+       int i;
+
+       /* We use a simple mapping of Port TX Queue Index to DCB
+        * Priority when we're enabling DCB.
+        */
+       for (i = 0; i < pi->nqsets; i++, txq++) {
+               u32 name, value;
+               int err;
+
+               name = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+                       FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH) |
+                       FW_PARAMS_PARAM_YZ(txq->q.cntxt_id));
+               value = enable ? i : 0xffffffff;
+
+               /* Since we can be called while atomic (from "interrupt
+                * level") we need to issue the Set Parameters Commannd
+                * without sleeping (timeout < 0).
+                */
+               err = t4_set_params_nosleep(adap, adap->mbox, adap->fn, 0, 1,
+                                           &name, &value);
+
+               if (err)
+                       dev_err(adap->pdev_dev,
+                               "Can't %s DCB Priority on port %d, TX Queue %d: err=%d\n",
+                               enable ? "set" : "unset", pi->port_id, i, -err);
+       }
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
 void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
 {
        struct net_device *dev = adapter->port[port_id];
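
A standalone sketch of the queue-to-priority mapping that dcb_tx_queue_prio_enable() above programs through the firmware: with DCB enabled, TX queue i is bound to DCB priority i; with DCB disabled, 0xffffffff ("no priority") is written instead. The queue count is illustrative.

    #include <stdio.h>

    int main(void)
    {
            int enable = 1, nqsets = 8, i;

            for (i = 0; i < nqsets; i++) {
                    unsigned int value = enable ? (unsigned int)i
                                                : 0xffffffff;
                    printf("txq %d -> DCB prio value %#x\n", i, value);
            }
            return 0;
    }
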
@@ -466,8 +534,13 @@ void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
        if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
                if (link_stat)
                        netif_carrier_on(dev);
-               else
+               else {
+#ifdef CONFIG_CHELSIO_T4_DCB
+                       cxgb4_dcb_state_init(dev);
+                       dcb_tx_queue_prio_enable(dev, false);
+#endif /* CONFIG_CHELSIO_T4_DCB */
                        netif_carrier_off(dev);
+               }
 
                link_report(dev);
        }
@@ -601,10 +674,45 @@ static int link_start(struct net_device *dev)
                ret = t4_link_start(pi->adapter, mb, pi->tx_chan,
                                    &pi->link_cfg);
        if (ret == 0)
-               ret = t4_enable_vi(pi->adapter, mb, pi->viid, true, true);
+               ret = t4_enable_vi_params(pi->adapter, mb, pi->viid, true,
+                                         true, CXGB4_DCB_ENABLED);
+
        return ret;
 }
 
+int cxgb4_dcb_enabled(const struct net_device *dev)
+{
+#ifdef CONFIG_CHELSIO_T4_DCB
+       struct port_info *pi = netdev_priv(dev);
+
+       return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED;
+#else
+       return 0;
+#endif
+}
+EXPORT_SYMBOL(cxgb4_dcb_enabled);
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+/* Handle a Data Center Bridging update message from the firmware. */
+static void dcb_rpl(struct adapter *adap, const struct fw_port_cmd *pcmd)
+{
+       int port = FW_PORT_CMD_PORTID_GET(ntohl(pcmd->op_to_portid));
+       struct net_device *dev = adap->port[port];
+       int old_dcb_enabled = cxgb4_dcb_enabled(dev);
+       int new_dcb_enabled;
+
+       cxgb4_dcb_handle_fw_update(adap, pcmd);
+       new_dcb_enabled = cxgb4_dcb_enabled(dev);
+
+       /* If the DCB has become enabled or disabled on the port then we're
+        * going to need to set up/tear down DCB Priority parameters for the
+        * TX Queues associated with the port.
+        */
+       if (new_dcb_enabled != old_dcb_enabled)
+               dcb_tx_queue_prio_enable(dev, new_dcb_enabled);
+}
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
 /* Clear a filter and release any of its resources that we own.  This also
  * clears the filter's "pending" status.
  */
@@ -709,8 +817,32 @@ static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
        } else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
                const struct cpl_fw6_msg *p = (void *)rsp;
 
-               if (p->type == 0)
-                       t4_handle_fw_rpl(q->adap, p->data);
+#ifdef CONFIG_CHELSIO_T4_DCB
+               const struct fw_port_cmd *pcmd = (const void *)p->data;
+               unsigned int cmd = FW_CMD_OP_GET(ntohl(pcmd->op_to_portid));
+               unsigned int action =
+                       FW_PORT_CMD_ACTION_GET(ntohl(pcmd->action_to_len16));
+
+               if (cmd == FW_PORT_CMD &&
+                   action == FW_PORT_ACTION_GET_PORT_INFO) {
+                       int port = FW_PORT_CMD_PORTID_GET(
+                                       be32_to_cpu(pcmd->op_to_portid));
+                       struct net_device *dev = q->adap->port[port];
+                       int state_input = ((pcmd->u.info.dcbxdis_pkd &
+                                           FW_PORT_CMD_DCBXDIS)
+                                          ? CXGB4_DCB_INPUT_FW_DISABLED
+                                          : CXGB4_DCB_INPUT_FW_ENABLED);
+
+                       cxgb4_dcb_state_fsm(dev, state_input);
+               }
+
+               if (cmd == FW_PORT_CMD &&
+                   action == FW_PORT_ACTION_L2_DCB_CFG)
+                       dcb_rpl(q->adap, pcmd);
+               else
+#endif
+                       if (p->type == 0)
+                               t4_handle_fw_rpl(q->adap, p->data);
        } else if (opcode == CPL_L2T_WRITE_RPL) {
                const struct cpl_l2t_write_rpl *p = (void *)rsp;
 
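
A sketch of the field extraction that fwevtq_handler() performs on big-endian firmware command words. The shift/mask values here are illustrative stand-ins; the real FW_CMD_OP_GET()/FW_PORT_CMD_PORTID_GET() encodings live in t4fw_api.h.

    #include <stdint.h>
    #include <stdio.h>
    #include <arpa/inet.h>      /* ntohl()/htonl(), as in the driver */

    #define FW_CMD_OP_GET(x)          (((x) >> 24) & 0xff)  /* illustrative */
    #define FW_PORT_CMD_PORTID_GET(x) ((x) & 0xf)           /* illustrative */

    int main(void)
    {
            uint32_t wire = htonl((0x1bu << 24) | 0x2);  /* big-endian word */
            uint32_t host = ntohl(wire);                 /* to host order */

            printf("opcode %#x, port %u\n",
                   (unsigned)FW_CMD_OP_GET(host),
                   (unsigned)FW_PORT_CMD_PORTID_GET(host));
            return 0;
    }
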
@@ -1290,6 +1422,48 @@ static int del_filter_wr(struct adapter *adapter, int fidx)
        return 0;
 }
 
+static u16 cxgb_select_queue(struct net_device *dev, struct sk_buff *skb,
+                            void *accel_priv, select_queue_fallback_t fallback)
+{
+       int txq;
+
+#ifdef CONFIG_CHELSIO_T4_DCB
+       /* If Data Center Bridging has been successfully negotiated on this
+        * link then we'll use the skb's priority to map it to a TX Queue.
+        * The skb's priority is determined via the VLAN Tag Priority Code
+        * Point field.
+        */
+       if (cxgb4_dcb_enabled(dev)) {
+               u16 vlan_tci;
+               int err;
+
+               err = vlan_get_tag(skb, &vlan_tci);
+               if (unlikely(err)) {
+                       if (net_ratelimit())
+                               netdev_warn(dev,
+                                           "TX Packet without VLAN Tag on DCB Link\n");
+                       txq = 0;
+               } else {
+                       txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
+               }
+               return txq;
+       }
+#endif /* CONFIG_CHELSIO_T4_DCB */
+
+       if (select_queue) {
+               txq = (skb_rx_queue_recorded(skb)
+                       ? skb_get_rx_queue(skb)
+                       : smp_processor_id());
+
+               while (unlikely(txq >= dev->real_num_tx_queues))
+                       txq -= dev->real_num_tx_queues;
+
+               return txq;
+       }
+
+       return fallback(dev, skb) % dev->real_num_tx_queues;
+}
+
 static inline int is_offload(const struct adapter *adap)
 {
        return adap->params.offload;
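
The DCB branch of cxgb_select_queue() above reduces to extracting the VLAN Priority Code Point. A standalone sketch; VLAN_PRIO_MASK/VLAN_PRIO_SHIFT match the kernel's <linux/if_vlan.h>, and the tag value is illustrative.

    #include <stdio.h>

    #define VLAN_PRIO_MASK  0xe000  /* top 3 bits of the TCI */
    #define VLAN_PRIO_SHIFT 13

    int main(void)
    {
            unsigned int vlan_tci = (5u << VLAN_PRIO_SHIFT) | 100; /* PCP 5 */
            int txq = (vlan_tci & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;

            printf("vlan_tci %#06x -> txq %d\n", vlan_tci, txq);
            return 0;
    }
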
@@ -2912,6 +3086,8 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
        loff_t avail = file_inode(file)->i_size;
        unsigned int mem = (uintptr_t)file->private_data & 3;
        struct adapter *adap = file->private_data - mem;
+       __be32 *data;
+       int ret;
 
        if (pos < 0)
                return -EINVAL;
@@ -2920,29 +3096,24 @@ static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
        if (count > avail - pos)
                count = avail - pos;
 
-       while (count) {
-               size_t len;
-               int ret, ofst;
-               __be32 data[16];
+       data = t4_alloc_mem(count);
+       if (!data)
+               return -ENOMEM;
 
-               if ((mem == MEM_MC) || (mem == MEM_MC1))
-                       ret = t4_mc_read(adap, mem % MEM_MC, pos, data, NULL);
-               else
-                       ret = t4_edc_read(adap, mem, pos, data, NULL);
-               if (ret)
-                       return ret;
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, 0, mem, pos, count, data, T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
+       if (ret) {
+               t4_free_mem(data);
+               return ret;
+       }
+       ret = copy_to_user(buf, data, count);
 
-               ofst = pos % sizeof(data);
-               len = min(count, sizeof(data) - ofst);
-               if (copy_to_user(buf, (u8 *)data + ofst, len))
-                       return -EFAULT;
+       t4_free_mem(data);
+       if (ret)
+               return -EFAULT;
 
-               buf += len;
-               pos += len;
-               count -= len;
-       }
-       count = pos - *ppos;
-       *ppos = pos;
+       *ppos = pos + count;
        return count;
 }
 
@@ -3274,8 +3445,8 @@ static int tid_init(struct tid_info *t)
        return 0;
 }
 
-static int cxgb4_clip_get(const struct net_device *dev,
-                         const struct in6_addr *lip)
+int cxgb4_clip_get(const struct net_device *dev,
+                  const struct in6_addr *lip)
 {
        struct adapter *adap;
        struct fw_clip_cmd c;
@@ -3289,9 +3460,10 @@ static int cxgb4_clip_get(const struct net_device *dev,
        c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 }
+EXPORT_SYMBOL(cxgb4_clip_get);
 
-static int cxgb4_clip_release(const struct net_device *dev,
-                             const struct in6_addr *lip)
+int cxgb4_clip_release(const struct net_device *dev,
+                      const struct in6_addr *lip)
 {
        struct adapter *adap;
        struct fw_clip_cmd c;
@@ -3305,6 +3477,7 @@ static int cxgb4_clip_release(const struct net_device *dev,
        c.ip_lo = *(__be64 *)(lip->s6_addr + 8);
        return t4_wr_mbox_meat(adap, adap->mbox, &c, sizeof(c), &c, false);
 }
+EXPORT_SYMBOL(cxgb4_clip_release);
 
 /**
  *     cxgb4_create_server - create an IP server
@@ -3603,7 +3776,11 @@ static int read_eq_indices(struct adapter *adap, u16 qid, u16 *pidx, u16 *cidx)
        __be64 indices;
        int ret;
 
-       ret = t4_mem_win_read_len(adap, addr, (__be32 *)&indices, 8);
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, 0, MEM_EDC0, addr,
+                          sizeof(indices), (__be32 *)&indices,
+                          T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
        if (!ret) {
                *cidx = (be64_to_cpu(indices) >> 25) & 0xffff;
                *pidx = (be64_to_cpu(indices) >> 9) & 0xffff;
@@ -3657,6 +3834,85 @@ void cxgb4_enable_db_coalescing(struct net_device *dev)
 }
 EXPORT_SYMBOL(cxgb4_enable_db_coalescing);
 
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)
+{
+       struct adapter *adap;
+       u32 offset, memtype, memaddr;
+       u32 edc0_size, edc1_size, mc0_size, mc1_size;
+       u32 edc0_end, edc1_end, mc0_end, mc1_end;
+       int ret;
+
+       adap = netdev2adap(dev);
+
+       offset = ((stag >> 8) * 32) + adap->vres.stag.start;
+
+       /* Figure out where the offset lands in the Memory Type/Address scheme.
+        * This code assumes that the memory is laid out starting at offset 0
+        * with no breaks as: EDC0, EDC1, MC0, MC1. All cards have both EDC0
+        * and EDC1.  Some cards will have neither MC0 nor MC1, most cards have
+        * MC0, and some have both MC0 and MC1.
+        */
+       edc0_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM0_BAR)) << 20;
+       edc1_size = EDRAM_SIZE_GET(t4_read_reg(adap, MA_EDRAM1_BAR)) << 20;
+       mc0_size = EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)) << 20;
+
+       edc0_end = edc0_size;
+       edc1_end = edc0_end + edc1_size;
+       mc0_end = edc1_end + mc0_size;
+
+       if (offset < edc0_end) {
+               memtype = MEM_EDC0;
+               memaddr = offset;
+       } else if (offset < edc1_end) {
+               memtype = MEM_EDC1;
+               memaddr = offset - edc0_end;
+       } else {
+               if (offset < mc0_end) {
+                       memtype = MEM_MC0;
+                       memaddr = offset - edc1_end;
+               } else if (is_t4(adap->params.chip)) {
+                       /* T4 only has a single memory channel */
+                       goto err;
+               } else {
+                       mc1_size = EXT_MEM_SIZE_GET(
+                                       t4_read_reg(adap,
+                                                   MA_EXT_MEMORY1_BAR)) << 20;
+                       mc1_end = mc0_end + mc1_size;
+                       if (offset < mc1_end) {
+                               memtype = MEM_MC1;
+                               memaddr = offset - mc0_end;
+                       } else {
+                               /* offset beyond the end of any memory */
+                               goto err;
+                       }
+               }
+       }
+
+       spin_lock(&adap->win0_lock);
+       ret = t4_memory_rw(adap, 0, memtype, memaddr, 32, tpte, T4_MEMORY_READ);
+       spin_unlock(&adap->win0_lock);
+       return ret;
+
+err:
+       dev_err(adap->pdev_dev, "stag %#x, offset %#x out of range\n",
+               stag, offset);
+       return -EINVAL;
+}
+EXPORT_SYMBOL(cxgb4_read_tpte);
+
+u64 cxgb4_read_sge_timestamp(struct net_device *dev)
+{
+       u32 hi, lo;
+       struct adapter *adap;
+
+       adap = netdev2adap(dev);
+       lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
+       hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
+
+       return ((u64)hi << 32) | (u64)lo;
+}
+EXPORT_SYMBOL(cxgb4_read_sge_timestamp);
+
 static struct pci_driver cxgb4_driver;
 
 static void check_neigh_update(struct neighbour *neigh)
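
A worked example of the region lookup inside cxgb4_read_tpte() above: the stag-derived offset is matched against the cumulative EDC0/EDC1/MC0 extents. The memory sizes and stag are illustrative; the driver reads the real sizes from the MA_*_BAR registers.

    #include <stdio.h>

    int main(void)
    {
            unsigned int edc0_size = 128u << 20, edc1_size = 128u << 20;
            unsigned int mc0_size  = 1024u << 20;
            unsigned int stag_start = 0, stag = 0x12300;

            unsigned int offset   = ((stag >> 8) * 32) + stag_start;
            unsigned int edc0_end = edc0_size;
            unsigned int edc1_end = edc0_end + edc1_size;
            unsigned int mc0_end  = edc1_end + mc0_size;

            if (offset < edc0_end)
                    printf("EDC0 @ %#x\n", offset);
            else if (offset < edc1_end)
                    printf("EDC1 @ %#x\n", offset - edc0_end);
            else if (offset < mc0_end)
                    printf("MC0  @ %#x\n", offset - edc1_end);
            else
                    printf("offset %#x out of range\n", offset);
            return 0;
    }
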
@@ -3899,6 +4155,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        unsigned short i;
 
        lli.pdev = adap->pdev;
+       lli.pf = adap->fn;
        lli.l2t = adap->l2t;
        lli.tids = &adap->tids;
        lli.ports = adap->port;
@@ -3919,6 +4176,7 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.wr_cred = adap->params.ofldq_wr_cred;
        lli.adapter_type = adap->params.chip;
        lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+       lli.cclk_ps = 1000000000 / adap->params.vpd.cclk;
        lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
                        t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF) >>
                        (adap->fn * 4));
@@ -3933,8 +4191,12 @@ static void uld_attach(struct adapter *adap, unsigned int uld)
        lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
        lli.fw_vers = adap->params.fw_vers;
        lli.dbfifo_int_thresh = dbfifo_int_thresh;
+       lli.sge_ingpadboundary = adap->sge.fl_align;
+       lli.sge_egrstatuspagesize = adap->sge.stat_len;
        lli.sge_pktshift = adap->sge.pktshift;
        lli.enable_fw_ofld_conn = adap->flags & FW_OFLD_CONN;
+       lli.max_ordird_qp = adap->params.max_ordird_qp;
+       lli.max_ird_adapter = adap->params.max_ird_adapter;
        lli.ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
 
        handle = ulds[uld].add(&lli);
@@ -4057,22 +4319,19 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
 EXPORT_SYMBOL(cxgb4_unregister_uld);
 
 /* Check if the netdev on which the event occurred belongs to us or not. Return
- * suceess (1) if it belongs otherwise failure (0).
+ * success (true) if it belongs otherwise failure (false).
+ * Called with rcu_read_lock() held.
  */
-static int cxgb4_netdev(struct net_device *netdev)
+static bool cxgb4_netdev(const struct net_device *netdev)
 {
        struct adapter *adap;
        int i;
 
-       spin_lock(&adap_rcu_lock);
        list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
                for (i = 0; i < MAX_NPORTS; i++)
-                       if (adap->port[i] == netdev) {
-                               spin_unlock(&adap_rcu_lock);
-                               return 1;
-                       }
-       spin_unlock(&adap_rcu_lock);
-       return 0;
+                       if (adap->port[i] == netdev)
+                               return true;
+       return false;
 }
 
 static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
@@ -4601,6 +4860,7 @@ static const struct net_device_ops cxgb4_netdev_ops = {
        .ndo_open             = cxgb_open,
        .ndo_stop             = cxgb_close,
        .ndo_start_xmit       = t4_eth_xmit,
+       .ndo_select_queue     = cxgb_select_queue,
        .ndo_get_stats64      = cxgb_get_stats,
        .ndo_set_rx_mode      = cxgb_set_rxmode,
        .ndo_set_mac_address  = cxgb_set_mac_addr,
@@ -4620,20 +4880,75 @@ void t4_fatal_err(struct adapter *adap)
        dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
 }
 
+/* Return the specified PCI-E Configuration Space register from our Physical
+ * Function.  We try first via a Firmware LDST Command since we prefer to let
+ * the firmware own all of these registers, but if that fails we go for it
+ * directly ourselves.
+ */
+static u32 t4_read_pcie_cfg4(struct adapter *adap, int reg)
+{
+       struct fw_ldst_cmd ldst_cmd;
+       u32 val;
+       int ret;
+
+       /* Construct and send the Firmware LDST Command to retrieve the
+        * specified PCI-E Configuration Space register.
+        */
+       memset(&ldst_cmd, 0, sizeof(ldst_cmd));
+       ldst_cmd.op_to_addrspace =
+               htonl(FW_CMD_OP(FW_LDST_CMD) |
+                     FW_CMD_REQUEST |
+                     FW_CMD_READ |
+                     FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
+       ldst_cmd.cycles_to_len16 = htonl(FW_LEN16(ldst_cmd));
+       ldst_cmd.u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
+       ldst_cmd.u.pcie.ctrl_to_fn =
+               (FW_LDST_CMD_LC | FW_LDST_CMD_FN(adap->fn));
+       ldst_cmd.u.pcie.r = reg;
+       ret = t4_wr_mbox(adap, adap->mbox, &ldst_cmd, sizeof(ldst_cmd),
+                        &ldst_cmd);
+
+       /* If the LDST Command succeeded, extract the returned register
+        * value.  Otherwise read it directly ourselves.
+        */
+       if (ret == 0)
+               val = ntohl(ldst_cmd.u.pcie.data[0]);
+       else
+               t4_hw_pci_read_cfg4(adap, reg, &val);
+
+       return val;
+}
+
 static void setup_memwin(struct adapter *adap)
 {
-       u32 bar0, mem_win0_base, mem_win1_base, mem_win2_base;
+       u32 mem_win0_base, mem_win1_base, mem_win2_base, mem_win2_aperture;
 
-       bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
        if (is_t4(adap->params.chip)) {
+               u32 bar0;
+
+               /* Truncation intentional: we only read the bottom 32-bits of
+                * the 64-bit BAR0/BAR1 ...  We use the hardware backdoor
+                * mechanism to read BAR0 instead of using
+                * pci_resource_start() because we could be operating from
+                * within a Virtual Machine which is trapping our accesses to
+                * our Configuration Space and we need to set up the PCI-E
+                * Memory Window decoders with the actual addresses which will
+                * be coming across the PCI-E link.
+                */
+               bar0 = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_0);
+               bar0 &= PCI_BASE_ADDRESS_MEM_MASK;
+               adap->t4_bar0 = bar0;
+
                mem_win0_base = bar0 + MEMWIN0_BASE;
                mem_win1_base = bar0 + MEMWIN1_BASE;
                mem_win2_base = bar0 + MEMWIN2_BASE;
+               mem_win2_aperture = MEMWIN2_APERTURE;
        } else {
                /* For T5, only relative offset inside the PCIe BAR is passed */
                mem_win0_base = MEMWIN0_BASE;
-               mem_win1_base = MEMWIN1_BASE_T5;
+               mem_win1_base = MEMWIN1_BASE;
                mem_win2_base = MEMWIN2_BASE_T5;
+               mem_win2_aperture = MEMWIN2_APERTURE_T5;
        }
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
                     mem_win0_base | BIR(0) |
@@ -4643,16 +4958,19 @@ static void setup_memwin(struct adapter *adap)
                     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
        t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
                     mem_win2_base | BIR(0) |
-                    WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+                    WINDOW(ilog2(mem_win2_aperture) - 10));
+       t4_read_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2));
 }
 
 static void setup_memwin_rdma(struct adapter *adap)
 {
        if (adap->vres.ocq.size) {
-               unsigned int start, sz_kb;
+               u32 start;
+               unsigned int sz_kb;
 
-               start = pci_resource_start(adap->pdev, 2) +
-                       OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
+               start = t4_read_pcie_cfg4(adap, PCI_BASE_ADDRESS_2);
+               start &= PCI_BASE_ADDRESS_MEM_MASK;
+               start += OCQ_WIN_OFFSET(adap->pdev, &adap->vres);
                sz_kb = roundup_pow_of_two(adap->vres.ocq.size) >> 10;
                t4_write_reg(adap,
                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 3),
@@ -4865,7 +5183,7 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                                              adapter->fn, 0, 1, params, val);
                        if (ret == 0) {
                                /*
-                                * For t4_memory_write() below addresses and
+                                * For t4_memory_rw() below addresses and
                                 * sizes have to be in terms of multiples of 4
                                 * bytes.  So, if the Configuration File isn't
                                 * a multiple of 4 bytes in length we'll have
@@ -4881,8 +5199,9 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                                mtype = FW_PARAMS_PARAM_Y_GET(val[0]);
                                maddr = FW_PARAMS_PARAM_Z_GET(val[0]) << 16;
 
-                               ret = t4_memory_write(adapter, mtype, maddr,
-                                                     size, data);
+                               spin_lock(&adapter->win0_lock);
+                               ret = t4_memory_rw(adapter, 0, mtype, maddr,
+                                                  size, data, T4_MEMORY_WRITE);
                                if (ret == 0 && resid != 0) {
                                        union {
                                                __be32 word;
@@ -4893,10 +5212,12 @@ static int adap_init0_config(struct adapter *adapter, int reset)
                                        last.word = data[size >> 2];
                                        for (i = resid; i < 4; i++)
                                                last.buf[i] = 0;
-                                       ret = t4_memory_write(adapter, mtype,
-                                                             maddr + size,
-                                                             4, &last.word);
+                                       ret = t4_memory_rw(adapter, 0, mtype,
+                                                          maddr + size,
+                                                          4, &last.word,
+                                                          T4_MEMORY_WRITE);
                                }
+                               spin_unlock(&adapter->win0_lock);
                        }
                }
 
@@ -5640,6 +5961,22 @@ static int adap_init0(struct adapter *adap)
                adap->vres.cq.size = val[3] - val[2] + 1;
                adap->vres.ocq.start = val[4];
                adap->vres.ocq.size = val[5] - val[4] + 1;
+
+               params[0] = FW_PARAM_DEV(MAXORDIRD_QP);
+               params[1] = FW_PARAM_DEV(MAXIRD_ADAPTER);
+               ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+               if (ret < 0) {
+                       adap->params.max_ordird_qp = 8;
+                       adap->params.max_ird_adapter = 32 * adap->tids.ntids;
+                       ret = 0;
+               } else {
+                       adap->params.max_ordird_qp = val[0];
+                       adap->params.max_ird_adapter = val[1];
+               }
+               dev_info(adap->pdev_dev,
+                        "max_ordird_qp %d max_ird_adapter %d\n",
+                        adap->params.max_ordird_qp,
+                        adap->params.max_ird_adapter);
        }
        if (caps_cmd.iscsicaps) {
                params[0] = FW_PARAM_PFVF(ISCSI_START);
@@ -5841,12 +6178,33 @@ static inline void init_rspq(struct adapter *adap, struct sge_rspq *q,
 static void cfg_queues(struct adapter *adap)
 {
        struct sge *s = &adap->sge;
-       int i, q10g = 0, n10g = 0, qidx = 0;
+       int i, n10g = 0, qidx = 0;
+#ifndef CONFIG_CHELSIO_T4_DCB
+       int q10g = 0;
+#endif
        int ciq_size;
 
        for_each_port(adap, i)
                n10g += is_x_10g_port(&adap2pinfo(adap, i)->link_cfg);
+#ifdef CONFIG_CHELSIO_T4_DCB
+       /* For Data Center Bridging support we need to be able to support up
+        * to 8 Traffic Priorities, each of which will be assigned to its
+        * own TX Queue in order to prevent Head-Of-Line Blocking.
+        */
+       if (adap->params.nports * 8 > MAX_ETH_QSETS) {
+               dev_err(adap->pdev_dev, "MAX_ETH_QSETS=%d < %d!\n",
+                       MAX_ETH_QSETS, adap->params.nports * 8);
+               BUG_ON(1);
+       }
+
+       for_each_port(adap, i) {
+               struct port_info *pi = adap2pinfo(adap, i);
 
+               pi->first_qset = qidx;
+               pi->nqsets = 8;
+               qidx += pi->nqsets;
+       }
+#else /* !CONFIG_CHELSIO_T4_DCB */
        /*
         * We default to 1 queue per non-10G port and up to # of cores queues
         * per 10G port.
@@ -5863,6 +6221,7 @@ static void cfg_queues(struct adapter *adap)
                pi->nqsets = is_x_10g_port(&pi->link_cfg) ? q10g : 1;
                qidx += pi->nqsets;
        }
+#endif /* !CONFIG_CHELSIO_T4_DCB */
 
        s->ethqsets = qidx;
        s->max_ethqsets = qidx;   /* MSI-X may lower it later */
@@ -5981,8 +6340,14 @@ static int enable_msix(struct adapter *adap)
                /* need nchan for each possible ULD */
                ofld_need = 3 * nchan;
        }
+#ifdef CONFIG_CHELSIO_T4_DCB
+       /* For Data Center Bridging we need 8 Ethernet TX Priority Queues for
+        * each port.
+        */
+       need = 8 * adap->params.nports + EXTRA_VECS + ofld_need;
+#else
        need = adap->params.nports + EXTRA_VECS + ofld_need;
-
+#endif
        want = pci_enable_msix_range(adap->pdev, entries, need, want);
        if (want < 0)
                return want;
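
The DCB change to enable_msix() above budgets 8 Ethernet TX Priority Queues per port. A small sketch of that arithmetic; EXTRA_VECS and the port/channel counts are illustrative.

    #include <stdio.h>

    #define EXTRA_VECS 2    /* illustrative; defined elsewhere in the driver */

    int main(void)
    {
            int nports = 4, nchan = 4;
            int ofld_need = 3 * nchan;  /* nchan for each possible ULD */
            int need_dcb  = 8 * nports + EXTRA_VECS + ofld_need;
            int need      =     nports + EXTRA_VECS + ofld_need;

            printf("MSI-X minimum: %d with DCB, %d without\n",
                   need_dcb, need);
            return 0;
    }
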
@@ -6114,13 +6479,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                return err;
        }
 
-       /* We control everything through one PF */
-       func = PCI_FUNC(pdev->devfn);
-       if (func != ent->driver_data) {
-               pci_save_state(pdev);        /* to restore SR-IOV later */
-               goto sriov;
-       }
-
        err = pci_enable_device(pdev);
        if (err) {
                dev_err(&pdev->dev, "cannot enable PCI device\n");
@@ -6164,6 +6522,15 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto out_free_adapter;
        }
 
+       /* We control everything through one PF */
+       func = SOURCEPF_GET(readl(adapter->regs + PL_WHOAMI));
+       if ((pdev->device == 0xa000 && func != 0) ||
+           func != ent->driver_data) {
+               pci_save_state(pdev);        /* to restore SR-IOV later */
+               err = 0;
+               goto out_unmap_bar0;
+       }
+
        adapter->pdev = pdev;
        adapter->pdev_dev = &pdev->dev;
        adapter->mbox = func;
@@ -6245,6 +6612,10 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                netdev->priv_flags |= IFF_UNICAST_FLT;
 
                netdev->netdev_ops = &cxgb4_netdev_ops;
+#ifdef CONFIG_CHELSIO_T4_DCB
+               netdev->dcbnl_ops = &cxgb4_dcb_ops;
+               cxgb4_dcb_state_init(netdev);
+#endif
                netdev->ethtool_ops = &cxgb_ethtool_ops;
        }
 
@@ -6323,7 +6694,6 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        if (is_offload(adapter))
                attach_ulds(adapter);
 
-sriov:
 #ifdef CONFIG_PCI_IOV
        if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
                if (pci_enable_sriov(pdev, num_vf[func]) == 0)
@@ -6369,8 +6739,7 @@ static void remove_one(struct pci_dev *pdev)
                        if (adapter->port[i]->reg_state == NETREG_REGISTERED)
                                unregister_netdev(adapter->port[i]);
 
-               if (adapter->debugfs_root)
-                       debugfs_remove_recursive(adapter->debugfs_root);
+               debugfs_remove_recursive(adapter->debugfs_root);
 
                /* If we allocated filters, free up state associated with any
                 * valid filters ...
@@ -6396,6 +6765,7 @@ static void remove_one(struct pci_dev *pdev)
                        adapter->flags &= ~DEV_ENABLED;
                }
                pci_release_regions(pdev);
+               synchronize_rcu();
                kfree(adapter);
        } else
                pci_release_regions(pdev);
index 55e9daf..1366ba6 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -172,6 +172,10 @@ int cxgb4_create_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned char port, unsigned char mask);
 int cxgb4_remove_server_filter(const struct net_device *dev, unsigned int stid,
                               unsigned int queue, bool ipv6);
+int cxgb4_clip_get(const struct net_device *dev, const struct in6_addr *lip);
+int cxgb4_clip_release(const struct net_device *dev,
+                      const struct in6_addr *lip);
+
 static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
 {
        skb_set_queue_mapping(skb, (queue << 1) | prio);
@@ -243,6 +247,7 @@ struct cxgb4_lld_info {
        unsigned char fw_api_ver;            /* FW API version */
        unsigned int fw_vers;                /* FW version */
        unsigned int iscsi_iolen;            /* iSCSI max I/O length */
+       unsigned int cclk_ps;                /* Core clock period in psec */
        unsigned short udb_density;          /* # of user DB/page */
        unsigned short ucq_density;          /* # of user CQs/page */
        unsigned short filt_mode;            /* filter optional components */
@@ -251,10 +256,15 @@ struct cxgb4_lld_info {
        void __iomem *gts_reg;               /* address of GTS register */
        void __iomem *db_reg;                /* address of kernel doorbell */
        int dbfifo_int_thresh;               /* doorbell fifo int threshold */
+       unsigned int sge_ingpadboundary;     /* SGE ingress padding boundary */
+       unsigned int sge_egrstatuspagesize;  /* SGE egress status page size */
        unsigned int sge_pktshift;           /* Padding between CPL and */
                                             /* packet data */
+       unsigned int pf;                     /* Physical Function we're using */
        bool enable_fw_ofld_conn;            /* Enable connection through fw */
                                             /* WR */
+       unsigned int max_ordird_qp;          /* Max ORD/IRD depth per RDMA QP */
+       unsigned int max_ird_adapter;        /* Max IRD memory per adapter */
        bool ulptx_memwrite_dsgl;            /* use of T5 DSGL allowed */
 };
 
@@ -291,5 +301,7 @@ int cxgb4_sync_txq_pidx(struct net_device *dev, u16 qid, u16 pidx, u16 size);
 int cxgb4_flush_eq_cache(struct net_device *dev);
 void cxgb4_disable_db_coalescing(struct net_device *dev);
 void cxgb4_enable_db_coalescing(struct net_device *dev);
+int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
+u64 cxgb4_read_sge_timestamp(struct net_device *dev);
 
 #endif  /* !__CXGB4_OFLD_H */
index 8a96572..9604139 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 85eb5c7..a30126c 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index dd4355d..b0bba32 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -2479,6 +2479,22 @@ static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
        }
 }
 
+/**
+ *      t4_free_ofld_rxqs - free a block of consecutive Rx queues
+ *      @adap: the adapter
+ *      @n: number of queues
+ *      @q: pointer to first queue
+ *
+ *      Release the resources of a consecutive block of offload Rx queues.
+ */
+void t4_free_ofld_rxqs(struct adapter *adap, int n, struct sge_ofld_rxq *q)
+{
+       for ( ; n; n--, q++)
+               if (q->rspq.desc)
+                       free_rspq_fl(adap, &q->rspq,
+                                    q->fl.size ? &q->fl : NULL);
+}
+
 /**
  *     t4_free_sge_resources - free SGE resources
  *     @adap: the adapter
@@ -2490,12 +2506,12 @@ void t4_free_sge_resources(struct adapter *adap)
        int i;
        struct sge_eth_rxq *eq = adap->sge.ethrxq;
        struct sge_eth_txq *etq = adap->sge.ethtxq;
-       struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
 
        /* clean up Ethernet Tx/Rx queues */
        for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
                if (eq->rspq.desc)
-                       free_rspq_fl(adap, &eq->rspq, &eq->fl);
+                       free_rspq_fl(adap, &eq->rspq,
+                                    eq->fl.size ? &eq->fl : NULL);
                if (etq->q.desc) {
                        t4_eth_eq_free(adap, adap->fn, adap->fn, 0,
                                       etq->q.cntxt_id);
@@ -2506,18 +2522,9 @@ void t4_free_sge_resources(struct adapter *adap)
        }
 
        /* clean up RDMA and iSCSI Rx queues */
-       for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
-               if (oq->rspq.desc)
-                       free_rspq_fl(adap, &oq->rspq, &oq->fl);
-       }
-       for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
-               if (oq->rspq.desc)
-                       free_rspq_fl(adap, &oq->rspq, &oq->fl);
-       }
-       for (i = 0, oq = adap->sge.rdmaciq; i < adap->sge.rdmaciqs; i++, oq++) {
-               if (oq->rspq.desc)
-                       free_rspq_fl(adap, &oq->rspq, &oq->fl);
-       }
+       t4_free_ofld_rxqs(adap, adap->sge.ofldqsets, adap->sge.ofldrxq);
+       t4_free_ofld_rxqs(adap, adap->sge.rdmaqs, adap->sge.rdmarxq);
+       t4_free_ofld_rxqs(adap, adap->sge.rdmaciqs, adap->sge.rdmaciq);
 
        /* clean up offload Tx queues */
        for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
index bba6768..448bec1 100644
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -143,6 +143,30 @@ void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
        }
 }
 
+/*
+ * Read a 32-bit PCI Configuration Space register via the PCI-E backdoor
+ * mechanism.  This guarantees that we get the real value even if we're
+ * operating within a Virtual Machine and the Hypervisor is trapping our
+ * Configuration Space accesses.
+ */
+void t4_hw_pci_read_cfg4(struct adapter *adap, int reg, u32 *val)
+{
+       u32 req = ENABLE | FUNCTION(adap->fn) | reg;
+
+       if (is_t4(adap->params.chip))
+               req |= F_LOCALCFG;
+
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, req);
+       *val = t4_read_reg(adap, PCIE_CFG_SPACE_DATA);
+
+       /* Reset ENABLE to 0 so reads of PCIE_CFG_SPACE_DATA won't cause a
+        * Configuration Space read.  (None of the other fields matter when
+        * ENABLE is 0 so a simple register write is easier than a
+        * read-modify-write via t4_set_reg_field().)
+        */
+       t4_write_reg(adap, PCIE_CFG_SPACE_REQ, 0);
+}
+
 /*
  * Get the reply to a mailbox command and store it in @rpl in big-endian order.
  */
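
A sketch of the REQ/DATA indirect-access pattern that t4_hw_pci_read_cfg4() above uses, played against a mocked register pair. ENABLE, FUNCTION() and the mock behaviour are illustrative, not the real PCIE_CFG_SPACE_* semantics.

    #include <stdio.h>

    #define ENABLE       (1u << 30)     /* illustrative bit position */
    #define FUNCTION(fn) ((fn) << 12)   /* illustrative field position */

    static unsigned int req_reg;
    static unsigned int data_reg = 0xdeadbeef;  /* pretend cfg contents */

    static void write_req(unsigned int v)  { req_reg = v; }
    static unsigned int read_data(void)
    {
            /* The mock only returns data while ENABLE is set. */
            return (req_reg & ENABLE) ? data_reg : 0;
    }

    int main(void)
    {
            unsigned int fn = 4, reg = 0x10, val;

            write_req(ENABLE | FUNCTION(fn) | reg); /* request the read */
            val = read_data();                      /* fetch the result */
            write_req(0);   /* clear ENABLE so later DATA reads are inert */

            printf("cfg[%#x] = %#x\n", reg, val);
            return 0;
    }
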
@@ -389,78 +413,41 @@ int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
        return 0;
 }
 
-/*
- *     t4_mem_win_rw - read/write memory through PCIE memory window
- *     @adap: the adapter
- *     @addr: address of first byte requested
- *     @data: MEMWIN0_APERTURE bytes of data containing the requested address
- *     @dir: direction of transfer 1 => read, 0 => write
- *
- *     Read/write MEMWIN0_APERTURE bytes of data from MC starting at a
- *     MEMWIN0_APERTURE-byte-aligned address that covers the requested
- *     address @addr.
- */
-static int t4_mem_win_rw(struct adapter *adap, u32 addr, __be32 *data, int dir)
-{
-       int i;
-       u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
-
-       /*
-        * Setup offset into PCIE memory window.  Address must be a
-        * MEMWIN0_APERTURE-byte-aligned address.  (Read back MA register to
-        * ensure that changes propagate before we attempt to use the new
-        * values.)
-        */
-       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
-                    (addr & ~(MEMWIN0_APERTURE - 1)) | win_pf);
-       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
-
-       /* Collecting data 4 bytes at a time upto MEMWIN0_APERTURE */
-       for (i = 0; i < MEMWIN0_APERTURE; i = i+0x4) {
-               if (dir)
-                       *data++ = (__force __be32) t4_read_reg(adap,
-                                                       (MEMWIN0_BASE + i));
-               else
-                       t4_write_reg(adap, (MEMWIN0_BASE + i),
-                                    (__force u32) *data++);
-       }
-
-       return 0;
-}
-
 /**
  *     t4_memory_rw - read/write EDC 0, EDC 1 or MC via PCIE memory window
  *     @adap: the adapter
+ *     @win: PCI-E Memory Window to use
  *     @mtype: memory type: MEM_EDC0, MEM_EDC1 or MEM_MC
  *     @addr: address within indicated memory type
  *     @len: amount of memory to transfer
  *     @buf: host memory buffer
- *     @dir: direction of transfer 1 => read, 0 => write
+ *     @dir: direction of transfer T4_MEMORY_READ (1) or T4_MEMORY_WRITE (0)
  *
  *     Reads/writes an [almost] arbitrary memory region in the firmware: the
- *     firmware memory address, length and host buffer must be aligned on
- *     32-bit boudaries.  The memory is transferred as a raw byte sequence
- *     from/to the firmware's memory.  If this memory contains data
- *     structures which contain multi-byte integers, it's the callers
- *     responsibility to perform appropriate byte order conversions.
+ *     firmware memory address and host buffer must be aligned on 32-bit
+ *     boudaries; the length may be arbitrary.  The memory is transferred as
+ *     a raw byte sequence from/to the firmware's memory.  If this memory
+ *     contains data structures which contain multi-byte integers, it's the
+ *     caller's responsibility to perform appropriate byte order conversions.
  */
-static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
-                       __be32 *buf, int dir)
+int t4_memory_rw(struct adapter *adap, int win, int mtype, u32 addr,
+                u32 len, __be32 *buf, int dir)
 {
-       u32 pos, start, end, offset, memoffset;
-       u32 edc_size, mc_size;
-       int ret = 0;
-       __be32 *data;
+       u32 pos, offset, resid, memoffset;
+       u32 edc_size, mc_size, win_pf, mem_reg, mem_aperture, mem_base;
 
-       /*
-        * Argument sanity checks ...
+       /* Argument sanity checks ...
         */
-       if ((addr & 0x3) || (len & 0x3))
+       if (addr & 0x3)
                return -EINVAL;
 
-       data = vmalloc(MEMWIN0_APERTURE);
-       if (!data)
-               return -ENOMEM;
+       /* It's convenient to be able to handle lengths which aren't a
+        * multiple of 32-bits because we often end up transferring files to
+        * the firmware.  So we'll handle that by normalizing the length here
+        * and then handling any residual transfer at the end.
+        */
+       resid = len & 0x3;
+       len -= resid;
 
        /* Offset into the region of memory which is being accessed
         * MEM_EDC0 = 0
@@ -481,66 +468,98 @@ static int t4_memory_rw(struct adapter *adap, int mtype, u32 addr, u32 len,
        /* Determine the PCIE_MEM_ACCESS_OFFSET */
        addr = addr + memoffset;
 
-       /*
-        * The underlaying EDC/MC read routines read MEMWIN0_APERTURE bytes
-        * at a time so we need to round down the start and round up the end.
-        * We'll start copying out of the first line at (addr - start) a word
-        * at a time.
+       /* Each PCI-E Memory Window is programmed with a window size -- or
+        * "aperture" -- which controls the granularity of its mapping onto
+        * adapter memory.  We need to grab that aperture in order to know
+        * how to use the specified window.  The window is also programmed
+        * with the base address of the Memory Window in BAR0's address
+        * space.  For T4 this is an absolute PCI-E Bus Address.  For T5
+        * the address is relative to BAR0.
         */
-       start = addr & ~(MEMWIN0_APERTURE-1);
-       end = (addr + len + MEMWIN0_APERTURE-1) & ~(MEMWIN0_APERTURE-1);
-       offset = (addr - start)/sizeof(__be32);
+       mem_reg = t4_read_reg(adap,
+                             PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN,
+                                                 win));
+       mem_aperture = 1 << (GET_WINDOW(mem_reg) + 10);
+       mem_base = GET_PCIEOFST(mem_reg) << 10;
+       if (is_t4(adap->params.chip))
+               mem_base -= adap->t4_bar0;
+       win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
 
-       for (pos = start; pos < end; pos += MEMWIN0_APERTURE, offset = 0) {
+       /* Calculate our initial PCI-E Memory Window Position and Offset into
+        * that Window.
+        */
+       pos = addr & ~(mem_aperture-1);
+       offset = addr - pos;
 
-               /*
-                * If we're writing, copy the data from the caller's memory
-                * buffer
+       /* Set up initial PCI-E Memory Window to cover the start of our
+        * transfer.  (Read it back to ensure that changes propagate before we
+        * attempt to use the new value.)
+        */
+       t4_write_reg(adap,
+                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win),
+                    pos | win_pf);
+       t4_read_reg(adap,
+                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET, win));
+
+       /* Transfer data to/from the adapter as long as there's an integral
+        * number of 32-bit transfers to complete.
+        */
+       while (len > 0) {
+               if (dir == T4_MEMORY_READ)
+                       *buf++ = (__force __be32) t4_read_reg(adap,
+                                                       mem_base + offset);
+               else
+                       t4_write_reg(adap, mem_base + offset,
+                                    (__force u32) *buf++);
+               offset += sizeof(__be32);
+               len -= sizeof(__be32);
+
+               /* If we've reached the end of our current window aperture,
+                * move the PCI-E Memory Window on to the next.  Note that
+                * doing this here even when "len" has reached 0 allows us to set up
+                * the PCI-E Memory Window for a possible final residual
+                * transfer below ...
                 */
-               if (!dir) {
-                       /*
-                        * If we're doing a partial write, then we need to do
-                        * a read-modify-write ...
-                        */
-                       if (offset || len < MEMWIN0_APERTURE) {
-                               ret = t4_mem_win_rw(adap, pos, data, 1);
-                               if (ret)
-                                       break;
-                       }
-                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
-                              len > 0) {
-                               data[offset++] = *buf++;
-                               len -= sizeof(__be32);
-                       }
+               if (offset == mem_aperture) {
+                       pos += mem_aperture;
+                       offset = 0;
+                       t4_write_reg(adap,
+                                    PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
+                                                        win), pos | win_pf);
+                       t4_read_reg(adap,
+                                   PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_OFFSET,
+                                                       win));
                }
-
-               /*
-                * Transfer a block of memory and bail if there's an error.
-                */
-               ret = t4_mem_win_rw(adap, pos, data, dir);
-               if (ret)
-                       break;
-
-               /*
-                * If we're reading, copy the data into the caller's memory
-                * buffer.
-                */
-               if (dir)
-                       while (offset < (MEMWIN0_APERTURE/sizeof(__be32)) &&
-                              len > 0) {
-                               *buf++ = data[offset++];
-                               len -= sizeof(__be32);
-                       }
        }
 
-       vfree(data);
-       return ret;
-}
+       /* If the original transfer had a length which wasn't a multiple of
+        * 32-bits, now's where we need to finish off the transfer of the
+        * residual amount.  The PCI-E Memory Window has already been moved
+        * above (if necessary) to cover this final transfer.
+        */
+       if (resid) {
+               union {
+                       __be32 word;
+                       char byte[4];
+               } last;
+               unsigned char *bp;
+               int i;
+
+               if (dir == T4_MEMORY_READ) {
+                       last.word = (__force __be32) t4_read_reg(adap,
+                                                       mem_base + offset);
+                       for (bp = (unsigned char *)buf, i = resid; i < 4; i++)
+                               bp[i] = last.byte[i];
+               } else {
+                       last.word = *buf;
+                       for (i = resid; i < 4; i++)
+                               last.byte[i] = 0;
+                       t4_write_reg(adap, mem_base + offset,
+                                    (__force u32) last.word);
+               }
+       }
 
-int t4_memory_write(struct adapter *adap, int mtype, u32 addr, u32 len,
-                   __be32 *buf)
-{
-       return t4_memory_rw(adap, mtype, addr, len, buf, 0);
+       return 0;
 }
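
For reference, a minimal standalone sketch (not driver code) of the
residual-write packing used above: the first "resid" bytes of the last,
partial word are kept and the remainder is zeroed before the word goes out
as a single 32-bit transfer.  All names below are illustrative only.

	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>

	/* Pack a partial word for a 32-bit write: keep bytes 0..resid-1,
	 * zero bytes resid..3.  Assumes src points at a full 4-byte word,
	 * as the driver's caller-supplied buffer does.
	 */
	static uint32_t pack_residual_write(const void *src, unsigned int resid)
	{
		union {
			uint32_t word;
			unsigned char byte[4];
		} last;
		unsigned int i;

		memcpy(&last.word, src, sizeof(last.word));
		for (i = resid; i < 4; i++)
			last.byte[i] = 0;
		return last.word;
	}

	int main(void)
	{
		unsigned char buf[4] = { 0xaa, 0xbb, 0xcc, 0xdd };

		/* Keep only the first two bytes of the final word. */
		printf("0x%08x\n", (unsigned int)pack_residual_write(buf, 2));
		return 0;
	}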
 
 #define EEPROM_STAT_ADDR   0x7bfc
@@ -1700,16 +1719,24 @@ static void mps_intr_handler(struct adapter *adapter)
  */
 static void mem_intr_handler(struct adapter *adapter, int idx)
 {
-       static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+       static const char name[4][7] = { "EDC0", "EDC1", "MC/MC0", "MC1" };
 
        unsigned int addr, cnt_addr, v;
 
        if (idx <= MEM_EDC1) {
                addr = EDC_REG(EDC_INT_CAUSE, idx);
                cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+       } else if (idx == MEM_MC) {
+               if (is_t4(adapter->params.chip)) {
+                       addr = MC_INT_CAUSE;
+                       cnt_addr = MC_ECC_STATUS;
+               } else {
+                       addr = MC_P_INT_CAUSE;
+                       cnt_addr = MC_P_ECC_STATUS;
+               }
        } else {
-               addr = MC_INT_CAUSE;
-               cnt_addr = MC_ECC_STATUS;
+               addr = MC_REG(MC_P_INT_CAUSE, 1);
+               cnt_addr = MC_REG(MC_P_ECC_STATUS, 1);
        }
 
        v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
@@ -1873,6 +1900,8 @@ int t4_slow_intr_handler(struct adapter *adapter)
                pcie_intr_handler(adapter);
        if (cause & MC)
                mem_intr_handler(adapter, MEM_MC);
+       if (!is_t4(adapter->params.chip) && (cause & MC1))
+               mem_intr_handler(adapter, MEM_MC1);
        if (cause & EDC0)
                mem_intr_handler(adapter, MEM_EDC0);
        if (cause & EDC1)
@@ -2504,39 +2533,6 @@ int t4_fwaddrspace_write(struct adapter *adap, unsigned int mbox,
        return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
 }
 
-/**
- *     t4_mem_win_read_len - read memory through PCIE memory window
- *     @adap: the adapter
- *     @addr: address of first byte requested aligned on 32b.
- *     @data: len bytes to hold the data read
- *     @len: amount of data to read from window.  Must be <=
- *            MEMWIN0_APERTURE after adjusting for the 16B (T4) and
- *            128B (T5) alignment requirements of the memory window.
- *
- *     Read len bytes of data from MC starting at @addr.
- */
-int t4_mem_win_read_len(struct adapter *adap, u32 addr, __be32 *data, int len)
-{
-       int i, off;
-       u32 win_pf = is_t4(adap->params.chip) ? 0 : V_PFNUM(adap->fn);
-
-       /* Align on a 2KB boundary.
-        */
-       off = addr & MEMWIN0_APERTURE;
-       if ((addr & 3) || (len + off) > MEMWIN0_APERTURE)
-               return -EINVAL;
-
-       t4_write_reg(adap, PCIE_MEM_ACCESS_OFFSET,
-                    (addr & ~MEMWIN0_APERTURE) | win_pf);
-       t4_read_reg(adap, PCIE_MEM_ACCESS_OFFSET);
-
-       for (i = 0; i < len; i += 4)
-               *data++ = (__force __be32) t4_read_reg(adap,
-                                               (MEMWIN0_BASE + off + i));
-
-       return 0;
-}
-
 /**
  *     t4_mdio_rd - read a PHY register through MDIO
  *     @adap: the adapter
@@ -3174,6 +3170,46 @@ int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
        return ret;
 }
 
+/**
+ *      t4_set_params_nosleep - sets FW or device parameters
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @pf: the PF
+ *      @vf: the VF
+ *      @nparams: the number of parameters
+ *      @params: the parameter names
+ *      @val: the parameter values
+ *
+ *      Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *      specified at once.  Does not sleep.
+ */
+int t4_set_params_nosleep(struct adapter *adap, unsigned int mbox,
+                         unsigned int pf, unsigned int vf,
+                         unsigned int nparams, const u32 *params,
+                         const u32 *val)
+{
+       struct fw_params_cmd c;
+       __be32 *p = &c.param[0].mnem;
+
+       if (nparams > 7)
+               return -EINVAL;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_vfn = cpu_to_be32(FW_CMD_OP(FW_PARAMS_CMD) |
+                               FW_CMD_REQUEST | FW_CMD_WRITE |
+                               FW_PARAMS_CMD_PFN(pf) |
+                               FW_PARAMS_CMD_VFN(vf));
+       c.retval_len16 = cpu_to_be32(FW_LEN16(c));
+
+       while (nparams--) {
+               *p++ = cpu_to_be32(*params++);
+               *p++ = cpu_to_be32(*val++);
+       }
+
+       return t4_wr_mbox_ns(adap, mbox, &c, sizeof(c), NULL);
+}
+
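
A hypothetical call site for the helper above (the parameter mnemonic,
mailbox choice and error handling are illustrative only, not taken from
this patch):

	u32 param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
		    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_CF);
	u32 val = 0;
	int ret;

	/* One (mnemonic, value) pair; this context must not sleep. */
	ret = t4_set_params_nosleep(adap, adap->fn, adap->fn, 0, 1,
				    &param, &val);
	if (ret < 0)
		dev_err(adap->pdev_dev, "param write failed: %d\n", ret);
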
 /**
  *     t4_set_params - sets FW or device parameters
  *     @adap: the adapter
@@ -3498,6 +3534,33 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
        return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
 }
 
+/**
+ *      t4_enable_vi_params - enable/disable a virtual interface
+ *      @adap: the adapter
+ *      @mbox: mailbox to use for the FW command
+ *      @viid: the VI id
+ *      @rx_en: 1=enable Rx, 0=disable Rx
+ *      @tx_en: 1=enable Tx, 0=disable Tx
+ *      @dcb_en: 1=enable delivery of Data Center Bridging messages.
+ *
+ *      Enables/disables a virtual interface.  Note that setting DCB Enable
+ *      only makes sense when enabling a Virtual Interface ...
+ */
+int t4_enable_vi_params(struct adapter *adap, unsigned int mbox,
+                       unsigned int viid, bool rx_en, bool tx_en, bool dcb_en)
+{
+       struct fw_vi_enable_cmd c;
+
+       memset(&c, 0, sizeof(c));
+       c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+                            FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+
+       c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+                              FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c) |
+                              FW_VI_ENABLE_CMD_DCB_INFO(dcb_en));
+       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
 /**
  *     t4_enable_vi - enable/disable a virtual interface
  *     @adap: the adapter
@@ -3511,14 +3574,7 @@ int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
 int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
                 bool rx_en, bool tx_en)
 {
-       struct fw_vi_enable_cmd c;
-
-       memset(&c, 0, sizeof(c));
-       c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
-                            FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
-       c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
-                              FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
-       return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+       return t4_enable_vi_params(adap, mbox, viid, rx_en, tx_en, 0);
 }
 
 /**
@@ -3962,6 +4018,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
                p->lport = j;
                p->rss_size = rss_size;
                memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+               adap->port[i]->dev_port = j;
 
                ret = ntohl(c.u.info.lstatus_to_modtype);
                p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
index 71b799b..35e3d8e 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
index 973eb11..0259fee 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -270,12 +270,15 @@ struct cpl_pass_accept_rpl {
 #define RX_COALESCE_VALID(x) ((x) << 11)
 #define RX_COALESCE(x)       ((x) << 12)
 #define PACE(x)              ((x) << 16)
+#define RX_FC_VALID         ((1U) << 19)
+#define RX_FC_DISABLE       ((1U) << 20)
 #define TX_QUEUE(x)          ((x) << 23)
 #define RX_CHANNEL(x)        ((x) << 26)
 #define CCTRL_ECN(x)         ((x) << 27)
 #define WND_SCALE_EN(x)      ((x) << 28)
 #define TSTAMPS_EN(x)        ((x) << 29)
 #define SACK_EN(x)           ((x) << 30)
+#define T5_OPT_2_VALID      ((1U) << 31)
        __be64 opt0;
 };
 
index 225ad8a..e3146e8 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2003-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
 #define V_NOCOALESCE(x) ((x) << S_NOCOALESCE)
 #define F_NOCOALESCE    V_NOCOALESCE(1U)
 
+#define SGE_TIMESTAMP_LO 0x1098
+#define SGE_TIMESTAMP_HI 0x109c
+#define S_TSVAL    0
+#define M_TSVAL    0xfffffffU
+#define GET_TSVAL(x) (((x) >> S_TSVAL) & M_TSVAL)
+
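
A hedged sketch of how these two registers might be combined into a single
64-bit timestamp; the split (low 32 bits in SGE_TIMESTAMP_LO, upper bits
extracted by GET_TSVAL() from SGE_TIMESTAMP_HI) is an assumption here:

	u32 lo = t4_read_reg(adap, SGE_TIMESTAMP_LO);
	u32 hi = GET_TSVAL(t4_read_reg(adap, SGE_TIMESTAMP_HI));
	u64 ts = ((u64)hi << 32) | lo;	/* assumed register layout */
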
 #define SGE_TIMER_VALUE_0_AND_1 0x10b8
 #define  TIMERVALUE0_MASK   0xffff0000U
 #define  TIMERVALUE0_SHIFT  16
 #define  MSTGRPPERR      0x00000001U
 
 #define PCIE_NONFAT_ERR 0x3010
+#define PCIE_CFG_SPACE_REQ 0x3060
+#define PCIE_CFG_SPACE_DATA 0x3064
 #define PCIE_MEM_ACCESS_BASE_WIN 0x3068
 #define S_PCIEOFST       10
 #define M_PCIEOFST       0x3fffffU
 #define  WINDOW_MASK     0x000000ffU
 #define  WINDOW_SHIFT    0
 #define  WINDOW(x)       ((x) << WINDOW_SHIFT)
+#define  GET_WINDOW(x)  (((x) >> WINDOW_SHIFT) & WINDOW_MASK)
 #define PCIE_MEM_ACCESS_OFFSET 0x306c
+#define ENABLE (1U << 30)
+#define FUNCTION(x) ((x) << 12)
+#define F_LOCALCFG    (1U << 28)
 
 #define S_PFNUM    0
 #define V_PFNUM(x) ((x) << S_PFNUM)
 #define  TDUE 0x00010000U
 
 #define MC_INT_CAUSE 0x7518
+#define MC_P_INT_CAUSE 0x41318
 #define  ECC_UE_INT_CAUSE 0x00000004U
 #define  ECC_CE_INT_CAUSE 0x00000002U
 #define  PERR_INT_CAUSE   0x00000001U
 
 #define MC_ECC_STATUS 0x751c
+#define MC_P_ECC_STATUS 0x4131c
 #define  ECC_CECNT_MASK   0xffff0000U
 #define  ECC_CECNT_SHIFT  16
 #define  ECC_CECNT(x)     ((x) << ECC_CECNT_SHIFT)
 #define  I2CM       0x00000002U
 #define  CIM        0x00000001U
 
+#define MC1 0x31
 #define PL_INT_ENABLE 0x19410
 #define PL_INT_MAP0 0x19414
 #define PL_RST 0x19428
index 9cc973f..ff709e3 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * This file is part of the Chelsio T4 Ethernet driver for Linux.
  *
- * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ * Copyright (c) 2009-2014 Chelsio Communications, Inc. All rights reserved.
  *
  * This software is available to you under a choice of one of two
  * licenses.  You may choose to be licensed under the terms of the GNU
@@ -46,9 +46,11 @@ enum fw_retval {
        FW_EFAULT               = 14,   /* bad address; fw bad */
        FW_EBUSY                = 16,   /* resource busy */
        FW_EEXIST               = 17,   /* file exists */
+       FW_ENODEV               = 19,   /* no such device */
        FW_EINVAL               = 22,   /* invalid argument */
        FW_ENOSPC               = 28,   /* no space left on device */
        FW_ENOSYS               = 38,   /* functionality not implemented */
+       FW_ENODATA              = 61,   /* no data available */
        FW_EPROTO               = 71,   /* protocol error */
        FW_EADDRINUSE           = 98,   /* address already in use */
        FW_EADDRNOTAVAIL        = 99,   /* cannot assign requested address */
@@ -932,6 +934,8 @@ enum fw_params_param_dev {
        FW_PARAMS_PARAM_DEV_FWREV = 0x0B,
        FW_PARAMS_PARAM_DEV_TPREV = 0x0C,
        FW_PARAMS_PARAM_DEV_CF = 0x0D,
+       FW_PARAMS_PARAM_DEV_MAXORDIRD_QP = 0x13, /* max supported QP IRD/ORD */
+       FW_PARAMS_PARAM_DEV_MAXIRD_ADAPTER = 0x14, /* max supported adap IRD */
        FW_PARAMS_PARAM_DEV_ULPTX_MEMWRITE_DSGL = 0x17,
 };
 
@@ -989,6 +993,7 @@ enum fw_params_param_dmaq {
        FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
        FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
        FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+       FW_PARAMS_PARAM_DMAQ_EQ_DCBPRIO_ETH = 0x13,
 };
 
 #define FW_PARAMS_MNEM(x)      ((x) << 24)
@@ -1422,6 +1427,7 @@ struct fw_vi_enable_cmd {
 #define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
 #define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
 #define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
+#define FW_VI_ENABLE_CMD_DCB_INFO(x) ((x) << 28)
 #define FW_VI_ENABLE_CMD_LED (1U << 29)
 
 /* VI VF stats offset definitions */
@@ -1594,6 +1600,9 @@ enum fw_port_action {
        FW_PORT_ACTION_GET_PORT_INFO    = 0x0003,
        FW_PORT_ACTION_L2_PPP_CFG       = 0x0004,
        FW_PORT_ACTION_L2_DCB_CFG       = 0x0005,
+       FW_PORT_ACTION_DCB_READ_TRANS   = 0x0006,
+       FW_PORT_ACTION_DCB_READ_RECV    = 0x0007,
+       FW_PORT_ACTION_DCB_READ_DET     = 0x0008,
        FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
        FW_PORT_ACTION_L1_LOW_PWR_EN    = 0x0011,
        FW_PORT_ACTION_L2_WOL_MODE_EN   = 0x0012,
@@ -1637,6 +1646,14 @@ enum fw_port_dcb_type {
        FW_PORT_DCB_TYPE_PRIORATE       = 0x02,
        FW_PORT_DCB_TYPE_PFC            = 0x03,
        FW_PORT_DCB_TYPE_APP_ID         = 0x04,
+       FW_PORT_DCB_TYPE_CONTROL        = 0x05,
+};
+
+enum fw_port_dcb_feature_state {
+       FW_PORT_DCB_FEATURE_STATE_PENDING = 0x0,
+       FW_PORT_DCB_FEATURE_STATE_SUCCESS = 0x1,
+       FW_PORT_DCB_FEATURE_STATE_ERROR = 0x2,
+       FW_PORT_DCB_FEATURE_STATE_TIMEOUT = 0x3,
 };
 
 struct fw_port_cmd {
@@ -1648,9 +1665,11 @@ struct fw_port_cmd {
                        __be32 r;
                } l1cfg;
                struct fw_port_l2cfg {
-                       __be16 ctlbf_to_ivlan0;
+                       __u8   ctlbf;
+                       __u8   ovlan3_to_ivlan0;
                        __be16 ivlantype;
-                       __be32 txipg_pkd;
+                       __be16 txipg_force_pinfo;
+                       __be16 mtu;
                        __be16 ovlan0mask;
                        __be16 ovlan0type;
                        __be16 ovlan1mask;
@@ -1666,24 +1685,60 @@ struct fw_port_cmd {
                        __be16 acap;
                        __be16 mtu;
                        __u8   cbllen;
-                       __u8   r9;
-                       __be32 r10;
-                       __be64 r11;
+                       __u8   auxlinfo;
+                       __u8   dcbxdis_pkd;
+                       __u8   r8_lo[3];
+                       __be64 r9;
                } info;
-               struct fw_port_ppp {
-                       __be32 pppen_to_ncsich;
-                       __be32 r11;
-               } ppp;
-               struct fw_port_dcb {
-                       __be16 cfg;
-                       u8 up_map;
-                       u8 sf_cfgrc;
-                       __be16 prot_ix;
-                       u8 pe7_to_pe0;
-                       u8 numTCPFCs;
-                       __be32 pgid0_to_pgid7;
-                       __be32 numTCs_oui;
-                       u8 pgpc[8];
+               struct fw_port_diags {
+                       __u8   diagop;
+                       __u8   r[3];
+                       __be32 diagval;
+               } diags;
+               union fw_port_dcb {
+                       struct fw_port_dcb_pgid {
+                               __u8   type;
+                               __u8   apply_pkd;
+                               __u8   r10_lo[2];
+                               __be32 pgid;
+                               __be64 r11;
+                       } pgid;
+                       struct fw_port_dcb_pgrate {
+                               __u8   type;
+                               __u8   apply_pkd;
+                               __u8   r10_lo[5];
+                               __u8   num_tcs_supported;
+                               __u8   pgrate[8];
+                       } pgrate;
+                       struct fw_port_dcb_priorate {
+                               __u8   type;
+                               __u8   apply_pkd;
+                               __u8   r10_lo[6];
+                               __u8   strict_priorate[8];
+                       } priorate;
+                       struct fw_port_dcb_pfc {
+                               __u8   type;
+                               __u8   pfcen;
+                               __u8   r10[5];
+                               __u8   max_pfc_tcs;
+                               __be64 r11;
+                       } pfc;
+                       struct fw_port_app_priority {
+                               __u8   type;
+                               __u8   r10[2];
+                               __u8   idx;
+                               __u8   user_prio_map;
+                               __u8   sel_field;
+                               __be16 protocolid;
+                               __be64 r12;
+                       } app_priority;
+                       struct fw_port_dcb_control {
+                               __u8   type;
+                               __u8   all_syncd_pkd;
+                               __be16 pfc_state_to_app_state;
+                               __be32 r11;
+                               __be64 r12;
+                       } control;
                } dcb;
        } u;
 };
@@ -1720,6 +1775,10 @@ struct fw_port_cmd {
 #define FW_PORT_CMD_MODTYPE_MASK 0x1f
 #define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
 
+#define FW_PORT_CMD_DCBXDIS (1U << 7)
+#define FW_PORT_CMD_APPLY (1U <<  7)
+#define FW_PORT_CMD_ALL_SYNCD (1U << 7)
+
 #define FW_PORT_CMD_PPPEN(x) ((x) << 31)
 #define FW_PORT_CMD_TPSRC(x) ((x) << 28)
 #define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
index ff1cdd1..f002af1 100644 (file)
@@ -2924,6 +2924,15 @@ static DEFINE_PCI_DEVICE_TABLE(cxgb4vf_pci_tbl) = {
        CH_DEVICE(0x480a, 0),   /* T404-bt */
        CH_DEVICE(0x480d, 0),   /* T480-cr */
        CH_DEVICE(0x480e, 0),   /* T440-lp-cr */
+       CH_DEVICE(0x4880, 0),
+       CH_DEVICE(0x4880, 1),
+       CH_DEVICE(0x4880, 2),
+       CH_DEVICE(0x4880, 3),
+       CH_DEVICE(0x4880, 4),
+       CH_DEVICE(0x4880, 5),
+       CH_DEVICE(0x4880, 6),
+       CH_DEVICE(0x4880, 7),
+       CH_DEVICE(0x4880, 8),
        CH_DEVICE(0x5800, 0),   /* T580-dbg */
        CH_DEVICE(0x5801, 0),   /* T520-cr */
        CH_DEVICE(0x5802, 0),   /* T522-cr */
index fe84fba..9823a0e 100644 (file)
@@ -145,7 +145,6 @@ struct net_local {
        int force;              /* force various values; see FORCE* above. */
        spinlock_t lock;
        void __iomem *virt_addr;/* CS89x0 virtual address. */
-       unsigned long size;     /* Length of CS89x0 memory region. */
 #if ALLOW_DMA
        int use_dma;            /* Flag: we're using dma */
        int dma;                /* DMA channel */
@@ -1854,41 +1853,29 @@ static int __init cs89x0_platform_probe(struct platform_device *pdev)
 
        lp = netdev_priv(dev);
 
-       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        dev->irq = platform_get_irq(pdev, 0);
-       if (mem_res == NULL || dev->irq <= 0) {
-               dev_warn(&dev->dev, "memory/interrupt resource missing\n");
+       if (dev->irq <= 0) {
+               dev_warn(&dev->dev, "interrupt resource missing\n");
                err = -ENXIO;
                goto free;
        }
 
-       lp->size = resource_size(mem_res);
-       if (!request_mem_region(mem_res->start, lp->size, DRV_NAME)) {
-               dev_warn(&dev->dev, "request_mem_region() failed\n");
-               err = -EBUSY;
+       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       virt_addr = devm_ioremap_resource(&pdev->dev, mem_res);
+       if (IS_ERR(virt_addr)) {
+               err = PTR_ERR(virt_addr);
                goto free;
        }
 
-       virt_addr = ioremap(mem_res->start, lp->size);
-       if (!virt_addr) {
-               dev_warn(&dev->dev, "ioremap() failed\n");
-               err = -ENOMEM;
-               goto release;
-       }
-
        err = cs89x0_probe1(dev, virt_addr, 0);
        if (err) {
                dev_warn(&dev->dev, "no cs8900 or cs8920 detected\n");
-               goto unmap;
+               goto free;
        }
 
        platform_set_drvdata(pdev, dev);
        return 0;
 
-unmap:
-       iounmap(virt_addr);
-release:
-       release_mem_region(mem_res->start, lp->size);
 free:
        free_netdev(dev);
        return err;
@@ -1897,17 +1884,12 @@ free:
 static int cs89x0_platform_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
-       struct net_local *lp = netdev_priv(dev);
-       struct resource *mem_res;
 
        /* This platform_get_resource() call will not return NULL, because
         * the same call in cs89x0_platform_probe() has returned a non NULL
         * value.
         */
-       mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        unregister_netdev(dev);
-       iounmap(lp->virt_addr);
-       release_mem_region(mem_res->start, lp->size);
        free_netdev(dev);
        return 0;
 }
index 239e1e4..aadcaf7 100644 (file)
@@ -2,5 +2,5 @@ obj-$(CONFIG_ENIC) := enic.o
 
 enic-y := enic_main.o vnic_cq.o vnic_intr.o vnic_wq.o \
        enic_res.o enic_dev.o enic_pp.o vnic_dev.o vnic_rq.o vnic_vic.o \
-       enic_ethtool.o enic_api.o
+       enic_ethtool.o enic_api.o enic_clsf.o
 
index 14f465f..962510f 100644 (file)
@@ -33,7 +33,7 @@
 
 #define DRV_NAME               "enic"
 #define DRV_DESCRIPTION                "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION            "2.1.1.50"
+#define DRV_VERSION            "2.1.1.67"
 #define DRV_COPYRIGHT          "Copyright 2008-2013 Cisco Systems, Inc"
 
 #define ENIC_BARS_MAX          6
@@ -99,6 +99,41 @@ struct enic_port_profile {
        u8 mac_addr[ETH_ALEN];
 };
 
+/* enic_rfs_fltr_node - rfs filter node in hash table
+ *     @keys: IPv4 5-tuple
+ *     @flow_id: flow_id of clsf filter provided by kernel
+ *     @fltr_id: filter id of clsf filter returned by adapter
+ *     @rq_id: desired rq index
+ *     @node: hlist_node
+ */
+struct enic_rfs_fltr_node {
+       struct flow_keys keys;
+       u32 flow_id;
+       u16 fltr_id;
+       u16 rq_id;
+       struct hlist_node node;
+};
+
+/* enic_rfs_flw_tbl - rfs flow table
+ *     @max: Maximum number of filters vNIC supports
+ *     @free: Number of free filters available
+ *     @toclean: hash table index to clean next
+ *     @ht_head: hash table list head
+ *     @lock: spin lock
+ *     @rfs_may_expire: timer that runs enic_flow_may_expire()
+ */
+struct enic_rfs_flw_tbl {
+       u16 max;
+       int free;
+
+#define ENIC_RFS_FLW_BITSHIFT  (10)
+#define ENIC_RFS_FLW_MASK      ((1 << ENIC_RFS_FLW_BITSHIFT) - 1)
+       u16 toclean:ENIC_RFS_FLW_BITSHIFT;
+       struct hlist_head ht_head[1 << ENIC_RFS_FLW_BITSHIFT];
+       spinlock_t lock;
+       struct timer_list rfs_may_expire;
+};
+
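
A standalone sketch (not driver code) of how a flow hash maps into this
table: ENIC_RFS_FLW_MASK keeps the low ENIC_RFS_FLW_BITSHIFT bits, so any
32-bit hash (e.g. from skb_get_hash_raw()) lands in a valid ht_head[] slot.

	#include <stdint.h>
	#include <stdio.h>

	#define ENIC_RFS_FLW_BITSHIFT	(10)
	#define ENIC_RFS_FLW_MASK	((1 << ENIC_RFS_FLW_BITSHIFT) - 1)

	int main(void)
	{
		uint32_t hash = 0xdeadbeefu;	/* stand-in for a flow hash */

		printf("bucket %u of %u\n",
		       (unsigned)(hash & ENIC_RFS_FLW_MASK),
		       (unsigned)(1 << ENIC_RFS_FLW_BITSHIFT));
		return 0;
	}
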
 /* Per-instance private data structure */
 struct enic {
        struct net_device *netdev;
@@ -140,7 +175,7 @@ struct enic {
        unsigned int rq_count;
        u64 rq_truncated_pkts;
        u64 rq_bad_fcs;
-       struct napi_struct napi[ENIC_RQ_MAX];
+       struct napi_struct napi[ENIC_RQ_MAX + ENIC_WQ_MAX];
 
        /* interrupt resource cache line section */
        ____cacheline_aligned struct vnic_intr intr[ENIC_INTR_MAX];
@@ -150,6 +185,7 @@ struct enic {
        /* completion queue cache line section */
        ____cacheline_aligned struct vnic_cq cq[ENIC_CQ_MAX];
        unsigned int cq_count;
+       struct enic_rfs_flw_tbl rfs_h;
 };
 
 static inline struct device *enic_get_dev(struct enic *enic)
index e13efbd..b161f24 100644 (file)
@@ -34,13 +34,13 @@ int enic_api_devcmd_proxy_by_index(struct net_device *netdev, int vf,
        struct vnic_dev *vdev = enic->vdev;
 
        spin_lock(&enic->enic_api_lock);
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
 
        vnic_dev_cmd_proxy_by_index_start(vdev, vf);
        err = vnic_dev_cmd(vdev, cmd, a0, a1, wait);
        vnic_dev_cmd_proxy_end(vdev);
 
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
        spin_unlock(&enic->enic_api_lock);
 
        return err;
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
new file mode 100644 (file)
index 0000000..69dfd3c
--- /dev/null
@@ -0,0 +1,284 @@
+#include <linux/if.h>
+#include <linux/if_ether.h>
+#include <linux/if_link.h>
+#include <linux/netdevice.h>
+#include <linux/in.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <net/flow_keys.h>
+#include "enic_res.h"
+#include "enic_clsf.h"
+
+/* enic_addfltr_5t - Add IPv4 5-tuple filter
+ *     @enic: enic struct of vnic
+ *     @keys: flow_keys of IPv4 5-tuple
+ *     @rq: rq number to steer to
+ *
+ * This function returns the filter id (hardware id) of the filter
+ * added.  In case of error it returns a negative number.
+ */
+int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq)
+{
+       int res;
+       struct filter data;
+
+       switch (keys->ip_proto) {
+       case IPPROTO_TCP:
+               data.u.ipv4.protocol = PROTO_TCP;
+               break;
+       case IPPROTO_UDP:
+               data.u.ipv4.protocol = PROTO_UDP;
+               break;
+       default:
+               return -EPROTONOSUPPORT;
+       }
+       data.type = FILTER_IPV4_5TUPLE;
+       data.u.ipv4.src_addr = ntohl(keys->src);
+       data.u.ipv4.dst_addr = ntohl(keys->dst);
+       data.u.ipv4.src_port = ntohs(keys->port16[0]);
+       data.u.ipv4.dst_port = ntohs(keys->port16[1]);
+       data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE;
+
+       spin_lock_bh(&enic->devcmd_lock);
+       res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data);
+       spin_unlock_bh(&enic->devcmd_lock);
+       res = (res == 0) ? rq : res;
+
+       return res;
+}
+
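
A hypothetical call site for the two helpers above (the queue number and
control flow are illustrative only):

	struct flow_keys keys;
	int fltr_id;

	/* Dissect the skb, then steer the flow to receive queue 3. */
	if (!skb_flow_dissect(skb, &keys))
		return -EPROTONOSUPPORT;

	fltr_id = enic_addfltr_5t(enic, &keys, 3);
	if (fltr_id < 0)
		return fltr_id;

	/* ... and later remove it again by the returned hardware id. */
	enic_delfltr(enic, fltr_id);
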
+/* enic_delfltr - Delete clsf filter
+ *     @enic: enic struct of vnic
+ *     @filter_id: filter id (hardware id) of the filter to be deleted
+ *
+ * This function returns zero in case of success, a negative number in case of
+ * error.
+ */
+int enic_delfltr(struct enic *enic, u16 filter_id)
+{
+       int ret;
+
+       spin_lock_bh(&enic->devcmd_lock);
+       ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL);
+       spin_unlock_bh(&enic->devcmd_lock);
+
+       return ret;
+}
+
+/* enic_rfs_flw_tbl_init - initialize enic->rfs_h members
+ *     @enic: enic data
+ */
+void enic_rfs_flw_tbl_init(struct enic *enic)
+{
+       int i;
+
+       spin_lock_init(&enic->rfs_h.lock);
+       for (i = 0; i <= ENIC_RFS_FLW_MASK; i++)
+               INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]);
+       enic->rfs_h.max = enic->config.num_arfs;
+       enic->rfs_h.free = enic->rfs_h.max;
+       enic->rfs_h.toclean = 0;
+       enic_rfs_timer_start(enic);
+}
+
+void enic_rfs_flw_tbl_free(struct enic *enic)
+{
+       int i;
+
+       enic_rfs_timer_stop(enic);
+       spin_lock(&enic->rfs_h.lock);
+       enic->rfs_h.free = 0;
+       for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[i];
+               hlist_for_each_entry_safe(n, tmp, hhead, node) {
+                       enic_delfltr(enic, n->fltr_id);
+                       hlist_del(&n->node);
+                       kfree(n);
+               }
+       }
+       spin_unlock(&enic->rfs_h.lock);
+}
+
+struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
+{
+       int i;
+
+       for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[i];
+               hlist_for_each_entry_safe(n, tmp, hhead, node)
+                       if (n->fltr_id == fltr_id)
+                               return n;
+       }
+
+       return NULL;
+}
+
+#ifdef CONFIG_RFS_ACCEL
+void enic_flow_may_expire(unsigned long data)
+{
+       struct enic *enic = (struct enic *)data;
+       bool res;
+       int j;
+
+       spin_lock(&enic->rfs_h.lock);
+       for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++];
+               hlist_for_each_entry_safe(n, tmp, hhead, node) {
+                       res = rps_may_expire_flow(enic->netdev, n->rq_id,
+                                                 n->flow_id, n->fltr_id);
+                       if (res) {
+                               res = enic_delfltr(enic, n->fltr_id);
+                               if (unlikely(res))
+                                       continue;
+                               hlist_del(&n->node);
+                               kfree(n);
+                               enic->rfs_h.free++;
+                       }
+               }
+       }
+       spin_unlock(&enic->rfs_h.lock);
+       mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h,
+                                                 struct flow_keys *k)
+{
+       struct enic_rfs_fltr_node *tpos;
+
+       hlist_for_each_entry(tpos, h, node)
+               if (tpos->keys.src == k->src &&
+                   tpos->keys.dst == k->dst &&
+                   tpos->keys.ports == k->ports &&
+                   tpos->keys.ip_proto == k->ip_proto &&
+                   tpos->keys.n_proto == k->n_proto)
+                       return tpos;
+       return NULL;
+}
+
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                      u16 rxq_index, u32 flow_id)
+{
+       struct flow_keys keys;
+       struct enic_rfs_fltr_node *n;
+       struct enic *enic;
+       u16 tbl_idx;
+       int res, i;
+
+       enic = netdev_priv(dev);
+       res = skb_flow_dissect(skb, &keys);
+       if (!res || keys.n_proto != htons(ETH_P_IP) ||
+           (keys.ip_proto != IPPROTO_TCP && keys.ip_proto != IPPROTO_UDP))
+               return -EPROTONOSUPPORT;
+
+       tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
+       spin_lock(&enic->rfs_h.lock);
+       n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
+
+       if (n) { /* entry already present  */
+               if (rxq_index == n->rq_id) {
+                       res = -EEXIST;
+                       goto ret_unlock;
+               }
+
+               /* The desired rq changed for the flow, so we need to delete
+                * the old filter and add a new one.
+                *
+                * The moment we delete the filter, incoming pkts are steered
+                * to the default rq based on RSS; once the new filter is in
+                * place they are steered to the desired queue again.  That
+                * window could cause out-of-order pkts.
+                *
+                * So first try adding the new filter, then delete the old one.
+                */
+               i = --enic->rfs_h.free;
+               /* clsf tbl is full, we have to delete the old filter first */
+               if (unlikely(i < 0)) {
+                       enic->rfs_h.free++;
+                       res = enic_delfltr(enic, n->fltr_id);
+                       if (unlikely(res < 0))
+                               goto ret_unlock;
+                       res = enic_addfltr_5t(enic, &keys, rxq_index);
+                       if (res < 0) {
+                               hlist_del(&n->node);
+                               enic->rfs_h.free++;
+                               goto ret_unlock;
+                       }
+               /* add new fltr 1st then del old fltr */
+               } else {
+                       int ret;
+
+                       res = enic_addfltr_5t(enic, &keys, rxq_index);
+                       if (res < 0) {
+                               enic->rfs_h.free++;
+                               goto ret_unlock;
+                       }
+                       ret = enic_delfltr(enic, n->fltr_id);
+                       /* Deleting the old filter failed; put it back on the
+                        * list so enic_flow_may_expire() can retry later.
+                        */
+                       if (unlikely(ret < 0)) {
+                               struct enic_rfs_fltr_node *d;
+                               struct hlist_head *head;
+
+                               head = &enic->rfs_h.ht_head[tbl_idx];
+                               d = kmalloc(sizeof(*d), GFP_ATOMIC);
+                               if (d) {
+                                       d->fltr_id = n->fltr_id;
+                                       INIT_HLIST_NODE(&d->node);
+                                       hlist_add_head(&d->node, head);
+                               }
+                       } else {
+                               enic->rfs_h.free++;
+                       }
+               }
+               n->rq_id = rxq_index;
+               n->fltr_id = res;
+               n->flow_id = flow_id;
+       /* entry not present */
+       } else {
+               i = --enic->rfs_h.free;
+               if (i <= 0) {
+                       enic->rfs_h.free++;
+                       res = -EBUSY;
+                       goto ret_unlock;
+               }
+
+               n = kmalloc(sizeof(*n), GFP_ATOMIC);
+               if (!n) {
+                       res = -ENOMEM;
+                       enic->rfs_h.free++;
+                       goto ret_unlock;
+               }
+
+               res = enic_addfltr_5t(enic, &keys, rxq_index);
+               if (res < 0) {
+                       kfree(n);
+                       enic->rfs_h.free++;
+                       goto ret_unlock;
+               }
+               n->rq_id = rxq_index;
+               n->fltr_id = res;
+               n->flow_id = flow_id;
+               n->keys = keys;
+               INIT_HLIST_NODE(&n->node);
+               hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]);
+       }
+
+ret_unlock:
+       spin_unlock(&enic->rfs_h.lock);
+       return res;
+}
+
+#endif /* CONFIG_RFS_ACCEL */
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.h b/drivers/net/ethernet/cisco/enic/enic_clsf.h
new file mode 100644 (file)
index 0000000..6aa9f89
--- /dev/null
@@ -0,0 +1,37 @@
+#ifndef _ENIC_CLSF_H_
+#define _ENIC_CLSF_H_
+
+#include "vnic_dev.h"
+#include "enic.h"
+
+#define ENIC_CLSF_EXPIRE_COUNT 128
+
+int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq);
+int enic_delfltr(struct enic *enic, u16 filter_id);
+void enic_rfs_flw_tbl_init(struct enic *enic);
+void enic_rfs_flw_tbl_free(struct enic *enic);
+struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id);
+
+#ifdef CONFIG_RFS_ACCEL
+int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
+                      u16 rxq_index, u32 flow_id);
+void enic_flow_may_expire(unsigned long data);
+
+static inline void enic_rfs_timer_start(struct enic *enic)
+{
+       init_timer(&enic->rfs_h.rfs_may_expire);
+       enic->rfs_h.rfs_may_expire.function = enic_flow_may_expire;
+       enic->rfs_h.rfs_may_expire.data = (unsigned long)enic;
+       mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
+}
+
+static inline void enic_rfs_timer_stop(struct enic *enic)
+{
+       del_timer_sync(&enic->rfs_h.rfs_may_expire);
+}
+#else
+static inline void enic_rfs_timer_start(struct enic *enic) {}
+static inline void enic_rfs_timer_stop(struct enic *enic) {}
+#endif /* CONFIG_RFS_ACCEL */
+
+#endif /* _ENIC_CLSF_H_ */
index 3e27df5..87ddc44 100644 (file)
@@ -29,9 +29,9 @@ int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_fw_info(enic->vdev, fw_info);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -40,9 +40,9 @@ int enic_dev_stats_dump(struct enic *enic, struct vnic_stats **vstats)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_stats_dump(enic->vdev, vstats);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -54,9 +54,9 @@ int enic_dev_add_station_addr(struct enic *enic)
        if (!is_valid_ether_addr(enic->netdev->dev_addr))
                return -EADDRNOTAVAIL;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_add_addr(enic->vdev, enic->netdev->dev_addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -68,9 +68,9 @@ int enic_dev_del_station_addr(struct enic *enic)
        if (!is_valid_ether_addr(enic->netdev->dev_addr))
                return -EADDRNOTAVAIL;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_del_addr(enic->vdev, enic->netdev->dev_addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -80,10 +80,10 @@ int enic_dev_packet_filter(struct enic *enic, int directed, int multicast,
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_packet_filter(enic->vdev, directed,
                multicast, broadcast, promisc, allmulti);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -92,9 +92,9 @@ int enic_dev_add_addr(struct enic *enic, const u8 *addr)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_add_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -103,9 +103,9 @@ int enic_dev_del_addr(struct enic *enic, const u8 *addr)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_del_addr(enic->vdev, addr);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -114,9 +114,9 @@ int enic_dev_notify_unset(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_notify_unset(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -125,9 +125,9 @@ int enic_dev_hang_notify(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_hang_notify(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -136,10 +136,10 @@ int enic_dev_set_ig_vlan_rewrite_mode(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_set_ig_vlan_rewrite_mode(enic->vdev,
                IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -148,9 +148,9 @@ int enic_dev_enable(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_enable_wait(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -159,9 +159,9 @@ int enic_dev_disable(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_disable(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -170,9 +170,9 @@ int enic_dev_intr_coal_timer_info(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_intr_coal_timer_info(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -181,9 +181,9 @@ int enic_vnic_dev_deinit(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_deinit(enic->vdev);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -192,10 +192,10 @@ int enic_dev_init_prov2(struct enic *enic, struct vic_provinfo *vp)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_init_prov2(enic->vdev,
                (u8 *)vp, vic_provinfo_size(vp));
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -204,9 +204,9 @@ int enic_dev_deinit_done(struct enic *enic, int *status)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_deinit_done(enic->vdev, status);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -217,9 +217,9 @@ int enic_vlan_rx_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct enic *enic = netdev_priv(netdev);
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_add_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -230,9 +230,9 @@ int enic_vlan_rx_kill_vid(struct net_device *netdev, __be16 proto, u16 vid)
        struct enic *enic = netdev_priv(netdev);
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_del_vlan(enic, vid);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -241,9 +241,9 @@ int enic_dev_enable2(struct enic *enic, int active)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_enable2(enic->vdev, active);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -252,9 +252,9 @@ int enic_dev_enable2_done(struct enic *enic, int *status)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = vnic_dev_enable2_done(enic->vdev, status);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
index 36ea1ab..10bb970 100644 (file)
@@ -28,7 +28,7 @@
  */
 #define ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnicdevcmdfn, ...) \
        do { \
-               spin_lock(&enic->devcmd_lock); \
+               spin_lock_bh(&enic->devcmd_lock); \
                if (enic_is_valid_vf(enic, vf)) { \
                        vnic_dev_cmd_proxy_by_index_start(enic->vdev, vf); \
                        err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
@@ -36,7 +36,7 @@
                } else { \
                        err = vnicdevcmdfn(enic->vdev, ##__VA_ARGS__); \
                } \
-               spin_unlock(&enic->devcmd_lock); \
+               spin_unlock_bh(&enic->devcmd_lock); \
        } while (0)
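
A hypothetical expansion of the macro above (vnic_dev_stats_dump() stands
in for any vnicdevcmdfn; the surrounding "vf", "enic" and "stats" variables
are assumed to exist at the call site):

	struct vnic_stats *stats;
	int err;

	/* Runs the devcmd on behalf of VF "vf" under the, now
	 * BH-disabling, devcmd lock.
	 */
	ENIC_DEVCMD_PROXY_BY_INDEX(vf, err, enic, vnic_dev_stats_dump, &stats);
	if (err)
		return err;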
 
 int enic_dev_fw_info(struct enic *enic, struct vnic_devcmd_fw_info **fw_info);
index 2e50b54..523c9ce 100644 (file)
@@ -22,6 +22,7 @@
 #include "enic_res.h"
 #include "enic.h"
 #include "enic_dev.h"
+#include "enic_clsf.h"
 
 struct enic_stat {
        char name[ETH_GSTRING_LEN];
@@ -231,7 +232,7 @@ static int enic_set_coalesce(struct net_device *netdev,
                if (ecmd->use_adaptive_rx_coalesce      ||
                    ecmd->rx_coalesce_usecs_low         ||
                    ecmd->rx_coalesce_usecs_high)
-                       return -EOPNOTSUPP;
+                       return -EINVAL;
 
                intr = enic_legacy_io_intr();
                vnic_intr_coalescing_timer_set(&enic->intr[intr],
@@ -243,34 +244,29 @@ static int enic_set_coalesce(struct net_device *netdev,
                if (ecmd->use_adaptive_rx_coalesce      ||
                    ecmd->rx_coalesce_usecs_low         ||
                    ecmd->rx_coalesce_usecs_high)
-                       return -EOPNOTSUPP;
+                       return -EINVAL;
 
                vnic_intr_coalescing_timer_set(&enic->intr[0],
                        tx_coalesce_usecs);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
+               if (ecmd->rx_coalesce_usecs_high &&
+                   (rx_coalesce_usecs_high <
+                    rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
+                       return -EINVAL;
+
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
                        vnic_intr_coalescing_timer_set(&enic->intr[intr],
                                tx_coalesce_usecs);
                }
 
-               if (rxcoal->use_adaptive_rx_coalesce) {
-                       if (!ecmd->use_adaptive_rx_coalesce) {
-                               rxcoal->use_adaptive_rx_coalesce = 0;
-                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-                       }
-               } else {
-                       if (ecmd->use_adaptive_rx_coalesce)
-                               rxcoal->use_adaptive_rx_coalesce = 1;
-                       else
-                               enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
-               }
+               rxcoal->use_adaptive_rx_coalesce =
+                                       !!ecmd->use_adaptive_rx_coalesce;
+               if (!rxcoal->use_adaptive_rx_coalesce)
+                       enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
 
                if (ecmd->rx_coalesce_usecs_high) {
-                       if (rx_coalesce_usecs_high <
-                           (rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
-                               return -EINVAL;
                        rxcoal->range_end = rx_coalesce_usecs_high;
                        rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
                        rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
@@ -287,6 +283,102 @@ static int enic_set_coalesce(struct net_device *netdev,
        return 0;
 }
 
+static int enic_grxclsrlall(struct enic *enic, struct ethtool_rxnfc *cmd,
+                           u32 *rule_locs)
+{
+       int j, ret = 0, cnt = 0;
+
+       cmd->data = enic->rfs_h.max - enic->rfs_h.free;
+       for (j = 0; j < (1 << ENIC_RFS_FLW_BITSHIFT); j++) {
+               struct hlist_head *hhead;
+               struct hlist_node *tmp;
+               struct enic_rfs_fltr_node *n;
+
+               hhead = &enic->rfs_h.ht_head[j];
+               hlist_for_each_entry_safe(n, tmp, hhead, node) {
+                       if (cnt == cmd->rule_cnt)
+                               return -EMSGSIZE;
+                       rule_locs[cnt] = n->fltr_id;
+                       cnt++;
+               }
+       }
+       cmd->rule_cnt = cnt;
+
+       return ret;
+}
+
+static int enic_grxclsrule(struct enic *enic, struct ethtool_rxnfc *cmd)
+{
+       struct ethtool_rx_flow_spec *fsp =
+                               (struct ethtool_rx_flow_spec *)&cmd->fs;
+       struct enic_rfs_fltr_node *n;
+
+       n = htbl_fltr_search(enic, (u16)fsp->location);
+       if (!n)
+               return -EINVAL;
+       switch (n->keys.ip_proto) {
+       case IPPROTO_TCP:
+               fsp->flow_type = TCP_V4_FLOW;
+               break;
+       case IPPROTO_UDP:
+               fsp->flow_type = UDP_V4_FLOW;
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       fsp->h_u.tcp_ip4_spec.ip4src = n->keys.src;
+       fsp->m_u.tcp_ip4_spec.ip4src = (__u32)~0;
+
+       fsp->h_u.tcp_ip4_spec.ip4dst = n->keys.dst;
+       fsp->m_u.tcp_ip4_spec.ip4dst = (__u32)~0;
+
+       fsp->h_u.tcp_ip4_spec.psrc = n->keys.port16[0];
+       fsp->m_u.tcp_ip4_spec.psrc = (__u16)~0;
+
+       fsp->h_u.tcp_ip4_spec.pdst = n->keys.port16[1];
+       fsp->m_u.tcp_ip4_spec.pdst = (__u16)~0;
+
+       fsp->ring_cookie = n->rq_id;
+
+       return 0;
+}
+
+static int enic_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd,
+                         u32 *rule_locs)
+{
+       struct enic *enic = netdev_priv(dev);
+       int ret = 0;
+
+       switch (cmd->cmd) {
+       case ETHTOOL_GRXRINGS:
+               cmd->data = enic->rq_count;
+               break;
+       case ETHTOOL_GRXCLSRLCNT:
+               spin_lock_bh(&enic->rfs_h.lock);
+               cmd->rule_cnt = enic->rfs_h.max - enic->rfs_h.free;
+               cmd->data = enic->rfs_h.max;
+               spin_unlock_bh(&enic->rfs_h.lock);
+               break;
+       case ETHTOOL_GRXCLSRLALL:
+               spin_lock_bh(&enic->rfs_h.lock);
+               ret = enic_grxclsrlall(enic, cmd, rule_locs);
+               spin_unlock_bh(&enic->rfs_h.lock);
+               break;
+       case ETHTOOL_GRXCLSRULE:
+               spin_lock_bh(&enic->rfs_h.lock);
+               ret = enic_grxclsrule(enic, cmd);
+               spin_unlock_bh(&enic->rfs_h.lock);
+               break;
+       default:
+               ret = -EOPNOTSUPP;
+               break;
+       }
+
+       return ret;
+}
+
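For reference, these ETHTOOL_GRXCLSRL* cases are what the stock ethtool
utility queries: "ethtool -n <dev>" walks the rule count and list, and
"ethtool -n <dev> rule <id>" dumps a single rule via ETHTOOL_GRXCLSRULE
(device name illustrative).
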
 static const struct ethtool_ops enic_ethtool_ops = {
        .get_settings = enic_get_settings,
        .get_drvinfo = enic_get_drvinfo,
@@ -298,6 +390,7 @@ static const struct ethtool_ops enic_ethtool_ops = {
        .get_ethtool_stats = enic_get_ethtool_stats,
        .get_coalesce = enic_get_coalesce,
        .set_coalesce = enic_set_coalesce,
+       .get_rxnfc = enic_get_rxnfc,
 };
 
 void enic_set_ethtool_ops(struct net_device *netdev)
index f32f828..9348feb 100644 (file)
 #include <linux/prefetch.h>
 #include <net/ip6_checksum.h>
 #include <linux/ktime.h>
+#ifdef CONFIG_RFS_ACCEL
+#include <linux/cpu_rmap.h>
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#include <net/busy_poll.h>
+#endif
 
 #include "cq_enet_desc.h"
 #include "vnic_dev.h"
@@ -49,6 +55,7 @@
 #include "enic.h"
 #include "enic_dev.h"
 #include "enic_pp.h"
+#include "enic_clsf.h"
 
 #define ENIC_NOTIFY_TIMER_PERIOD       (2 * HZ)
 #define WQ_ENET_MAX_DESC_LEN           (1 << WQ_ENET_LEN_BITS)
@@ -309,40 +316,15 @@ static irqreturn_t enic_isr_msi(int irq, void *data)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t enic_isr_msix_rq(int irq, void *data)
+static irqreturn_t enic_isr_msix(int irq, void *data)
 {
        struct napi_struct *napi = data;
 
-       /* schedule NAPI polling for RQ cleanup */
        napi_schedule(napi);
 
        return IRQ_HANDLED;
 }
 
-static irqreturn_t enic_isr_msix_wq(int irq, void *data)
-{
-       struct enic *enic = data;
-       unsigned int cq;
-       unsigned int intr;
-       unsigned int wq_work_to_do = -1; /* no limit */
-       unsigned int wq_work_done;
-       unsigned int wq_irq;
-
-       wq_irq = (u32)irq - enic->msix_entry[enic_msix_wq_intr(enic, 0)].vector;
-       cq = enic_cq_wq(enic, wq_irq);
-       intr = enic_msix_wq_intr(enic, wq_irq);
-
-       wq_work_done = vnic_cq_service(&enic->cq[cq],
-               wq_work_to_do, enic_wq_service, NULL);
-
-       vnic_intr_return_credits(&enic->intr[intr],
-               wq_work_done,
-               1 /* unmask intr */,
-               1 /* reset intr timer */);
-
-       return IRQ_HANDLED;
-}
-
 static irqreturn_t enic_isr_msix_err(int irq, void *data)
 {
        struct enic *enic = data;
@@ -1049,10 +1031,12 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
                if (vlan_stripped)
                        __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan_tci);
 
-               if (netdev->features & NETIF_F_GRO)
-                       napi_gro_receive(&enic->napi[q_number], skb);
-               else
+               skb_mark_napi_id(skb, &enic->napi[rq->index]);
+               if (enic_poll_busy_polling(rq) ||
+                   !(netdev->features & NETIF_F_GRO))
                        netif_receive_skb(skb);
+               else
+                       napi_gro_receive(&enic->napi[q_number], skb);
                if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
                        enic_intr_update_pkt_size(&cq->pkt_size_counter,
                                                  bytes_written);
@@ -1089,16 +1073,22 @@ static int enic_poll(struct napi_struct *napi, int budget)
        unsigned int  work_done, rq_work_done = 0, wq_work_done;
        int err;
 
-       /* Service RQ (first) and WQ
-        */
+       wq_work_done = vnic_cq_service(&enic->cq[cq_wq], wq_work_to_do,
+                                      enic_wq_service, NULL);
+
+       if (!enic_poll_lock_napi(&enic->rq[cq_rq])) {
+               if (wq_work_done > 0)
+                       vnic_intr_return_credits(&enic->intr[intr],
+                                                wq_work_done,
+                                                0 /* dont unmask intr */,
+                                                0 /* dont reset intr timer */);
+               return rq_work_done;
+       }
 
        if (budget > 0)
                rq_work_done = vnic_cq_service(&enic->cq[cq_rq],
                        rq_work_to_do, enic_rq_service, NULL);
 
-       wq_work_done = vnic_cq_service(&enic->cq[cq_wq],
-               wq_work_to_do, enic_wq_service, NULL);
-
        /* Accumulate intr event credits for this polling
         * cycle.  An intr event is the completion of a
         * a WQ or RQ packet.
@@ -1130,6 +1120,7 @@ static int enic_poll(struct napi_struct *napi, int budget)
                napi_complete(napi);
                vnic_intr_unmask(&enic->intr[intr]);
        }
+       enic_poll_unlock_napi(&enic->rq[cq_rq]);
 
        return rq_work_done;
 }
@@ -1192,7 +1183,102 @@ static void enic_calc_int_moderation(struct enic *enic, struct vnic_rq *rq)
        pkt_size_counter->small_pkt_bytes_cnt = 0;
 }
 
-static int enic_poll_msix(struct napi_struct *napi, int budget)
+#ifdef CONFIG_RFS_ACCEL
+static void enic_free_rx_cpu_rmap(struct enic *enic)
+{
+       free_irq_cpu_rmap(enic->netdev->rx_cpu_rmap);
+       enic->netdev->rx_cpu_rmap = NULL;
+}
+
+static void enic_set_rx_cpu_rmap(struct enic *enic)
+{
+       int i, res;
+
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
+               enic->netdev->rx_cpu_rmap = alloc_irq_cpu_rmap(enic->rq_count);
+               if (unlikely(!enic->netdev->rx_cpu_rmap))
+                       return;
+               for (i = 0; i < enic->rq_count; i++) {
+                       res = irq_cpu_rmap_add(enic->netdev->rx_cpu_rmap,
+                                              enic->msix_entry[i].vector);
+                       if (unlikely(res)) {
+                               enic_free_rx_cpu_rmap(enic);
+                               return;
+                       }
+               }
+       }
+}
+
+#else
+
+static void enic_free_rx_cpu_rmap(struct enic *enic)
+{
+}
+
+static void enic_set_rx_cpu_rmap(struct enic *enic)
+{
+}
+
+#endif /* CONFIG_RFS_ACCEL */
+
+#ifdef CONFIG_NET_RX_BUSY_POLL
+int enic_busy_poll(struct napi_struct *napi)
+{
+       struct net_device *netdev = napi->dev;
+       struct enic *enic = netdev_priv(netdev);
+       unsigned int rq = (napi - &enic->napi[0]);
+       unsigned int cq = enic_cq_rq(enic, rq);
+       unsigned int intr = enic_msix_rq_intr(enic, rq);
+       unsigned int work_to_do = -1; /* clean all pkts possible */
+       unsigned int work_done;
+
+       if (!enic_poll_lock_poll(&enic->rq[rq]))
+               return LL_FLUSH_BUSY;
+       work_done = vnic_cq_service(&enic->cq[cq], work_to_do,
+                                   enic_rq_service, NULL);
+
+       if (work_done > 0)
+               vnic_intr_return_credits(&enic->intr[intr],
+                                        work_done, 0, 0);
+       vnic_rq_fill(&enic->rq[rq], enic_rq_alloc_buf);
+       if (enic->rx_coalesce_setting.use_adaptive_rx_coalesce)
+               enic_calc_int_moderation(enic, &enic->rq[rq]);
+       enic_poll_unlock_poll(&enic->rq[rq]);
+
+       return work_done;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
+static int enic_poll_msix_wq(struct napi_struct *napi, int budget)
+{
+       struct net_device *netdev = napi->dev;
+       struct enic *enic = netdev_priv(netdev);
+       unsigned int wq_index = (napi - &enic->napi[0]) - enic->rq_count;
+       struct vnic_wq *wq = &enic->wq[wq_index];
+       unsigned int cq;
+       unsigned int intr;
+       unsigned int wq_work_to_do = -1; /* clean all desc possible */
+       unsigned int wq_work_done;
+       unsigned int wq_irq;
+
+       wq_irq = wq->index;
+       cq = enic_cq_wq(enic, wq_irq);
+       intr = enic_msix_wq_intr(enic, wq_irq);
+       wq_work_done = vnic_cq_service(&enic->cq[cq], wq_work_to_do,
+                                      enic_wq_service, NULL);
+
+       vnic_intr_return_credits(&enic->intr[intr], wq_work_done,
+                                0 /* don't unmask intr */,
+                                1 /* reset intr timer */);
+       if (!wq_work_done) {
+               napi_complete(napi);
+               vnic_intr_unmask(&enic->intr[intr]);
+       }
+
+       return 0;
+}
+
+static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
 {
        struct net_device *netdev = napi->dev;
        struct enic *enic = netdev_priv(netdev);
@@ -1203,6 +1289,8 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
        unsigned int work_done = 0;
        int err;
 
+       if (!enic_poll_lock_napi(&enic->rq[rq]))
+               return work_done;
        /* Service RQ
         */
 
@@ -1248,6 +1336,7 @@ static int enic_poll_msix(struct napi_struct *napi, int budget)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
+       enic_poll_unlock_napi(&enic->rq[rq]);
 
        return work_done;
 }
@@ -1267,6 +1356,7 @@ static void enic_free_intr(struct enic *enic)
        struct net_device *netdev = enic->netdev;
        unsigned int i;
 
+       enic_free_rx_cpu_rmap(enic);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                free_irq(enic->pdev->irq, netdev);
@@ -1291,6 +1381,7 @@ static int enic_request_intr(struct enic *enic)
        unsigned int i, intr;
        int err = 0;
 
+       enic_set_rx_cpu_rmap(enic);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
 
        case VNIC_DEV_INTR_MODE_INTX:
@@ -1312,17 +1403,19 @@ static int enic_request_intr(struct enic *enic)
                        snprintf(enic->msix[intr].devname,
                                sizeof(enic->msix[intr].devname),
                                "%.11s-rx-%d", netdev->name, i);
-                       enic->msix[intr].isr = enic_isr_msix_rq;
+                       enic->msix[intr].isr = enic_isr_msix;
                        enic->msix[intr].devid = &enic->napi[i];
                }
 
                for (i = 0; i < enic->wq_count; i++) {
+                       int wq = enic_cq_wq(enic, i);
+
                        intr = enic_msix_wq_intr(enic, i);
                        snprintf(enic->msix[intr].devname,
                                sizeof(enic->msix[intr].devname),
                                "%.11s-tx-%d", netdev->name, i);
-                       enic->msix[intr].isr = enic_isr_msix_wq;
-                       enic->msix[intr].devid = enic;
+                       enic->msix[intr].isr = enic_isr_msix;
+                       enic->msix[intr].devid = &enic->napi[wq];
                }
 
                intr = enic_msix_err_intr(enic);
@@ -1421,7 +1514,7 @@ static int enic_dev_notify_set(struct enic *enic)
 {
        int err;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        case VNIC_DEV_INTR_MODE_INTX:
                err = vnic_dev_notify_set(enic->vdev,
@@ -1435,7 +1528,7 @@ static int enic_dev_notify_set(struct enic *enic)
                err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
                break;
        }
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -1494,15 +1587,20 @@ static int enic_open(struct net_device *netdev)
 
        netif_tx_wake_all_queues(netdev);
 
-       for (i = 0; i < enic->rq_count; i++)
+       for (i = 0; i < enic->rq_count; i++) {
+               enic_busy_poll_init_lock(&enic->rq[i]);
                napi_enable(&enic->napi[i]);
-
+       }
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               for (i = 0; i < enic->wq_count; i++)
+                       napi_enable(&enic->napi[enic_cq_wq(enic, i)]);
        enic_dev_enable(enic);
 
        for (i = 0; i < enic->intr_count; i++)
                vnic_intr_unmask(&enic->intr[i]);
 
        enic_notify_timer_start(enic);
+       enic_rfs_flw_tbl_init(enic);
 
        return 0;
 
@@ -1529,14 +1627,23 @@ static int enic_stop(struct net_device *netdev)
        enic_synchronize_irqs(enic);
 
        del_timer_sync(&enic->notify_timer);
+       enic_rfs_flw_tbl_free(enic);
 
        enic_dev_disable(enic);
 
-       for (i = 0; i < enic->rq_count; i++)
+       local_bh_disable();
+       for (i = 0; i < enic->rq_count; i++) {
                napi_disable(&enic->napi[i]);
+               while (!enic_poll_lock_napi(&enic->rq[i]))
+                       mdelay(1);
+       }
+       local_bh_enable();
 
        netif_carrier_off(netdev);
        netif_tx_disable(netdev);
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               for (i = 0; i < enic->wq_count; i++)
+                       napi_disable(&enic->napi[enic_cq_wq(enic, i)]);
 
        if (!enic_is_dynamic(enic) && !enic_is_sriov_vf(enic))
                enic_dev_del_station_addr(enic);
@@ -1656,13 +1763,14 @@ static void enic_poll_controller(struct net_device *netdev)
        case VNIC_DEV_INTR_MODE_MSIX:
                for (i = 0; i < enic->rq_count; i++) {
                        intr = enic_msix_rq_intr(enic, i);
-                       enic_isr_msix_rq(enic->msix_entry[intr].vector,
-                               &enic->napi[i]);
+                       enic_isr_msix(enic->msix_entry[intr].vector,
+                                     &enic->napi[i]);
                }
 
                for (i = 0; i < enic->wq_count; i++) {
                        intr = enic_msix_wq_intr(enic, i);
-                       enic_isr_msix_wq(enic->msix_entry[intr].vector, enic);
+                       enic_isr_msix(enic->msix_entry[intr].vector,
+                                     &enic->napi[enic_cq_wq(enic, i)]);
                }
 
                break;
@@ -1758,11 +1866,11 @@ static int enic_set_rsskey(struct enic *enic)
 
        memcpy(rss_key_buf_va, &rss_key, sizeof(union vnic_rss_key));
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_set_rss_key(enic,
                rss_key_buf_pa,
                sizeof(union vnic_rss_key));
-       spin_unlock(&enic->devcmd_lock);
+               /* If the number of VFs requested is more than 8 below the
+                * max supported, reserve 8 queue pairs for the PF and
+                * divide the remaining resources evenly among the VFs.
+                */
@@ -1785,11 +1893,11 @@ static int enic_set_rsscpu(struct enic *enic, u8 rss_hash_bits)
        for (i = 0; i < (1 << rss_hash_bits); i++)
                (*rss_cpu_buf_va).cpu[i/4].b[i%4] = i % enic->rq_count;
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_set_rss_cpu(enic,
                rss_cpu_buf_pa,
                sizeof(union vnic_rss_cpu));
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        pci_free_consistent(enic->pdev, sizeof(union vnic_rss_cpu),
                rss_cpu_buf_va, rss_cpu_buf_pa);
@@ -1807,13 +1915,13 @@ static int enic_set_niccfg(struct enic *enic, u8 rss_default_cpu,
        /* Enable VLAN tag stripping.
        */
 
-       spin_lock(&enic->devcmd_lock);
+       spin_lock_bh(&enic->devcmd_lock);
        err = enic_set_nic_cfg(enic,
                rss_default_cpu, rss_hash_type,
                rss_hash_bits, rss_base_cpu,
                rss_enable, tso_ipid_split_en,
                ig_vlan_strip_en);
-       spin_unlock(&enic->devcmd_lock);
+       spin_unlock_bh(&enic->devcmd_lock);
 
        return err;
 }
@@ -2021,6 +2129,12 @@ static const struct net_device_ops enic_netdev_dynamic_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = enic_rx_flow_steer,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = enic_busy_poll,
+#endif
 };
 
 static const struct net_device_ops enic_netdev_ops = {
@@ -2041,14 +2155,25 @@ static const struct net_device_ops enic_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = enic_poll_controller,
 #endif
+#ifdef CONFIG_RFS_ACCEL
+       .ndo_rx_flow_steer      = enic_rx_flow_steer,
+#endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = enic_busy_poll,
+#endif
 };
 
 static void enic_dev_deinit(struct enic *enic)
 {
        unsigned int i;
 
-       for (i = 0; i < enic->rq_count; i++)
+       for (i = 0; i < enic->rq_count; i++) {
+               napi_hash_del(&enic->napi[i]);
                netif_napi_del(&enic->napi[i]);
+       }
+       if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
+               for (i = 0; i < enic->wq_count; i++)
+                       netif_napi_del(&enic->napi[enic_cq_wq(enic, i)]);
 
        enic_free_vnic_resources(enic);
        enic_clear_intr_mode(enic);
@@ -2114,11 +2239,17 @@ static int enic_dev_init(struct enic *enic)
        switch (vnic_dev_get_intr_mode(enic->vdev)) {
        default:
                netif_napi_add(netdev, &enic->napi[0], enic_poll, 64);
+               napi_hash_add(&enic->napi[0]);
                break;
        case VNIC_DEV_INTR_MODE_MSIX:
-               for (i = 0; i < enic->rq_count; i++)
+               for (i = 0; i < enic->rq_count; i++) {
                        netif_napi_add(netdev, &enic->napi[i],
-                               enic_poll_msix, 64);
+                               enic_poll_msix_rq, NAPI_POLL_WEIGHT);
+                       napi_hash_add(&enic->napi[i]);
+               }
+               for (i = 0; i < enic->wq_count; i++)
+                       netif_napi_add(netdev, &enic->napi[enic_cq_wq(enic, i)],
+                                      enic_poll_msix_wq, NAPI_POLL_WEIGHT);
                break;
        }
 
@@ -2386,6 +2517,10 @@ static int enic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        netdev->features |= netdev->hw_features;
 
+#ifdef CONFIG_RFS_ACCEL
+       netdev->hw_features |= NETIF_F_NTUPLE;
+#endif
+
        if (using_dac)
                netdev->features |= NETIF_F_HIGHDMA;
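
The enic changes in this file hang the RX MSI-X vectors onto a reverse
CPU map so that accelerated RFS can steer a flow to the queue whose
interrupt fires on the consuming CPU. A minimal standalone sketch of
the same <linux/cpu_rmap.h> sequence used by enic_set_rx_cpu_rmap()
above (the vector array and count are hypothetical, not enic code):

    #include <linux/cpu_rmap.h>

    /* Sketch: build a reverse CPU map over n_rx RX interrupt vectors. */
    static struct cpu_rmap *example_setup_rmap(int n_rx, const int *vectors)
    {
            struct cpu_rmap *rmap = alloc_irq_cpu_rmap(n_rx);
            int i;

            if (!rmap)
                    return NULL;
            for (i = 0; i < n_rx; i++) {
                    if (irq_cpu_rmap_add(rmap, vectors[i])) {
                            free_irq_cpu_rmap(rmap);
                            return NULL;
                    }
            }
            return rmap;            /* assign to netdev->rx_cpu_rmap */
    }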
 
index 31d6588..9c96911 100644 (file)
@@ -71,6 +71,7 @@ int enic_get_vnic_config(struct enic *enic)
        GET_CONFIG(intr_mode);
        GET_CONFIG(intr_timer_usec);
        GET_CONFIG(loop_tag);
+       GET_CONFIG(num_arfs);
 
        c->wq_desc_count =
                min_t(u32, ENIC_MAX_WQ_DESCS,
index e86a45c..5abc496 100644 (file)
@@ -312,12 +312,12 @@ static int _vnic_dev_cmd(struct vnic_dev *vdev, enum vnic_devcmd_cmd cmd,
                                err = (int)readq(&devcmd->args[0]);
                                if (err == ERR_EINVAL &&
                                    cmd == CMD_CAPABILITY)
-                                       return err;
+                                       return -err;
                                if (err != ERR_ECMDUNKNOWN ||
                                    cmd != CMD_CAPABILITY)
                                        pr_err("Error %d devcmd %d\n",
                                                err, _CMD_N(cmd));
-                               return err;
+                               return -err;
                        }
 
                        if (_CMD_DIR(cmd) & _CMD_DIR_READ) {
@@ -1048,3 +1048,64 @@ int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr)
 
        return vnic_dev_cmd(vdev, CMD_SET_MAC_ADDR, &a0, &a1, wait);
 }
+
+/* vnic_dev_classifier: Add/Delete classifier entries
+ * @vdev: vdev of the device
+ * @cmd: CLSF_ADD for Add filter
+ *      CLSF_DEL for Delete filter
+ * @entry: In case of ADD filter, the caller passes the RQ number in this
+ *        variable.
+ *
+ *        This function stores the filter_id returned by the firmware in the
+ *        same variable before return.
+ *
+ *        In case of DEL filter, the caller passes the filter_id returned
+ *        by the earlier ADD; @entry is left unchanged.
+ * @data: filter data
+ */
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+                       struct filter *data)
+{
+       u64 a0, a1;
+       int wait = 1000;
+       dma_addr_t tlv_pa;
+       int ret = -EINVAL;
+       struct filter_tlv *tlv, *tlv_va;
+       struct filter_action *action;
+       u64 tlv_size;
+
+       if (cmd == CLSF_ADD) {
+               tlv_size = sizeof(struct filter) +
+                          sizeof(struct filter_action) +
+                          2 * sizeof(struct filter_tlv);
+               tlv_va = pci_alloc_consistent(vdev->pdev, tlv_size, &tlv_pa);
+               if (!tlv_va)
+                       return -ENOMEM;
+               tlv = tlv_va;
+               a0 = tlv_pa;
+               a1 = tlv_size;
+               memset(tlv, 0, tlv_size);
+               tlv->type = CLSF_TLV_FILTER;
+               tlv->length = sizeof(struct filter);
+               *(struct filter *)&tlv->val = *data;
+
+               tlv = (struct filter_tlv *)((char *)tlv +
+                                           sizeof(struct filter_tlv) +
+                                           sizeof(struct filter));
+
+               tlv->type = CLSF_TLV_ACTION;
+               tlv->length = sizeof(struct filter_action);
+               action = (struct filter_action *)&tlv->val;
+               action->type = FILTER_ACTION_RQ_STEERING;
+               action->u.rq_idx = *entry;
+
+               ret = vnic_dev_cmd(vdev, CMD_ADD_FILTER, &a0, &a1, wait);
+               *entry = (u16)a0;
+               pci_free_consistent(vdev->pdev, tlv_size, tlv_va, tlv_pa);
+       } else if (cmd == CLSF_DEL) {
+               a0 = *entry;
+               ret = vnic_dev_cmd(vdev, CMD_DEL_FILTER, &a0, &a1, wait);
+       }
+
+       return ret;
+}
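
Putting the vnic_dev_classifier() contract into practice: for CLSF_ADD
the caller loads the target RQ into *entry and reads the firmware's
filter_id back out of the same variable; for CLSF_DEL it passes that id
back in. A hypothetical caller-side sketch (not part of the patch):

    /* Steer packets matching @flt to RQ @rq; returns the firmware
     * filter id on success or a negative errno.
     */
    static int example_add_steer(struct vnic_dev *vdev,
                                 struct filter *flt, u16 rq)
    {
            u16 entry = rq;         /* IN: target RQ number */
            int err;

            err = vnic_dev_classifier(vdev, CLSF_ADD, &entry, flt);
            if (err)
                    return err;
            return entry;           /* OUT: filter id from firmware */
    }

    /* Later, tear the filter down by the id ADD handed back. */
    static int example_del_steer(struct vnic_dev *vdev, u16 filter_id)
    {
            return vnic_dev_classifier(vdev, CLSF_DEL, &filter_id, NULL);
    }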
index 1f3b301..1fb214e 100644 (file)
@@ -133,5 +133,7 @@ int vnic_dev_enable2(struct vnic_dev *vdev, int active);
 int vnic_dev_enable2_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_deinit_done(struct vnic_dev *vdev, int *status);
 int vnic_dev_set_mac_addr(struct vnic_dev *vdev, u8 *mac_addr);
+int vnic_dev_classifier(struct vnic_dev *vdev, u8 cmd, u16 *entry,
+                       struct filter *data);
 
 #endif /* _VNIC_DEV_H_ */
index b9a0d78..435d0cd 100644 (file)
@@ -603,6 +603,11 @@ struct filter_tlv {
        u_int32_t val[0];
 };
 
+enum {
+       CLSF_ADD = 0,
+       CLSF_DEL = 1,
+};
+
 /*
  * Writing cmd register causes STAT_BUSY to get set in status register.
  * When cmd completes, STAT_BUSY will be cleared.
index 6095428..75aced2 100644 (file)
@@ -32,6 +32,8 @@ struct vnic_enet_config {
        char devname[16];
        u32 intr_timer_usec;
        u16 loop_tag;
+       u16 vf_rq_count;
+       u16 num_arfs;
 };
 
 #define VENETF_TSO             0x1     /* TSO enabled */
index ee7bc95..8111d52 100644 (file)
@@ -85,6 +85,21 @@ struct vnic_rq {
        struct vnic_rq_buf *to_clean;
        void *os_buf_head;
        unsigned int pkts_outstanding;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+#define ENIC_POLL_STATE_IDLE           0
+#define ENIC_POLL_STATE_NAPI           (1 << 0) /* NAPI owns this poll */
+#define ENIC_POLL_STATE_POLL           (1 << 1) /* poll owns this poll */
+#define ENIC_POLL_STATE_NAPI_YIELD     (1 << 2) /* NAPI yielded this poll */
+#define ENIC_POLL_STATE_POLL_YIELD     (1 << 3) /* poll yielded this poll */
+#define ENIC_POLL_YIELD                        (ENIC_POLL_STATE_NAPI_YIELD |   \
+                                        ENIC_POLL_STATE_POLL_YIELD)
+#define ENIC_POLL_LOCKED               (ENIC_POLL_STATE_NAPI |         \
+                                        ENIC_POLL_STATE_POLL)
+#define ENIC_POLL_USER_PEND            (ENIC_POLL_STATE_POLL |         \
+                                        ENIC_POLL_STATE_POLL_YIELD)
+       unsigned int bpoll_state;
+       spinlock_t bpoll_lock;
+#endif /* CONFIG_NET_RX_BUSY_POLL */
 };
 
 static inline unsigned int vnic_rq_desc_avail(struct vnic_rq *rq)
@@ -197,6 +212,113 @@ static inline int vnic_rq_fill(struct vnic_rq *rq,
        return 0;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
+{
+       spin_lock_init(&rq->bpoll_lock);
+       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+}
+
+static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
+{
+       bool rc = true;
+
+       spin_lock(&rq->bpoll_lock);
+       if (rq->bpoll_state & ENIC_POLL_LOCKED) {
+               WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
+               rq->bpoll_state |= ENIC_POLL_STATE_NAPI_YIELD;
+               rc = false;
+       } else {
+               rq->bpoll_state = ENIC_POLL_STATE_NAPI;
+       }
+       spin_unlock(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+{
+       bool rc = false;
+
+       spin_lock(&rq->bpoll_lock);
+       WARN_ON(rq->bpoll_state &
+               (ENIC_POLL_STATE_POLL | ENIC_POLL_STATE_NAPI_YIELD));
+       if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
+               rc = true;
+       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+       spin_unlock(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
+{
+       bool rc = true;
+
+       spin_lock_bh(&rq->bpoll_lock);
+       if (rq->bpoll_state & ENIC_POLL_LOCKED) {
+               rq->bpoll_state |= ENIC_POLL_STATE_POLL_YIELD;
+               rc = false;
+       } else {
+               rq->bpoll_state |= ENIC_POLL_STATE_POLL;
+       }
+       spin_unlock_bh(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+       bool rc = false;
+
+       spin_lock_bh(&rq->bpoll_lock);
+       WARN_ON(rq->bpoll_state & ENIC_POLL_STATE_NAPI);
+       if (rq->bpoll_state & ENIC_POLL_STATE_POLL_YIELD)
+               rc = true;
+       rq->bpoll_state = ENIC_POLL_STATE_IDLE;
+       spin_unlock_bh(&rq->bpoll_lock);
+
+       return rc;
+}
+
+static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
+{
+       WARN_ON(!(rq->bpoll_state & ENIC_POLL_LOCKED));
+       return rq->bpoll_state & ENIC_POLL_USER_PEND;
+}
+
+#else
+
+static inline void enic_busy_poll_init_lock(struct vnic_rq *rq)
+{
+}
+
+static inline bool enic_poll_lock_napi(struct vnic_rq *rq)
+{
+       return true;
+}
+
+static inline bool enic_poll_unlock_napi(struct vnic_rq *rq)
+{
+       return false;
+}
+
+static inline bool enic_poll_lock_poll(struct vnic_rq *rq)
+{
+       return false;
+}
+
+static inline bool enic_poll_unlock_poll(struct vnic_rq *rq)
+{
+       return false;
+}
+
+static inline bool enic_poll_busy_polling(struct vnic_rq *rq)
+{
+       return false;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 void vnic_rq_free(struct vnic_rq *rq);
 int vnic_rq_alloc(struct vnic_dev *vdev, struct vnic_rq *rq, unsigned int index,
        unsigned int desc_count, unsigned int desc_size);
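
The helpers above implement a small state machine over bpoll_state:
exactly one of the NAPI softirq path or a busy-polling socket may own
an RQ at a time, and a contending party leaves a *_YIELD bit behind so
the owner can tell it was raced with. A condensed sketch of the two
entry paths, assuming the helpers above (not literal driver code):

    /* NAPI poll path */
    if (enic_poll_lock_napi(rq)) {
            /* ... service RX completions ... */
            enic_poll_unlock_napi(rq);
    }       /* else a busy-poll user owns the RQ; retry next poll */

    /* sk_busy_loop() path */
    if (!enic_poll_lock_poll(rq))
            return LL_FLUSH_BUSY;   /* NAPI owns the RQ right now */
    /* ... service RX completions ... */
    enic_poll_unlock_poll(rq);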
index 13723c9..23084fb 100644 (file)
@@ -817,10 +817,12 @@ dm9000_release_board(struct platform_device *pdev, struct board_info *db)
 
        /* release the resources */
 
-       release_resource(db->data_req);
+       if (db->data_req)
+               release_resource(db->data_req);
        kfree(db->data_req);
 
-       release_resource(db->addr_req);
+       if (db->addr_req)
+               release_resource(db->addr_req);
        kfree(db->addr_req);
 }
 
index c05b66d..7091fa6 100644 (file)
@@ -3250,7 +3250,6 @@ srom_map_media(struct net_device *dev)
        printk("%s: Bad media code [%d] detected in SROM!\n", dev->name,
                                                          lp->infoblock_media);
        return -1;
-       break;
     }
 
     return 0;
index 768379b..523d9dd 100644 (file)
@@ -158,7 +158,7 @@ void comet_timer(unsigned long data)
 {
        struct net_device *dev = (struct net_device *)data;
        struct tulip_private *tp = netdev_priv(dev);
-       int next_tick = 60*HZ;
+       int next_tick = 2*HZ;
 
        if (tulip_debug > 1)
                netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
index e9b0fab..a379c3e 100644 (file)
@@ -323,7 +323,8 @@ static int dnet_mii_init(struct dnet *bp)
 
        bp->mii_bus->priv = bp;
 
-       bp->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
+       bp->mii_bus->irq = devm_kmalloc(&bp->pdev->dev,
+                                       sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
        if (!bp->mii_bus->irq) {
                err = -ENOMEM;
                goto err_out;
@@ -334,7 +335,7 @@ static int dnet_mii_init(struct dnet *bp)
 
        if (mdiobus_register(bp->mii_bus)) {
                err = -ENXIO;
-               goto err_out_free_mdio_irq;
+               goto err_out;
        }
 
        if (dnet_mii_probe(bp->dev) != 0) {
@@ -346,8 +347,6 @@ static int dnet_mii_init(struct dnet *bp)
 
 err_out_unregister_bus:
        mdiobus_unregister(bp->mii_bus);
-err_out_free_mdio_irq:
-       kfree(bp->mii_bus->irq);
 err_out:
        mdiobus_free(bp->mii_bus);
        return err;
@@ -825,28 +824,14 @@ static int dnet_probe(struct platform_device *pdev)
        struct net_device *dev;
        struct dnet *bp;
        struct phy_device *phydev;
-       int err = -ENXIO;
-       unsigned int mem_base, mem_size, irq;
+       int err;
+       unsigned int irq;
 
-       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       if (!res) {
-               dev_err(&pdev->dev, "no mmio resource defined\n");
-               goto err_out;
-       }
-       mem_base = res->start;
-       mem_size = resource_size(res);
        irq = platform_get_irq(pdev, 0);
 
-       if (!request_mem_region(mem_base, mem_size, DRV_NAME)) {
-               dev_err(&pdev->dev, "no memory region available\n");
-               err = -EBUSY;
-               goto err_out;
-       }
-
-       err = -ENOMEM;
        dev = alloc_etherdev(sizeof(*bp));
        if (!dev)
-               goto err_out_release_mem;
+               return -ENOMEM;
 
        /* TODO: Actually, we have some interesting features... */
        dev->features |= 0;
@@ -859,10 +844,10 @@ static int dnet_probe(struct platform_device *pdev)
 
        spin_lock_init(&bp->lock);
 
-       bp->regs = ioremap(mem_base, mem_size);
-       if (!bp->regs) {
-               dev_err(&pdev->dev, "failed to map registers, aborting.\n");
-               err = -ENOMEM;
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       bp->regs = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(bp->regs)) {
+               err = PTR_ERR(bp->regs);
                goto err_out_free_dev;
        }
 
@@ -871,7 +856,7 @@ static int dnet_probe(struct platform_device *pdev)
        if (err) {
                dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
                       irq, err);
-               goto err_out_iounmap;
+               goto err_out_free_dev;
        }
 
        dev->netdev_ops = &dnet_netdev_ops;
@@ -908,7 +893,7 @@ static int dnet_probe(struct platform_device *pdev)
                goto err_out_unregister_netdev;
 
        dev_info(&pdev->dev, "Dave DNET at 0x%p (0x%08x) irq %d %pM\n",
-              bp->regs, mem_base, dev->irq, dev->dev_addr);
+              bp->regs, (unsigned int)res->start, dev->irq, dev->dev_addr);
        dev_info(&pdev->dev, "has %smdio, %sirq, %sgigabit, %sdma\n",
               (bp->capabilities & DNET_HAS_MDIO) ? "" : "no ",
               (bp->capabilities & DNET_HAS_IRQ) ? "" : "no ",
@@ -925,13 +910,8 @@ err_out_unregister_netdev:
        unregister_netdev(dev);
 err_out_free_irq:
        free_irq(dev->irq, dev);
-err_out_iounmap:
-       iounmap(bp->regs);
 err_out_free_dev:
        free_netdev(dev);
-err_out_release_mem:
-       release_mem_region(mem_base, mem_size);
-err_out:
        return err;
 }
 
@@ -948,11 +928,9 @@ static int dnet_remove(struct platform_device *pdev)
                if (bp->phy_dev)
                        phy_disconnect(bp->phy_dev);
                mdiobus_unregister(bp->mii_bus);
-               kfree(bp->mii_bus->irq);
                mdiobus_free(bp->mii_bus);
                unregister_netdev(dev);
                free_irq(dev->irq, dev);
-               iounmap(bp->regs);
                free_netdev(dev);
        }
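
The dnet_probe() rework above replaces the hand-rolled
request_mem_region()/ioremap()/iounmap()/release_mem_region() chain
with devm_ioremap_resource(), which claims and maps the region in one
call and is unwound automatically on probe failure or device removal.
The same pattern in isolation (hypothetical driver, minimal sketch):

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *regs;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            regs = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(regs))
                    return PTR_ERR(regs);   /* nothing to unwind */
            /* ... use regs; no iounmap() needed in remove() ... */
            return 0;
    }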
 
index 2e7c555..811f135 100644 (file)
@@ -34,7 +34,7 @@
 #include "be_hw.h"
 #include "be_roce.h"
 
-#define DRV_VER                        "10.2u"
+#define DRV_VER                        "10.4u"
 #define DRV_NAME               "be2net"
 #define BE_NAME                        "Emulex BladeEngine2"
 #define BE3_NAME               "Emulex BladeEngine3"
@@ -372,6 +372,7 @@ enum vf_state {
 };
 
 #define BE_FLAGS_LINK_STATUS_INIT              1
+#define BE_FLAGS_SRIOV_ENABLED                 (1 << 2)
 #define BE_FLAGS_WORKER_SCHEDULED              (1 << 3)
 #define BE_FLAGS_VLAN_PROMISC                  (1 << 4)
 #define BE_FLAGS_MCAST_PROMISC                 (1 << 5)
@@ -382,8 +383,10 @@ enum vf_state {
 
 #define BE_UC_PMAC_COUNT                       30
 #define BE_VF_UC_PMAC_COUNT                    2
+
 /* Ethtool set_dump flags */
 #define LANCER_INITIATE_FW_DUMP                        0x1
+#define LANCER_DELETE_FW_DUMP                  0x2
 
 struct phy_info {
        u8 transceiver;
@@ -411,6 +414,7 @@ struct be_resources {
        u16 max_vlans;          /* Number of vlans supported */
        u16 max_evt_qs;
        u32 if_cap_flags;
+       u32 vf_if_cap_flags;    /* VF if capability flags */
 };
 
 struct rss_info {
@@ -500,6 +504,7 @@ struct be_adapter {
        u32 flash_status;
        struct completion et_cmd_compl;
 
+       struct be_resources pool_res;   /* resources available for the port */
        struct be_resources res;        /* resources available for the func */
        u16 num_vfs;                    /* Number of VFs provisioned by PF */
        u8 virtfn;
@@ -523,9 +528,9 @@ struct be_adapter {
 
 #define be_physfn(adapter)             (!adapter->virtfn)
 #define be_virtfn(adapter)             (adapter->virtfn)
-#define        sriov_enabled(adapter)          (adapter->num_vfs > 0)
-#define sriov_want(adapter)             (be_physfn(adapter) && \
-                                        (num_vfs || pci_num_vf(adapter->pdev)))
+#define sriov_enabled(adapter)         (adapter->flags &       \
+                                        BE_FLAGS_SRIOV_ENABLED)
+
 #define for_all_vfs(adapter, vf_cfg, i)                                        \
        for (i = 0, vf_cfg = &adapter->vf_cfg[i]; i < adapter->num_vfs; \
                i++, vf_cfg++)
@@ -536,7 +541,7 @@ struct be_adapter {
 #define be_max_vlans(adapter)          (adapter->res.max_vlans)
 #define be_max_uc(adapter)             (adapter->res.max_uc_mac)
 #define be_max_mc(adapter)             (adapter->res.max_mcast_mac)
-#define be_max_vfs(adapter)            (adapter->res.max_vfs)
+#define be_max_vfs(adapter)            (adapter->pool_res.max_vfs)
 #define be_max_rss(adapter)            (adapter->res.max_rss_qs)
 #define be_max_txqs(adapter)           (adapter->res.max_tx_qs)
 #define be_max_prio_txqs(adapter)      (adapter->res.max_prio_tx_qs)
@@ -557,9 +562,7 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
 #define be_pvid_tagging_enabled(adapter)       (adapter->pvid)
 
 /* Is BE in QNQ multi-channel mode */
-#define be_is_qnq_mode(adapter)                (adapter->mc_type == FLEX10 ||  \
-                                        adapter->mc_type == vNIC1 ||   \
-                                        adapter->mc_type == UFP)
+#define be_is_qnq_mode(adapter)                (adapter->function_mode & QNQ_MODE)
 
 #define lancer_chip(adapter)   (adapter->pdev->device == OC_DEVICE_ID3 || \
                                 adapter->pdev->device == OC_DEVICE_ID4)
@@ -673,6 +676,8 @@ static inline void swap_dws(void *wrb, int len)
 #endif                         /* __BIG_ENDIAN */
 }
 
+#define be_cmd_status(status)          (status > 0 ? -EIO : status)
+
 static inline u8 is_tcp_pkt(struct sk_buff *skb)
 {
        u8 val = 0;
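
be_cmd_status() bridges two error conventions: firmware completion
codes are positive, opaque values, while callers up the stack expect
negative errnos. A positive status therefore collapses to -EIO; zero
and already-negative errnos pass through untouched. For example:

    be_cmd_status(0);       /* 0:      success, unchanged         */
    be_cmd_status(57);      /* -EIO:   opaque FW completion code  */
    be_cmd_status(-EBUSY);  /* -EBUSY: kernel errno preserved     */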
index f4ea349..4370ec1 100644 (file)
@@ -1749,8 +1749,7 @@ err:
 }
 
 /* Uses synchronous mcc */
-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-                     char *fw_on_flash)
+int be_cmd_get_fw_ver(struct be_adapter *adapter)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fw_version *req;
@@ -1772,9 +1771,8 @@ int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
-               strcpy(fw_ver, resp->firmware_version_string);
-               if (fw_on_flash)
-                       strcpy(fw_on_flash, resp->fw_on_flash_version_string);
+               strcpy(adapter->fw_ver, resp->firmware_version_string);
+               strcpy(adapter->fw_on_flash, resp->fw_on_flash_version_string);
        }
 err:
        spin_unlock_bh(&adapter->mcc_lock);
@@ -1997,8 +1995,7 @@ err:
 }
 
 /* Uses mbox */
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
-                       u32 *mode, u32 *caps, u16 *asic_rev)
+int be_cmd_query_fw_cfg(struct be_adapter *adapter)
 {
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
@@ -2017,10 +2014,10 @@ int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
-               *port_num = le32_to_cpu(resp->phys_port);
-               *mode = le32_to_cpu(resp->function_mode);
-               *caps = le32_to_cpu(resp->function_caps);
-               *asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
+               adapter->port_num = le32_to_cpu(resp->phys_port);
+               adapter->function_mode = le32_to_cpu(resp->function_mode);
+               adapter->function_caps = le32_to_cpu(resp->function_caps);
+               adapter->asic_rev = le32_to_cpu(resp->asic_revision) & 0xFF;
        }
 
        mutex_unlock(&adapter->mbox_lock);
@@ -2224,7 +2221,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                         msecs_to_jiffies(60000)))
-               status = -1;
+               status = -ETIMEDOUT;
        else
                status = adapter->flash_status;
 
@@ -2243,6 +2240,34 @@ err_unlock:
        return status;
 }
 
+int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name)
+{
+       struct lancer_cmd_req_delete_object *req;
+       struct be_mcc_wrb *wrb;
+       int status;
+
+       spin_lock_bh(&adapter->mcc_lock);
+
+       wrb = wrb_from_mccq(adapter);
+       if (!wrb) {
+               status = -EBUSY;
+               goto err;
+       }
+
+       req = embedded_payload(wrb);
+
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_DELETE_OBJECT,
+                              sizeof(*req), wrb, NULL);
+
+       strcpy(req->object_name, obj_name);
+
+       status = be_mcc_notify_wait(adapter);
+err:
+       spin_unlock_bh(&adapter->mcc_lock);
+       return status;
+}
+
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                           u32 data_size, u32 data_offset, const char *obj_name,
                           u32 *data_read, u32 *eof, u8 *addn_status)
@@ -2320,7 +2345,7 @@ int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
 
        if (!wait_for_completion_timeout(&adapter->et_cmd_compl,
                                         msecs_to_jiffies(40000)))
-               status = -1;
+               status = -ETIMEDOUT;
        else
                status = adapter->flash_status;
 
@@ -3313,15 +3338,28 @@ err:
        return status;
 }
 
-static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
+/* Descriptor type */
+enum {
+       FUNC_DESC = 1,
+       VFT_DESC = 2
+};
+
+static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count,
+                                              int desc_type)
 {
        struct be_res_desc_hdr *hdr = (struct be_res_desc_hdr *)buf;
+       struct be_nic_res_desc *nic;
        int i;
 
        for (i = 0; i < desc_count; i++) {
                if (hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V0 ||
-                   hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1)
-                       return (struct be_nic_res_desc *)hdr;
+                   hdr->desc_type == NIC_RESOURCE_DESC_TYPE_V1) {
+                       nic = (struct be_nic_res_desc *)hdr;
+                       if (desc_type == FUNC_DESC ||
+                           (desc_type == VFT_DESC &&
+                            nic->flags & (1 << VFT_SHIFT)))
+                               return nic;
+               }
 
                hdr->desc_len = hdr->desc_len ? : RESOURCE_DESC_SIZE_V0;
                hdr = (void *)hdr + hdr->desc_len;
@@ -3329,6 +3367,16 @@ static struct be_nic_res_desc *be_get_nic_desc(u8 *buf, u32 desc_count)
        return NULL;
 }
 
+static struct be_nic_res_desc *be_get_vft_desc(u8 *buf, u32 desc_count)
+{
+       return be_get_nic_desc(buf, desc_count, VFT_DESC);
+}
+
+static struct be_nic_res_desc *be_get_func_nic_desc(u8 *buf, u32 desc_count)
+{
+       return be_get_nic_desc(buf, desc_count, FUNC_DESC);
+}
+
 static struct be_pcie_res_desc *be_get_pcie_desc(u8 devfn, u8 *buf,
                                                 u32 desc_count)
 {
@@ -3424,7 +3472,7 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
                u32 desc_count = le32_to_cpu(resp->desc_count);
                struct be_nic_res_desc *desc;
 
-               desc = be_get_nic_desc(resp->func_param, desc_count);
+               desc = be_get_func_nic_desc(resp->func_param, desc_count);
                if (!desc) {
                        status = -EINVAL;
                        goto err;
@@ -3440,76 +3488,17 @@ err:
        return status;
 }
 
-/* Uses mbox */
-static int be_cmd_get_profile_config_mbox(struct be_adapter *adapter,
-                                         u8 domain, struct be_dma_mem *cmd)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_get_profile_config *req;
-       int status;
-
-       if (mutex_lock_interruptible(&adapter->mbox_lock))
-               return -1;
-       wrb = wrb_from_mbox(adapter);
-
-       req = cmd->va;
-       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_GET_PROFILE_CONFIG,
-                              cmd->size, wrb, cmd);
-
-       req->type = ACTIVE_PROFILE_TYPE;
-       req->hdr.domain = domain;
-       if (!lancer_chip(adapter))
-               req->hdr.version = 1;
-
-       status = be_mbox_notify_wait(adapter);
-
-       mutex_unlock(&adapter->mbox_lock);
-       return status;
-}
-
-/* Uses sync mcc */
-static int be_cmd_get_profile_config_mccq(struct be_adapter *adapter,
-                                         u8 domain, struct be_dma_mem *cmd)
-{
-       struct be_mcc_wrb *wrb;
-       struct be_cmd_req_get_profile_config *req;
-       int status;
-
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
-
-       req = cmd->va;
-       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_GET_PROFILE_CONFIG,
-                              cmd->size, wrb, cmd);
-
-       req->type = ACTIVE_PROFILE_TYPE;
-       req->hdr.domain = domain;
-       if (!lancer_chip(adapter))
-               req->hdr.version = 1;
-
-       status = be_mcc_notify_wait(adapter);
-
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
-       return status;
-}
-
-/* Uses sync mcc, if MCCQ is already created otherwise mbox */
+/* Will use MBOX only if MCCQ has not been created */
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain)
 {
        struct be_cmd_resp_get_profile_config *resp;
+       struct be_cmd_req_get_profile_config *req;
+       struct be_nic_res_desc *vf_res;
        struct be_pcie_res_desc *pcie;
        struct be_port_res_desc *port;
        struct be_nic_res_desc *nic;
-       struct be_queue_info *mccq = &adapter->mcc_obj.q;
+       struct be_mcc_wrb wrb = {0};
        struct be_dma_mem cmd;
        u32 desc_count;
        int status;
@@ -3520,10 +3509,17 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (!cmd.va)
                return -ENOMEM;
 
-       if (!mccq->created)
-               status = be_cmd_get_profile_config_mbox(adapter, domain, &cmd);
-       else
-               status = be_cmd_get_profile_config_mccq(adapter, domain, &cmd);
+       req = cmd.va;
+       be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
+                              OPCODE_COMMON_GET_PROFILE_CONFIG,
+                              cmd.size, &wrb, &cmd);
+
+       req->hdr.domain = domain;
+       if (!lancer_chip(adapter))
+               req->hdr.version = 1;
+       req->type = ACTIVE_PROFILE_TYPE;
+
+       status = be_cmd_notify_wait(adapter, &wrb);
        if (status)
                goto err;
 
@@ -3539,48 +3535,52 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
        if (port)
                adapter->mc_type = port->mc_type;
 
-       nic = be_get_nic_desc(resp->func_param, desc_count);
+       nic = be_get_func_nic_desc(resp->func_param, desc_count);
        if (nic)
                be_copy_nic_desc(res, nic);
 
+       vf_res = be_get_vft_desc(resp->func_param, desc_count);
+       if (vf_res)
+               res->vf_if_cap_flags = vf_res->cap_flags;
 err:
        if (cmd.va)
                pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
-int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
-                             int size, u8 version, u8 domain)
+/* Will use MBOX only if MCCQ has not been created */
+static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
+                                    int size, int count, u8 version, u8 domain)
 {
        struct be_cmd_req_set_profile_config *req;
-       struct be_mcc_wrb *wrb;
+       struct be_mcc_wrb wrb = {0};
+       struct be_dma_mem cmd;
        int status;
 
-       spin_lock_bh(&adapter->mcc_lock);
-
-       wrb = wrb_from_mccq(adapter);
-       if (!wrb) {
-               status = -EBUSY;
-               goto err;
-       }
+       memset(&cmd, 0, sizeof(struct be_dma_mem));
+       cmd.size = sizeof(struct be_cmd_req_set_profile_config);
+       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       if (!cmd.va)
+               return -ENOMEM;
 
-       req = embedded_payload(wrb);
+       req = cmd.va;
        be_wrb_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
-                              OPCODE_COMMON_SET_PROFILE_CONFIG, sizeof(*req),
-                              wrb, NULL);
+                              OPCODE_COMMON_SET_PROFILE_CONFIG, cmd.size,
+                              &wrb, &cmd);
        req->hdr.version = version;
        req->hdr.domain = domain;
-       req->desc_count = cpu_to_le32(1);
+       req->desc_count = cpu_to_le32(count);
        memcpy(req->desc, desc, size);
 
-       status = be_mcc_notify_wait(adapter);
-err:
-       spin_unlock_bh(&adapter->mcc_lock);
+       status = be_cmd_notify_wait(adapter, &wrb);
+
+       if (cmd.va)
+               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
 /* Mark all fields invalid */
-void be_reset_nic_desc(struct be_nic_res_desc *nic)
+static void be_reset_nic_desc(struct be_nic_res_desc *nic)
 {
        memset(nic, 0, sizeof(*nic));
        nic->unicast_mac_count = 0xFFFF;
@@ -3601,9 +3601,20 @@ void be_reset_nic_desc(struct be_nic_res_desc *nic)
        nic->wol_param = 0x0F;
        nic->tunnel_iface_count = 0xFFFF;
        nic->direct_tenant_iface_count = 0xFFFF;
+       nic->bw_min = 0xFFFFFFFF;
        nic->bw_max = 0xFFFFFFFF;
 }
 
+/* Mark all fields invalid */
+static void be_reset_pcie_desc(struct be_pcie_res_desc *pcie)
+{
+       memset(pcie, 0, sizeof(*pcie));
+       pcie->sriov_state = 0xFF;
+       pcie->pf_state = 0xFF;
+       pcie->pf_type = 0xFF;
+       pcie->num_vfs = 0xFFFF;
+}
+
 int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
                      u8 domain)
 {
@@ -3634,7 +3645,63 @@ int be_cmd_config_qos(struct be_adapter *adapter, u32 max_rate, u16 link_speed,
 
        return be_cmd_set_profile_config(adapter, &nic_desc,
                                         nic_desc.hdr.desc_len,
-                                        version, domain);
+                                        1, version, domain);
+}
+
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+                           struct be_resources res, u16 num_vfs)
+{
+       struct {
+               struct be_pcie_res_desc pcie;
+               struct be_nic_res_desc nic_vft;
+       } __packed desc;
+       u16 vf_q_count;
+
+       if (BEx_chip(adapter) || lancer_chip(adapter))
+               return 0;
+
+       /* PF PCIE descriptor */
+       be_reset_pcie_desc(&desc.pcie);
+       desc.pcie.hdr.desc_type = PCIE_RESOURCE_DESC_TYPE_V1;
+       desc.pcie.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       desc.pcie.flags = (1 << IMM_SHIFT) | (1 << NOSV_SHIFT);
+       desc.pcie.pf_num = adapter->pdev->devfn;
+       desc.pcie.sriov_state = num_vfs ? 1 : 0;
+       desc.pcie.num_vfs = cpu_to_le16(num_vfs);
+
+       /* VF NIC Template descriptor */
+       be_reset_nic_desc(&desc.nic_vft);
+       desc.nic_vft.hdr.desc_type = NIC_RESOURCE_DESC_TYPE_V1;
+       desc.nic_vft.hdr.desc_len = RESOURCE_DESC_SIZE_V1;
+       desc.nic_vft.flags = (1 << VFT_SHIFT) | (1 << IMM_SHIFT) |
+                               (1 << NOSV_SHIFT);
+       desc.nic_vft.pf_num = adapter->pdev->devfn;
+       desc.nic_vft.vf_num = 0;
+
+       if (num_vfs && res.vf_if_cap_flags & BE_IF_FLAGS_RSS) {
+               /* If number of VFs requested is 8 less than max supported,
+                * assign 8 queue pairs to the PF and divide the remaining
+                * resources evenly among the VFs
+                */
+               if (num_vfs < (be_max_vfs(adapter) - 8))
+                       vf_q_count = (res.max_rss_qs - 8) / num_vfs;
+               else
+                       vf_q_count = res.max_rss_qs / num_vfs;
+
+               desc.nic_vft.rq_count = cpu_to_le16(vf_q_count);
+               desc.nic_vft.txq_count = cpu_to_le16(vf_q_count);
+               desc.nic_vft.rssq_count = cpu_to_le16(vf_q_count - 1);
+               desc.nic_vft.cq_count = cpu_to_le16(3 * vf_q_count);
+       } else {
+               desc.nic_vft.txq_count = cpu_to_le16(1);
+               desc.nic_vft.rq_count = cpu_to_le16(1);
+               desc.nic_vft.rssq_count = cpu_to_le16(0);
+               /* One CQ for each TX, RX and MCCQ */
+               desc.nic_vft.cq_count = cpu_to_le16(3);
+       }
+
+       return be_cmd_set_profile_config(adapter, &desc,
+                                        2 * RESOURCE_DESC_SIZE_V1, 2, 1, 0);
 }
 
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op)
@@ -3686,7 +3753,7 @@ int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port)
        }
 
        return be_cmd_set_profile_config(adapter, &port_desc,
-                                        RESOURCE_DESC_SIZE_V1, 1, 0);
+                                        RESOURCE_DESC_SIZE_V1, 1, 1, 0);
 }
 
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
@@ -3766,13 +3833,19 @@ bool dump_present(struct be_adapter *adapter)
 
 int lancer_initiate_dump(struct be_adapter *adapter)
 {
+       struct device *dev = &adapter->pdev->dev;
        int status;
 
+       if (dump_present(adapter)) {
+               dev_info(dev, "Previous dump not cleared, not forcing dump\n");
+               return -EEXIST;
+       }
+
        /* give firmware reset and diagnostic dump */
        status = lancer_physdev_ctrl(adapter, PHYSDEV_CONTROL_FW_RESET_MASK |
                                     PHYSDEV_CONTROL_DD_MASK);
        if (status < 0) {
-               dev_err(&adapter->pdev->dev, "Firmware reset failed\n");
+               dev_err(dev, "FW reset failed\n");
                return status;
        }
 
@@ -3781,13 +3854,21 @@ int lancer_initiate_dump(struct be_adapter *adapter)
                return status;
 
        if (!dump_present(adapter)) {
-               dev_err(&adapter->pdev->dev, "Dump image not present\n");
-               return -1;
+               dev_err(dev, "FW dump not generated\n");
+               return -EIO;
        }
 
        return 0;
 }
 
+int lancer_delete_dump(struct be_adapter *adapter)
+{
+       int status;
+
+       status = lancer_cmd_delete_object(adapter, LANCER_FW_DUMP_FILE);
+       return be_cmd_status(status);
+}
+
 /* Uses sync mcc */
 int be_cmd_enable_vf(struct be_adapter *adapter, u8 domain)
 {
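
The queue split in be_cmd_set_sriov_config() above is easiest to see
with concrete, purely illustrative numbers, say max_rss_qs = 32 and
be_max_vfs() = 30:

    num_vfs = 8;                    /* 8 < 30 - 8: PF keeps 8 QPs  */
    vf_q_count = (32 - 8) / 8;      /* = 3 queue pairs per VF      */

    num_vfs = 24;                   /* 24 >= 30 - 8: no PF reserve */
    vf_q_count = 32 / 24;           /* = 1 queue pair per VF       */

Each VF template then advertises vf_q_count RQs and TXQs,
vf_q_count - 1 RSS queues, and 3 * vf_q_count completion queues.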
index 3e0a6b2..5284b82 100644 (file)
@@ -231,6 +231,7 @@ struct be_mcc_mailbox {
 #define OPCODE_COMMON_GET_FN_PRIVILEGES                        170
 #define OPCODE_COMMON_READ_OBJECT                      171
 #define OPCODE_COMMON_WRITE_OBJECT                     172
+#define OPCODE_COMMON_DELETE_OBJECT                    174
 #define OPCODE_COMMON_MANAGE_IFACE_FILTERS             193
 #define OPCODE_COMMON_GET_IFACE_LIST                   194
 #define OPCODE_COMMON_ENABLE_DISABLE_VF                        196
@@ -1081,17 +1082,12 @@ struct be_cmd_req_modify_eq_delay {
        struct be_set_eqd set_eqd[MAX_EVT_QS];
 } __packed;
 
-struct be_cmd_resp_modify_eq_delay {
-       struct be_cmd_resp_hdr hdr;
-       u32 rsvd0;
-} __packed;
-
 /******************** Get FW Config *******************/
 /* The HW can come up in either of the following multi-channel modes
  * based on the skew/IPL.
  */
 #define RDMA_ENABLED                           0x4
-#define FLEX10_MODE                            0x400
+#define QNQ_MODE                               0x400
 #define VNIC_MODE                              0x20000
 #define UMC_ENABLED                            0x1000000
 struct be_cmd_req_query_fw_cfg {
@@ -1156,11 +1152,6 @@ struct be_cmd_req_enable_disable_beacon {
        u8  status_duration;
 } __packed;
 
-struct be_cmd_resp_enable_disable_beacon {
-       struct be_cmd_resp_hdr resp_hdr;
-       u32 rsvd0;
-} __packed;
-
 struct be_cmd_req_get_beacon_state {
        struct be_cmd_req_hdr hdr;
        u8  port_num;
@@ -1263,6 +1254,13 @@ struct lancer_cmd_resp_read_object {
        u32 eof;
 };
 
+struct lancer_cmd_req_delete_object {
+       struct be_cmd_req_hdr hdr;
+       u32 rsvd1;
+       u32 rsvd2;
+       u8 object_name[104];
+};
+
 /************************ WOL *******************************/
 struct be_cmd_req_acpi_wol_magic_config{
        struct be_cmd_req_hdr hdr;
@@ -1326,11 +1324,6 @@ struct be_cmd_req_set_lmode {
        u8 loopback_state;
 };
 
-struct be_cmd_resp_set_lmode {
-       struct be_cmd_resp_hdr resp_hdr;
-       u8 rsvd0[4];
-};
-
 /********************** DDR DMA test *********************/
 struct be_cmd_req_ddrdma_test {
        struct be_cmd_req_hdr hdr;
@@ -1434,11 +1427,6 @@ struct be_cmd_req_set_qos {
        u32 rsvd[7];
 };
 
-struct be_cmd_resp_set_qos {
-       struct be_cmd_resp_hdr hdr;
-       u32 rsvd;
-};
-
 /*********************** Controller Attributes ***********************/
 struct be_cmd_req_cntl_attribs {
        struct be_cmd_req_hdr hdr;
@@ -1572,11 +1560,6 @@ struct be_cmd_req_set_hsw_config {
        u8 context[sizeof(struct amap_set_hsw_context) / 8];
 } __packed;
 
-struct be_cmd_resp_set_hsw_config {
-       struct be_cmd_resp_hdr hdr;
-       u32 rsvd;
-};
-
 struct amap_get_hsw_req_context {
        u8 interface_id[16];
        u8 rsvd0[14];
@@ -1835,6 +1818,7 @@ struct be_cmd_req_set_ext_fat_caps {
 #define PORT_RESOURCE_DESC_TYPE_V1             0x55
 #define MAX_RESOURCE_DESC                      264
 
+#define VFT_SHIFT                              3       /* VF template */
 #define IMM_SHIFT                              6       /* Immediate */
 #define NOSV_SHIFT                             7       /* No save */
 
@@ -1962,12 +1946,8 @@ struct be_cmd_req_set_profile_config {
        struct be_cmd_req_hdr hdr;
        u32 rsvd;
        u32 desc_count;
-       u8 desc[RESOURCE_DESC_SIZE_V1];
-};
-
-struct be_cmd_resp_set_profile_config {
-       struct be_cmd_resp_hdr hdr;
-};
+       u8 desc[2 * RESOURCE_DESC_SIZE_V1];
+} __packed;
 
 struct be_cmd_req_get_active_profile {
        struct be_cmd_req_hdr hdr;
@@ -2070,16 +2050,14 @@ int be_cmd_reset(struct be_adapter *adapter);
 int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd);
 int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                               struct be_dma_mem *nonemb_cmd);
-int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver,
-                     char *fw_on_flash);
+int be_cmd_get_fw_ver(struct be_adapter *adapter);
 int be_cmd_modify_eqd(struct be_adapter *adapter, struct be_set_eqd *, int num);
 int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                       u32 num);
 int be_cmd_rx_filter(struct be_adapter *adapter, u32 flags, u32 status);
 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc);
 int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc);
-int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
-                       u32 *function_mode, u32 *function_caps, u16 *asic_rev);
+int be_cmd_query_fw_cfg(struct be_adapter *adapter);
 int be_cmd_reset_function(struct be_adapter *adapter);
 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable,
                      u32 rss_hash_opts, u16 table_size, const u8 *rss_hkey);
@@ -2097,6 +2075,7 @@ int lancer_cmd_write_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
 int lancer_cmd_read_object(struct be_adapter *adapter, struct be_dma_mem *cmd,
                           u32 data_size, u32 data_offset, const char *obj_name,
                           u32 *data_read, u32 *eof, u8 *addn_status);
+int lancer_cmd_delete_object(struct be_adapter *adapter, const char *obj_name);
 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
                          u16 optype, int offset);
 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
@@ -2150,6 +2129,7 @@ int be_cmd_set_ext_fat_capabilites(struct be_adapter *adapter,
                                   struct be_fat_conf_params *cfgs);
 int lancer_physdev_ctrl(struct be_adapter *adapter, u32 mask);
 int lancer_initiate_dump(struct be_adapter *adapter);
+int lancer_delete_dump(struct be_adapter *adapter);
 bool dump_present(struct be_adapter *adapter);
 int lancer_test_and_set_rdy_state(struct be_adapter *adapter);
 int be_cmd_query_port_name(struct be_adapter *adapter, u8 *port_name);
@@ -2157,8 +2137,6 @@ int be_cmd_get_func_config(struct be_adapter *adapter,
                           struct be_resources *res);
 int be_cmd_get_profile_config(struct be_adapter *adapter,
                              struct be_resources *res, u8 domain);
-int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
-                             int size, u8 version, u8 domain);
 int be_cmd_get_active_profile(struct be_adapter *adapter, u16 *profile);
 int be_cmd_get_if_id(struct be_adapter *adapter, struct be_vf_cfg *vf_cfg,
                     int vf_num);
@@ -2168,3 +2146,5 @@ int be_cmd_set_logical_link_config(struct be_adapter *adapter,
                                          int link_state, u8 domain);
 int be_cmd_set_vxlan_port(struct be_adapter *adapter, __be16 port);
 int be_cmd_manage_iface(struct be_adapter *adapter, u32 iface, u8 op);
+int be_cmd_set_sriov_config(struct be_adapter *adapter,
+                           struct be_resources res, u16 num_vfs);
index e2da4d2..0cd3311 100644 (file)
@@ -643,7 +643,7 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
        if (status)
                dev_warn(&adapter->pdev->dev, "Pause param set failed.\n");
 
-       return status;
+       return be_cmd_status(status);
 }
 
 static int be_set_phys_id(struct net_device *netdev,
@@ -681,22 +681,21 @@ static int be_set_dump(struct net_device *netdev, struct ethtool_dump *dump)
        struct device *dev = &adapter->pdev->dev;
        int status;
 
-       if (!lancer_chip(adapter)) {
-               dev_err(dev, "FW dump not supported\n");
+       if (!lancer_chip(adapter) ||
+           !check_privilege(adapter, MAX_PRIVILEGES))
                return -EOPNOTSUPP;
-       }
-
-       if (dump_present(adapter)) {
-               dev_err(dev, "Previous dump not cleared, not forcing dump\n");
-               return 0;
-       }
 
        switch (dump->flag) {
        case LANCER_INITIATE_FW_DUMP:
                status = lancer_initiate_dump(adapter);
                if (!status)
-                       dev_info(dev, "F/w dump initiated successfully\n");
+                       dev_info(dev, "FW dump initiated successfully\n");
                break;
+       case LANCER_DELETE_FW_DUMP:
+               status = lancer_delete_dump(adapter);
+               if (!status)
+                       dev_info(dev, "FW dump deleted successfully\n");
+               break;
        default:
                dev_err(dev, "Invalid dump level: 0x%x\n", dump->flag);
                return -EINVAL;
@@ -762,7 +761,7 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
 err:
        dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va,
                          ddrdma_cmd.dma);
-       return ret;
+       return be_cmd_status(ret);
 }
 
 static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type,
@@ -885,7 +884,7 @@ static int be_read_eeprom(struct net_device *netdev,
        dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va,
                          eeprom_cmd.dma);
 
-       return status;
+       return be_cmd_status(status);
 }
 
 static u32 be_get_msg_level(struct net_device *netdev)
@@ -1042,7 +1041,7 @@ static int be_set_rss_hash_opts(struct be_adapter *adapter,
        if (!status)
                adapter->rss_info.rss_flags = rss_flags;
 
-       return status;
+       return be_cmd_status(status);
 }
 
 static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
@@ -1080,6 +1079,7 @@ static int be_set_channels(struct net_device  *netdev,
                           struct ethtool_channels *ch)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
+       int status;
 
        if (ch->rx_count || ch->tx_count || ch->other_count ||
            !ch->combined_count || ch->combined_count > be_max_qs(adapter))
@@ -1087,7 +1087,8 @@ static int be_set_channels(struct net_device  *netdev,
 
        adapter->cfg_num_qs = ch->combined_count;
 
-       return be_update_queues(adapter);
+       status = be_update_queues(adapter);
+       return be_cmd_status(status);
 }
 
 static u32 be_get_rxfh_indir_size(struct net_device *netdev)
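
Several handlers in this file now route their return value through
be_cmd_status() instead of leaking the adapter's base-completion status
to ethtool. A minimal sketch of the conversion idea follows; the numeric
codes and their mapping are illustrative assumptions, not the driver's
actual table:

    /* sketch: map FW completion codes onto errnos so that userspace
     * sees a meaningful error instead of a raw status value
     */
    static inline int be_cmd_status_sketch(int fw_status)
    {
            switch (fw_status) {
            case 0:                 /* command succeeded */
                    return 0;
            case 0x1a:              /* hypothetical: unsupported op */
                    return -EOPNOTSUPP;
            case 0x1b:              /* hypothetical: bad argument */
                    return -EINVAL;
            default:                /* anything unrecognised */
                    return -EIO;
            }
    }
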
index 6822b3d..db4ff14 100644
@@ -81,10 +81,10 @@ static const char * const ue_status_low_desc[] = {
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
-       "AXGMAC0",
-       "AXGMAC1",
-       "JTAG",
-       "MPU_INTPEND"
+       "ERX2 ",
+       "SPARE ",
+       "JTAG ",
+       "MPU_INTPEND "
 };
 /* UE Status High CSR */
 static const char * const ue_status_hi_desc[] = {
@@ -109,16 +109,16 @@ static const char * const ue_status_hi_desc[] = {
        "HOST5",
        "HOST6",
        "HOST7",
-       "HOST8",
-       "HOST9",
+       "ECRC",
+       "Poison TLP",
        "NETC",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
-       "Unknown",
+       "PERIPH",
+       "LLTXULP",
+       "D2P",
+       "RCON",
+       "LDMA",
+       "LLTXP",
+       "LLTXPB",
        "Unknown"
 };
 
@@ -1172,20 +1172,15 @@ static int be_vlan_add_vid(struct net_device *netdev, __be16 proto, u16 vid)
 static int be_vlan_rem_vid(struct net_device *netdev, __be16 proto, u16 vid)
 {
        struct be_adapter *adapter = netdev_priv(netdev);
-       int status = 0;
 
        /* Packets with VID 0 are always received by Lancer by default */
        if (lancer_chip(adapter) && vid == 0)
-               goto ret;
+               return 0;
 
        clear_bit(vid, adapter->vids);
-       status = be_vid_config(adapter);
-       if (!status)
-               adapter->vlans_added--;
-       else
-               set_bit(vid, adapter->vids);
-ret:
-       return status;
+       adapter->vlans_added--;
+
+       return be_vid_config(adapter);
 }
 
 static void be_clear_promisc(struct be_adapter *adapter)
@@ -1275,6 +1270,12 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
        if (!is_valid_ether_addr(mac) || vf >= adapter->num_vfs)
                return -EINVAL;
 
+       /* Proceed further only if the user-provided MAC differs
+        * from the active MAC
+        */
+       if (ether_addr_equal(mac, vf_cfg->mac_addr))
+               return 0;
+
        if (BEx_chip(adapter)) {
                be_cmd_pmac_del(adapter, vf_cfg->if_handle, vf_cfg->pmac_id,
                                vf + 1);
@@ -1286,13 +1287,15 @@ static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
                                        vf + 1);
        }
 
-       if (status)
-               dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
-                       mac, vf);
-       else
-               memcpy(vf_cfg->mac_addr, mac, ETH_ALEN);
+       if (status) {
+               dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d failed: %#x\n",
+                       mac, vf, status);
+               return be_cmd_status(status);
+       }
 
-       return status;
+       ether_addr_copy(vf_cfg->mac_addr, mac);
+
+       return 0;
 }
 
 static int be_get_vf_config(struct net_device *netdev, int vf,
@@ -1341,12 +1344,16 @@ static int be_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos)
                                               vf + 1, vf_cfg->if_handle, 0);
        }
 
-       if (!status)
-               vf_cfg->vlan_tag = vlan;
-       else
-               dev_info(&adapter->pdev->dev,
-                        "VLAN %d config on VF %d failed\n", vlan, vf);
-       return status;
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "VLAN %d config on VF %d failed: %#x\n", vlan,
+                       vf, status);
+               return be_cmd_status(status);
+       }
+
+       vf_cfg->vlan_tag = vlan;
+
+       return 0;
 }
 
 static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
@@ -1377,7 +1384,7 @@ static int be_set_vf_tx_rate(struct net_device *netdev, int vf,
 
        if (!link_status) {
                dev_err(dev, "TX-rate setting not allowed when link is down\n");
-               status = -EPERM;
+               status = -ENETDOWN;
                goto err;
        }
 
@@ -1408,7 +1415,7 @@ config_qos:
 err:
        dev_err(dev, "TX-rate setting of %dMbps on VF%d failed\n",
                max_tx_rate, vf);
-       return status;
+       return be_cmd_status(status);
 }
 static int be_set_vf_link_state(struct net_device *netdev, int vf,
                                int link_state)
@@ -1423,10 +1430,15 @@ static int be_set_vf_link_state(struct net_device *netdev, int vf,
                return -EINVAL;
 
        status = be_cmd_set_logical_link_config(adapter, link_state, vf+1);
-       if (!status)
-               adapter->vf_cfg[vf].plink_tracking = link_state;
+       if (status) {
+               dev_err(&adapter->pdev->dev,
+                       "Link state change on VF %d failed: %#x\n", vf, status);
+               return be_cmd_status(status);
+       }
 
-       return status;
+       adapter->vf_cfg[vf].plink_tracking = link_state;
+
+       return 0;
 }
 
 static void be_aic_update(struct be_aic_obj *aic, u64 rx_pkts, u64 tx_pkts,
@@ -2028,7 +2040,7 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
         */
        for (;;) {
                rxcp = be_rx_compl_get(rxo);
-               if (rxcp == NULL) {
+               if (!rxcp) {
                        if (lancer_chip(adapter))
                                break;
 
@@ -2902,7 +2914,7 @@ static int be_open(struct net_device *netdev)
        for_all_evt_queues(adapter, eqo, i) {
                napi_enable(&eqo->napi);
                be_enable_busy_poll(eqo);
-               be_eq_notify(adapter, eqo->q.id, true, false, 0);
+               be_eq_notify(adapter, eqo->q.id, true, true, 0);
        }
        adapter->flags |= BE_FLAGS_NAPI_ENABLED;
 
@@ -2935,8 +2947,8 @@ static int be_setup_wol(struct be_adapter *adapter, bool enable)
        cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
        cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
                                     GFP_KERNEL);
-       if (cmd.va == NULL)
-               return -1;
+       if (!cmd.va)
+               return -ENOMEM;
 
        if (enable) {
                status = pci_write_config_dword(adapter->pdev,
@@ -3043,6 +3055,7 @@ static void be_vf_clear(struct be_adapter *adapter)
 done:
        kfree(adapter->vf_cfg);
        adapter->num_vfs = 0;
+       adapter->flags &= ~BE_FLAGS_SRIOV_ENABLED;
 }
 
 static void be_clear_queues(struct be_adapter *adapter)
@@ -3098,6 +3111,13 @@ static int be_clear(struct be_adapter *adapter)
        if (sriov_enabled(adapter))
                be_vf_clear(adapter);
 
+       /* Re-configure the FW to distribute resources evenly across the
+        * max-supported number of VFs, but only when VFs aren't enabled yet.
+        */
+       if (be_physfn(adapter) && !pci_vfs_assigned(adapter->pdev))
+               be_cmd_set_sriov_config(adapter, adapter->pool_res,
+                                       pci_sriov_get_totalvfs(adapter->pdev));
+
 #ifdef CONFIG_BE2NET_VXLAN
        be_disable_vxlan_offloads(adapter);
 #endif
@@ -3170,19 +3190,6 @@ static int be_vf_setup(struct be_adapter *adapter)
        u32 privileges;
 
        old_vfs = pci_num_vf(adapter->pdev);
-       if (old_vfs) {
-               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
-               if (old_vfs != num_vfs)
-                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
-               adapter->num_vfs = old_vfs;
-       } else {
-               if (num_vfs > be_max_vfs(adapter))
-                       dev_info(dev, "Device supports %d VFs and not %d\n",
-                                be_max_vfs(adapter), num_vfs);
-               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
-               if (!adapter->num_vfs)
-                       return 0;
-       }
 
        status = be_vf_setup_init(adapter);
        if (status)
@@ -3194,17 +3201,15 @@ static int be_vf_setup(struct be_adapter *adapter)
                        if (status)
                                goto err;
                }
-       } else {
-               status = be_vfs_if_create(adapter);
-               if (status)
-                       goto err;
-       }
 
-       if (old_vfs) {
                status = be_vfs_mac_query(adapter);
                if (status)
                        goto err;
        } else {
+               status = be_vfs_if_create(adapter);
+               if (status)
+                       goto err;
+
                status = be_vf_eth_addr_config(adapter);
                if (status)
                        goto err;
@@ -3243,6 +3248,8 @@ static int be_vf_setup(struct be_adapter *adapter)
                        goto err;
                }
        }
+
+       adapter->flags |= BE_FLAGS_SRIOV_ENABLED;
        return 0;
 err:
        dev_err(dev, "VF setup failed\n");
@@ -3254,9 +3261,9 @@ err:
 
 static u8 be_convert_mc_type(u32 function_mode)
 {
-       if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE)
+       if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
                return vNIC1;
-       else if (function_mode & FLEX10_MODE)
+       else if (function_mode & QNQ_MODE)
                return FLEX10;
        else if (function_mode & VNIC_MODE)
                return vNIC2;
@@ -3270,19 +3277,7 @@ static u8 be_convert_mc_type(u32 function_mode)
 static void BEx_get_resources(struct be_adapter *adapter,
                              struct be_resources *res)
 {
-       struct pci_dev *pdev = adapter->pdev;
-       bool use_sriov = false;
-       int max_vfs = 0;
-
-       if (be_physfn(adapter) && BE3_chip(adapter)) {
-               be_cmd_get_profile_config(adapter, res, 0);
-               /* Some old versions of BE3 FW don't report max_vfs value */
-               if (res->max_vfs == 0) {
-                       max_vfs = pci_sriov_get_totalvfs(pdev);
-                       res->max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
-               }
-               use_sriov = res->max_vfs && sriov_want(adapter);
-       }
+       bool use_sriov = adapter->num_vfs != 0;
 
        if (be_physfn(adapter))
                res->max_uc_mac = BE_UC_PMAC_COUNT;
@@ -3326,7 +3321,7 @@ static void BEx_get_resources(struct be_adapter *adapter,
        res->max_rx_qs = res->max_rss_qs + 1;
 
        if (be_physfn(adapter))
-               res->max_evt_qs = (res->max_vfs > 0) ?
+               res->max_evt_qs = (be_max_vfs(adapter) > 0) ?
                                        BE3_SRIOV_MAX_EVT_QS : BE3_MAX_EVT_QS;
        else
                res->max_evt_qs = 1;
@@ -3349,6 +3344,51 @@ static void be_setup_init(struct be_adapter *adapter)
                adapter->cmd_privileges = MIN_PRIVILEGES;
 }
 
+static int be_get_sriov_config(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       struct be_resources res = {0};
+       int max_vfs, old_vfs;
+
+       /* Some old versions of BE3 FW don't report max_vfs value */
+       be_cmd_get_profile_config(adapter, &res, 0);
+
+       if (BE3_chip(adapter) && !res.max_vfs) {
+               max_vfs = pci_sriov_get_totalvfs(adapter->pdev);
+               res.max_vfs = max_vfs > 0 ? min(MAX_VFS, max_vfs) : 0;
+       }
+
+       adapter->pool_res = res;
+
+       if (!be_max_vfs(adapter)) {
+               if (num_vfs)
+                       dev_warn(dev, "device doesn't support SRIOV\n");
+               adapter->num_vfs = 0;
+               return 0;
+       }
+
+       pci_sriov_set_totalvfs(adapter->pdev, be_max_vfs(adapter));
+
+       /* validate num_vfs module param */
+       old_vfs = pci_num_vf(adapter->pdev);
+       if (old_vfs) {
+               dev_info(dev, "%d VFs are already enabled\n", old_vfs);
+               if (old_vfs != num_vfs)
+                       dev_warn(dev, "Ignoring num_vfs=%d setting\n", num_vfs);
+               adapter->num_vfs = old_vfs;
+       } else {
+               if (num_vfs > be_max_vfs(adapter)) {
+                       dev_info(dev, "Resources unavailable to init %d VFs\n",
+                                num_vfs);
+                       dev_info(dev, "Limiting to %d VFs\n",
+                                be_max_vfs(adapter));
+               }
+               adapter->num_vfs = min_t(u16, num_vfs, be_max_vfs(adapter));
+       }
+
+       return 0;
+}
+
 static int be_get_resources(struct be_adapter *adapter)
 {
        struct device *dev = &adapter->pdev->dev;
@@ -3374,13 +3414,6 @@ static int be_get_resources(struct be_adapter *adapter)
                        res.max_evt_qs /= 2;
                adapter->res = res;
 
-               if (be_physfn(adapter)) {
-                       status = be_cmd_get_profile_config(adapter, &res, 0);
-                       if (status)
-                               return status;
-                       adapter->res.max_vfs = res.max_vfs;
-               }
-
                dev_info(dev, "Max: txqs %d, rxqs %d, rss %d, eqs %d, vfs %d\n",
                         be_max_txqs(adapter), be_max_rxqs(adapter),
                         be_max_rss(adapter), be_max_eqs(adapter),
@@ -3393,16 +3426,41 @@ static int be_get_resources(struct be_adapter *adapter)
        return 0;
 }
 
-/* Routine to query per function resource limits */
+static void be_sriov_config(struct be_adapter *adapter)
+{
+       struct device *dev = &adapter->pdev->dev;
+       int status;
+
+       status = be_get_sriov_config(adapter);
+       if (status) {
+               dev_err(dev, "Failed to query SR-IOV configuration\n");
+               dev_err(dev, "SR-IOV cannot be enabled\n");
+               return;
+       }
+
+       /* When the HW is in an SRIOV-capable configuration, the PF-pool
+        * resources are distributed equally across the max number of
+        * VFs. The user may request that only a subset of the max VFs be
+        * enabled. Based on num_vfs, redistribute the resources across
+        * num_vfs so that each VF has access to more resources.
+        * This facility is not available in BE3 FW; on Lancer chips the
+        * FW performs this redistribution itself.
+        */
+       if (be_max_vfs(adapter) && !pci_num_vf(adapter->pdev)) {
+               status = be_cmd_set_sriov_config(adapter,
+                                                adapter->pool_res,
+                                                adapter->num_vfs);
+               if (status)
+                       dev_err(dev, "Failed to optimize SR-IOV resources\n");
+       }
+}
+
 static int be_get_config(struct be_adapter *adapter)
 {
        u16 profile_id;
        int status;
 
-       status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
-                                    &adapter->function_mode,
-                                    &adapter->function_caps,
-                                    &adapter->asic_rev);
+       status = be_cmd_query_fw_cfg(adapter);
        if (status)
                return status;
 
@@ -3413,6 +3471,9 @@ static int be_get_config(struct be_adapter *adapter)
                                 "Using profile 0x%x\n", profile_id);
        }
 
+       if (!BE2_chip(adapter) && be_physfn(adapter))
+               be_sriov_config(adapter);
+
        status = be_get_resources(adapter);
        if (status)
                return status;
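
Taken together, the hunks above give be_get_config() this probe-time
ordering on a PF (a call-order sketch reconstructed from this patch):

    /*
     * be_get_config()
     *   be_cmd_query_fw_cfg()        - port number, function mode/caps
     *   be_sriov_config()            - skipped on BE2 and on VFs
     *     be_get_sriov_config()      - query pool limits, clamp num_vfs
     *     be_cmd_set_sriov_config()  - spread the PF pool across num_vfs
     *   be_get_resources()           - query per-function limits
     */
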
@@ -3571,7 +3632,7 @@ static int be_setup(struct be_adapter *adapter)
        if (status)
                goto err;
 
-       be_cmd_get_fw_ver(adapter, adapter->fw_ver, adapter->fw_on_flash);
+       be_cmd_get_fw_ver(adapter);
 
        if (BE2_chip(adapter) && fw_major_num(adapter->fw_ver) < 4) {
                dev_err(dev, "Firmware on card is old(%s), IRQs may not work.",
@@ -3596,12 +3657,8 @@ static int be_setup(struct be_adapter *adapter)
                be_cmd_set_logical_link_config(adapter,
                                               IFLA_VF_LINK_STATE_AUTO, 0);
 
-       if (sriov_want(adapter)) {
-               if (be_max_vfs(adapter))
-                       be_vf_setup(adapter);
-               else
-                       dev_warn(dev, "device doesn't support SRIOV\n");
-       }
+       if (adapter->num_vfs)
+               be_vf_setup(adapter);
 
        status = be_cmd_get_phy_info(adapter);
        if (!status && be_pause_supported(adapter))
@@ -3925,7 +3982,7 @@ static int be_flash_skyhawk(struct be_adapter *adapter,
        fsec = get_fsec_info(adapter, filehdr_size + img_hdrs_size, fw);
        if (!fsec) {
                dev_err(dev, "Invalid Cookie. FW image may be corrupted\n");
-               return -1;
+               return -EINVAL;
        }
 
        for (i = 0; i < le32_to_cpu(fsec->fsec_hdr.num_images); i++) {
@@ -4094,7 +4151,7 @@ lancer_fw_exit:
 static int be_get_ufi_type(struct be_adapter *adapter,
                           struct flash_file_hdr_g3 *fhdr)
 {
-       if (fhdr == NULL)
+       if (!fhdr)
                goto be_get_ufi_exit;
 
        if (skyhawk_chip(adapter) && fhdr->build[0] == '4')
@@ -4156,7 +4213,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
                                                              &flash_cmd,
                                                              num_imgs);
                                else {
-                                       status = -1;
+                                       status = -EINVAL;
                                        dev_err(&adapter->pdev->dev,
                                                "Can't load BE3 UFI on BE3R\n");
                                }
@@ -4167,7 +4224,7 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
        if (ufi_type == UFI_TYPE2)
                status = be_flash_BEx(adapter, fw, &flash_cmd, 0);
        else if (ufi_type == -1)
-               status = -1;
+               status = -EINVAL;
 
        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
@@ -4190,7 +4247,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
-               return -1;
+               return -ENETDOWN;
        }
 
        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
@@ -4205,8 +4262,7 @@ int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
                status = be_fw_download(adapter, fw);
 
        if (!status)
-               be_cmd_get_fw_ver(adapter, adapter->fw_ver,
-                                 adapter->fw_on_flash);
+               be_cmd_get_fw_ver(adapter);
 
 fw_exit:
        release_firmware(fw);
@@ -4437,12 +4493,12 @@ static int be_map_pci_bars(struct be_adapter *adapter)
 
        if (BEx_chip(adapter) && be_physfn(adapter)) {
                adapter->csr = pci_iomap(adapter->pdev, 2, 0);
-               if (adapter->csr == NULL)
+               if (!adapter->csr)
                        return -ENOMEM;
        }
 
        addr = pci_iomap(adapter->pdev, db_bar(adapter), 0);
-       if (addr == NULL)
+       if (!addr)
                goto pci_map_err;
        adapter->db = addr;
 
@@ -4505,7 +4561,7 @@ static int be_ctrl_init(struct be_adapter *adapter)
        rx_filter->va = dma_zalloc_coherent(&adapter->pdev->dev,
                                            rx_filter->size, &rx_filter->dma,
                                            GFP_KERNEL);
-       if (rx_filter->va == NULL) {
+       if (!rx_filter->va) {
                status = -ENOMEM;
                goto free_mbox;
        }
@@ -4554,8 +4610,8 @@ static int be_stats_init(struct be_adapter *adapter)
 
        cmd->va = dma_zalloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                      GFP_KERNEL);
-       if (cmd->va == NULL)
-               return -1;
+       if (!cmd->va)
+               return -ENOMEM;
        return 0;
 }
 
@@ -4776,7 +4832,7 @@ static int be_probe(struct pci_dev *pdev, const struct pci_device_id *pdev_id)
        pci_set_master(pdev);
 
        netdev = alloc_etherdev_mqs(sizeof(*adapter), MAX_TX_QS, MAX_RX_QS);
-       if (netdev == NULL) {
+       if (!netdev) {
                status = -ENOMEM;
                goto rel_reg;
        }
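
A recurring cleanup in this file replaces bare -1 returns with real
errnos (-ENOMEM, -EINVAL, -ENETDOWN). Read back as an errno, -1 aliases
-EPERM, so a tool such as ethtool would report "Operation not permitted"
for what is actually an allocation or validation failure. A small
userspace illustration of the aliasing (assumes Linux errno values):

    #include <errno.h>
    #include <stdio.h>

    int main(void)
    {
            /* on Linux EPERM == 1, so a bare -1 is exactly -EPERM */
            printf("EPERM=%d ENOMEM=%d EINVAL=%d\n",
                   EPERM, ENOMEM, EINVAL);      /* prints: 1 12 22 */
            return 0;
    }
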
index 671d080..bd53caf 100644
@@ -256,12 +256,6 @@ struct bufdesc_ex {
 #define FLAG_RX_CSUM_ENABLED   (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 #define FLAG_RX_CSUM_ERROR     (BD_ENET_RX_ICE | BD_ENET_RX_PCR)
 
-struct fec_enet_delayed_work {
-       struct delayed_work delay_work;
-       bool timeout;
-       bool trig_tx;
-};
-
 /* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
  * tx_bd_base always point to the base of the buffer descriptors.  The
  * cur_rx and cur_tx point to the currently available buffer.
@@ -308,7 +302,6 @@ struct fec_enet_private {
 
        struct  platform_device *pdev;
 
-       int     opened;
        int     dev_id;
 
        /* Phylib and MDIO interface */
@@ -328,6 +321,8 @@ struct fec_enet_private {
        struct  napi_struct napi;
        int     csum_flags;
 
+       struct work_struct tx_timeout_work;
+
        struct ptp_clock *ptp_clock;
        struct ptp_clock_info ptp_caps;
        unsigned long last_overflow_check;
@@ -340,7 +335,6 @@ struct fec_enet_private {
        int hwts_rx_en;
        int hwts_tx_en;
        struct timer_list time_keep;
-       struct fec_enet_delayed_work delay_work;
        struct regulator *reg_phy;
 };
 
index 38d9d27..66fe1f6 100644
@@ -320,6 +320,32 @@ static void *swap_buffer(void *bufaddr, int len)
        return bufaddr;
 }
 
+static void fec_dump(struct net_device *ndev)
+{
+       struct fec_enet_private *fep = netdev_priv(ndev);
+       struct bufdesc *bdp = fep->tx_bd_base;
+       unsigned int index = 0;
+
+       netdev_info(ndev, "TX ring dump\n");
+       pr_info("Nr     SC     addr       len  SKB\n");
+
+       do {
+               pr_info("%3u %c%c 0x%04x 0x%08lx %4u %p\n",
+                       index,
+                       bdp == fep->cur_tx ? 'S' : ' ',
+                       bdp == fep->dirty_tx ? 'H' : ' ',
+                       bdp->cbd_sc, bdp->cbd_bufaddr, bdp->cbd_datlen,
+                       fep->tx_skbuff[index]);
+               bdp = fec_enet_get_nextdesc(bdp, fep);
+               index++;
+       } while (bdp != fep->tx_bd_base);
+}
+
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+       return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
 static int
 fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -330,28 +356,13 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
        if (unlikely(skb_cow_head(skb, 0)))
                return -1;
 
-       ip_hdr(skb)->check = 0;
+       if (is_ipv4_pkt(skb))
+               ip_hdr(skb)->check = 0;
        *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
 
        return 0;
 }
 
-static void
-fec_enet_submit_work(struct bufdesc *bdp, struct fec_enet_private *fep)
-{
-       const struct platform_device_id *id_entry =
-                               platform_get_device_id(fep->pdev);
-       struct bufdesc *bdp_pre;
-
-       bdp_pre = fec_enet_get_prevdesc(bdp, fep);
-       if ((id_entry->driver_data & FEC_QUIRK_ERR006358) &&
-           !(bdp_pre->cbd_sc & BD_ENET_TX_READY)) {
-               fep->delay_work.trig_tx = true;
-               schedule_delayed_work(&(fep->delay_work.delay_work),
-                                       msecs_to_jiffies(1));
-       }
-}
-
 static int
 fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
 {
@@ -367,6 +378,7 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
        skb_frag_t *this_frag;
        unsigned int index;
        void *bufaddr;
+       dma_addr_t addr;
        int i;
 
        for (frag = 0; frag < nr_frags; frag++) {
@@ -409,15 +421,16 @@ fec_enet_txq_submit_frag_skb(struct sk_buff *skb, struct net_device *ndev)
                                swap_buffer(bufaddr, frag_len);
                }
 
-               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                               frag_len, DMA_TO_DEVICE);
-               if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+               addr = dma_map_single(&fep->pdev->dev, bufaddr, frag_len,
+                                     DMA_TO_DEVICE);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
                        dev_kfree_skb_any(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Tx DMA memory map failed\n");
                        goto dma_mapping_error;
                }
 
+               bdp->cbd_bufaddr = addr;
                bdp->cbd_datlen = frag_len;
                bdp->cbd_sc = status;
        }
@@ -444,6 +457,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        int nr_frags = skb_shinfo(skb)->nr_frags;
        struct bufdesc *bdp, *last_bdp;
        void *bufaddr;
+       dma_addr_t addr;
        unsigned short status;
        unsigned short buflen;
        unsigned int estatus = 0;
@@ -484,12 +498,9 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
                        swap_buffer(bufaddr, buflen);
        }
 
-       /* Push the data cache so the CPM does not get stale memory
-        * data.
-        */
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, bufaddr,
-                                       buflen, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       /* Push the data cache so the CPM does not get stale memory data. */
+       addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
@@ -531,6 +542,7 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        fep->tx_skbuff[index] = skb;
 
        bdp->cbd_datlen = buflen;
+       bdp->cbd_bufaddr = addr;
 
        /* Send it on its way.  Tell FEC it's ready, interrupt when done,
         * it's the last BD of the frame, and to put the CRC on the end.
@@ -538,8 +550,6 @@ static int fec_enet_txq_submit_skb(struct sk_buff *skb, struct net_device *ndev)
        status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
        bdp->cbd_sc = status;
 
-       fec_enet_submit_work(bdp, fep);
-
        /* If this was the last BD in the ring, start at the beginning again. */
        bdp = fec_enet_get_nextdesc(last_bdp, fep);
 
@@ -564,12 +574,12 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
        unsigned short status;
        unsigned int estatus = 0;
+       dma_addr_t addr;
 
        status = bdp->cbd_sc;
        status &= ~BD_ENET_TX_STATS;
 
        status |= (BD_ENET_TX_TC | BD_ENET_TX_READY);
-       bdp->cbd_datlen = size;
 
        if (((unsigned long) data) & FEC_ALIGNMENT ||
                id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) {
@@ -580,15 +590,17 @@ fec_enet_txq_put_data_tso(struct sk_buff *skb, struct net_device *ndev,
                        swap_buffer(data, size);
        }
 
-       bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
-                                       size, DMA_TO_DEVICE);
-       if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
+       addr = dma_map_single(&fep->pdev->dev, data, size, DMA_TO_DEVICE);
+       if (dma_mapping_error(&fep->pdev->dev, addr)) {
                dev_kfree_skb_any(skb);
                if (net_ratelimit())
                        netdev_err(ndev, "Tx DMA memory map failed\n");
                return NETDEV_TX_BUSY;
        }
 
+       bdp->cbd_datlen = size;
+       bdp->cbd_bufaddr = addr;
+
        if (fep->bufdesc_ex) {
                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        estatus |= BD_ENET_TX_PINS | BD_ENET_TX_IINS;
@@ -726,8 +738,6 @@ static int fec_enet_txq_submit_tso(struct sk_buff *skb, struct net_device *ndev)
        /* Save skb pointer */
        fep->tx_skbuff[index] = skb;
 
-       fec_enet_submit_work(bdp, fep);
-
        skb_tx_timestamp(skb);
        fep->cur_tx = bdp;
 
@@ -795,7 +805,7 @@ static void fec_enet_bd_init(struct net_device *dev)
 
                /* Initialize the BD for every fragment in the page. */
                bdp->cbd_sc = 0;
-               if (bdp->cbd_bufaddr && fep->tx_skbuff[i]) {
+               if (fep->tx_skbuff[i]) {
                        dev_kfree_skb_any(fep->tx_skbuff[i]);
                        fep->tx_skbuff[i] = NULL;
                }
@@ -809,12 +819,13 @@ static void fec_enet_bd_init(struct net_device *dev)
        fep->dirty_tx = bdp;
 }
 
-/* This function is called to start or restart the FEC during a link
- * change.  This only happens when switching between half and full
- * duplex.
+/*
+ * This function is called to start or restart the FEC on a link
+ * change, on a transmit timeout, or to reconfigure it.  Network
+ * packet processing for this device must be stopped before this call.
  */
 static void
-fec_restart(struct net_device *ndev, int duplex)
+fec_restart(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
        const struct platform_device_id *id_entry =
@@ -825,13 +836,6 @@ fec_restart(struct net_device *ndev, int duplex)
        u32 rcntl = OPT_FRAME_SIZE | 0x04;
        u32 ecntl = 0x2; /* ETHEREN */
 
-       if (netif_running(ndev)) {
-               netif_device_detach(ndev);
-               napi_disable(&fep->napi);
-               netif_stop_queue(ndev);
-               netif_tx_lock_bh(ndev);
-       }
-
        /* Whack a reset.  We should wait for this. */
        writel(1, fep->hwp + FEC_ECNTRL);
        udelay(10);
@@ -872,7 +876,7 @@ fec_restart(struct net_device *ndev, int duplex)
        }
 
        /* Enable MII mode */
-       if (duplex) {
+       if (fep->full_duplex == DUPLEX_FULL) {
                /* FD enable */
                writel(0x04, fep->hwp + FEC_X_CNTRL);
        } else {
@@ -881,8 +885,6 @@ fec_restart(struct net_device *ndev, int duplex)
                writel(0x0, fep->hwp + FEC_X_CNTRL);
        }
 
-       fep->full_duplex = duplex;
-
        /* Set MII speed */
        writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
 
@@ -1000,13 +1002,6 @@ fec_restart(struct net_device *ndev, int duplex)
 
        /* Enable interrupts we wish to service */
        writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
-
-       if (netif_running(ndev)) {
-               netif_tx_unlock_bh(ndev);
-               netif_wake_queue(ndev);
-               napi_enable(&fep->napi);
-               netif_device_attach(ndev);
-       }
 }
 
 static void
@@ -1044,29 +1039,44 @@ fec_timeout(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       fec_dump(ndev);
+
        ndev->stats.tx_errors++;
 
-       fep->delay_work.timeout = true;
-       schedule_delayed_work(&(fep->delay_work.delay_work), 0);
+       schedule_work(&fep->tx_timeout_work);
 }
 
-static void fec_enet_work(struct work_struct *work)
+static void fec_enet_timeout_work(struct work_struct *work)
 {
        struct fec_enet_private *fep =
-               container_of(work,
-                            struct fec_enet_private,
-                            delay_work.delay_work.work);
+               container_of(work, struct fec_enet_private, tx_timeout_work);
+       struct net_device *ndev = fep->netdev;
 
-       if (fep->delay_work.timeout) {
-               fep->delay_work.timeout = false;
-               fec_restart(fep->netdev, fep->full_duplex);
-               netif_wake_queue(fep->netdev);
+       rtnl_lock();
+       if (netif_device_present(ndev) || netif_running(ndev)) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
+               fec_restart(ndev);
+               netif_wake_queue(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
        }
+       rtnl_unlock();
+}
 
-       if (fep->delay_work.trig_tx) {
-               fep->delay_work.trig_tx = false;
-               writel(0, fep->hwp + FEC_X_DES_ACTIVE);
-       }
+static void
+fec_enet_hwtstamp(struct fec_enet_private *fep, unsigned ts,
+       struct skb_shared_hwtstamps *hwtstamps)
+{
+       unsigned long flags;
+       u64 ns;
+
+       spin_lock_irqsave(&fep->tmreg_lock, flags);
+       ns = timecounter_cyc2time(&fep->tc, ts);
+       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+       memset(hwtstamps, 0, sizeof(*hwtstamps));
+       hwtstamps->hwtstamp = ns_to_ktime(ns);
 }
 
 static void
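
fec_enet_hwtstamp() above consolidates the timecounter conversion that
the TX and RX completion paths previously open-coded. tmreg_lock is
taken because the timecounter state is advanced elsewhere (for instance
by the periodic overflow check). Callers, as the following hunks show,
reduce to a sketch like:

    struct skb_shared_hwtstamps shhwtstamps;

    /* raw cycle count from the extended descriptor -> ktime */
    fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
    skb_tstamp_tx(skb, &shhwtstamps);
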
@@ -1094,6 +1104,7 @@ fec_enet_tx(struct net_device *ndev)
                index = fec_enet_get_bd_index(fep->tx_bd_base, bdp, fep);
 
                skb = fep->tx_skbuff[index];
+               fep->tx_skbuff[index] = NULL;
                if (!IS_TSO_HEADER(fep, bdp->cbd_bufaddr))
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        bdp->cbd_datlen, DMA_TO_DEVICE);
@@ -1126,20 +1137,12 @@ fec_enet_tx(struct net_device *ndev)
                if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
                        fep->bufdesc_ex) {
                        struct skb_shared_hwtstamps shhwtstamps;
-                       unsigned long flags;
                        struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
 
-                       memset(&shhwtstamps, 0, sizeof(shhwtstamps));
-                       spin_lock_irqsave(&fep->tmreg_lock, flags);
-                       shhwtstamps.hwtstamp = ns_to_ktime(
-                               timecounter_cyc2time(&fep->tc, ebdp->ts));
-                       spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+                       fec_enet_hwtstamp(fep, ebdp->ts, &shhwtstamps);
                        skb_tstamp_tx(skb, &shhwtstamps);
                }
 
-               if (status & BD_ENET_TX_READY)
-                       netdev_err(ndev, "HEY! Enet xmit interrupt and TX_READY\n");
-
                /* Deferred means some collisions occurred during transmit,
                 * but we eventually sent the packet OK.
                 */
@@ -1148,7 +1151,6 @@ fec_enet_tx(struct net_device *ndev)
 
                /* Free the sk buffer associated with this last transmit */
                dev_kfree_skb_any(skb);
-               fep->tx_skbuff[index] = NULL;
 
                fep->dirty_tx = bdp;
 
@@ -1163,7 +1165,10 @@ fec_enet_tx(struct net_device *ndev)
                                netif_wake_queue(ndev);
                }
        }
-       return;
+
+       /* ERR006358: Keep the transmitter going */
+       if (bdp != fep->cur_tx && readl(fep->hwp + FEC_X_DES_ACTIVE) == 0)
+               writel(0, fep->hwp + FEC_X_DES_ACTIVE);
 }
 
 /* During a receive, the cur_rx points to the current incoming buffer.
@@ -1209,8 +1214,7 @@ fec_enet_rx(struct net_device *ndev, int budget)
                if ((status & BD_ENET_RX_LAST) == 0)
                        netdev_err(ndev, "rcv is not +last\n");
 
-               if (!fep->opened)
-                       goto rx_processing_done;
+               writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
 
                /* Check for errors. */
                if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
@@ -1294,18 +1298,9 @@ fec_enet_rx(struct net_device *ndev, int budget)
                        skb->protocol = eth_type_trans(skb, ndev);
 
                        /* Get receive timestamp from the skb */
-                       if (fep->hwts_rx_en && fep->bufdesc_ex) {
-                               struct skb_shared_hwtstamps *shhwtstamps =
-                                                           skb_hwtstamps(skb);
-                               unsigned long flags;
-
-                               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-
-                               spin_lock_irqsave(&fep->tmreg_lock, flags);
-                               shhwtstamps->hwtstamp = ns_to_ktime(
-                                   timecounter_cyc2time(&fep->tc, ebdp->ts));
-                               spin_unlock_irqrestore(&fep->tmreg_lock, flags);
-                       }
+                       if (fep->hwts_rx_en && fep->bufdesc_ex)
+                               fec_enet_hwtstamp(fep, ebdp->ts,
+                                                 skb_hwtstamps(skb));
 
                        if (fep->bufdesc_ex &&
                            (fep->csum_flags & FLAG_RX_CSUM_ENABLED)) {
@@ -1363,29 +1358,25 @@ fec_enet_interrupt(int irq, void *dev_id)
 {
        struct net_device *ndev = dev_id;
        struct fec_enet_private *fep = netdev_priv(ndev);
+       const unsigned napi_mask = FEC_ENET_RXF | FEC_ENET_TXF;
        uint int_events;
        irqreturn_t ret = IRQ_NONE;
 
-       do {
-               int_events = readl(fep->hwp + FEC_IEVENT);
-               writel(int_events, fep->hwp + FEC_IEVENT);
+       int_events = readl(fep->hwp + FEC_IEVENT);
+       writel(int_events & ~napi_mask, fep->hwp + FEC_IEVENT);
 
-               if (int_events & (FEC_ENET_RXF | FEC_ENET_TXF)) {
-                       ret = IRQ_HANDLED;
+       if (int_events & napi_mask) {
+               ret = IRQ_HANDLED;
 
-                       /* Disable the RX interrupt */
-                       if (napi_schedule_prep(&fep->napi)) {
-                               writel(FEC_RX_DISABLED_IMASK,
-                                       fep->hwp + FEC_IMASK);
-                               __napi_schedule(&fep->napi);
-                       }
-               }
+               /* Disable the NAPI interrupts */
+               writel(FEC_ENET_MII, fep->hwp + FEC_IMASK);
+               napi_schedule(&fep->napi);
+       }
 
-               if (int_events & FEC_ENET_MII) {
-                       ret = IRQ_HANDLED;
-                       complete(&fep->mdio_done);
-               }
-       } while (int_events);
+       if (int_events & FEC_ENET_MII) {
+               ret = IRQ_HANDLED;
+               complete(&fep->mdio_done);
+       }
 
        return ret;
 }
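
The rewritten handler acks only the non-NAPI events, masks everything
but the MII-done interrupt, and defers all RX/TX work to the poll
function. Its counterpart, re-arming the full mask once polling
completes, is not visible in these hunks; a sketch of that half, with
process_rings() as a hypothetical stand-in:

    static int foo_poll(struct napi_struct *napi, int budget)
    {
            int work = process_rings(napi->dev, budget); /* hypothetical */

            if (work < budget) {
                    napi_complete(napi);
                    /* restore the full mask, e.g.
                     * writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
                     */
            }
            return work;
    }
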
@@ -1393,8 +1384,16 @@ fec_enet_interrupt(int irq, void *dev_id)
 static int fec_enet_rx_napi(struct napi_struct *napi, int budget)
 {
        struct net_device *ndev = napi->dev;
-       int pkts = fec_enet_rx(ndev, budget);
        struct fec_enet_private *fep = netdev_priv(ndev);
+       int pkts;
+
+       /*
+        * Clear any pending transmit or receive interrupts before
+        * processing the rings to avoid racing with the hardware.
+        */
+       writel(FEC_ENET_RXF | FEC_ENET_TXF, fep->hwp + FEC_IEVENT);
+
+       pkts = fec_enet_rx(ndev, budget);
 
        fec_enet_tx(ndev);
 
@@ -1492,14 +1491,23 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                return;
        }
 
-       if (phy_dev->link) {
+       /*
+        * If the netdev is down, or is going down, we're not interested
+        * in link state events, so just mark our idea of the link as down
+        * and ignore the event.
+        */
+       if (!netif_running(ndev) || !netif_device_present(ndev)) {
+               fep->link = 0;
+       } else if (phy_dev->link) {
                if (!fep->link) {
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
 
-               if (fep->full_duplex != phy_dev->duplex)
+               if (fep->full_duplex != phy_dev->duplex) {
+                       fep->full_duplex = phy_dev->duplex;
                        status_change = 1;
+               }
 
                if (phy_dev->speed != fep->speed) {
                        fep->speed = phy_dev->speed;
@@ -1507,11 +1515,21 @@ static void fec_enet_adjust_link(struct net_device *ndev)
                }
 
                /* if any of the above changed restart the FEC */
-               if (status_change)
-                       fec_restart(ndev, phy_dev->duplex);
+               if (status_change) {
+                       napi_disable(&fep->napi);
+                       netif_tx_lock_bh(ndev);
+                       fec_restart(ndev);
+                       netif_wake_queue(ndev);
+                       netif_tx_unlock_bh(ndev);
+                       napi_enable(&fep->napi);
+               }
        } else {
                if (fep->link) {
+                       napi_disable(&fep->napi);
+                       netif_tx_lock_bh(ndev);
                        fec_stop(ndev);
+                       netif_tx_unlock_bh(ndev);
+                       napi_enable(&fep->napi);
                        fep->link = phy_dev->link;
                        status_change = 1;
                }
@@ -1661,6 +1679,7 @@ static int fec_enet_mii_probe(struct net_device *ndev)
        /* mask with MAC supported features */
        if (id_entry->driver_data & FEC_QUIRK_HAS_GBIT) {
                phy_dev->supported &= PHY_GBIT_FEATURES;
+               phy_dev->supported &= ~SUPPORTED_1000baseT_Half;
 #if !defined(CONFIG_M5272)
                phy_dev->supported |= SUPPORTED_Pause;
 #endif
@@ -1864,6 +1883,9 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       if (!fep->phy_dev)
+               return -ENODEV;
+
        if (pause->tx_pause != pause->rx_pause) {
                netdev_info(ndev,
                        "hardware only support enable/disable both tx and rx");
@@ -1889,8 +1911,14 @@ static int fec_enet_set_pauseparam(struct net_device *ndev,
                        fec_stop(ndev);
                phy_start_aneg(fep->phy_dev);
        }
-       if (netif_running(ndev))
-               fec_restart(ndev, 0);
+       if (netif_running(ndev)) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
+               fec_restart(ndev);
+               netif_wake_queue(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
+       }
 
        return 0;
 }
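
With the detach/attach logic gone from fec_restart(), each caller is now
responsible for quiescing the device itself. The same sequence recurs at
every call site touched by this patch (the timeout worker,
fec_enet_adjust_link() and fec_enet_set_pauseparam() above, and
fec_set_features() below); a hypothetical helper capturing it, as a
sketch rather than code from the patch:

    static void fec_restart_quiesced(struct net_device *ndev)
    {
            struct fec_enet_private *fep = netdev_priv(ndev);

            napi_disable(&fep->napi);       /* stop poll processing */
            netif_tx_lock_bh(ndev);         /* fence out transmitters */
            fec_restart(ndev);
            netif_wake_queue(ndev);
            netif_tx_unlock_bh(ndev);
            napi_enable(&fep->napi);
    }
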
@@ -2007,21 +2035,19 @@ static int fec_enet_nway_reset(struct net_device *dev)
 }
 
 static const struct ethtool_ops fec_enet_ethtool_ops = {
-#if !defined(CONFIG_M5272)
-       .get_pauseparam         = fec_enet_get_pauseparam,
-       .set_pauseparam         = fec_enet_set_pauseparam,
-#endif
        .get_settings           = fec_enet_get_settings,
        .set_settings           = fec_enet_set_settings,
        .get_drvinfo            = fec_enet_get_drvinfo,
-       .get_link               = ethtool_op_get_link,
-       .get_ts_info            = fec_enet_get_ts_info,
        .nway_reset             = fec_enet_nway_reset,
+       .get_link               = ethtool_op_get_link,
 #ifndef CONFIG_M5272
-       .get_ethtool_stats      = fec_enet_get_ethtool_stats,
+       .get_pauseparam         = fec_enet_get_pauseparam,
+       .set_pauseparam         = fec_enet_set_pauseparam,
        .get_strings            = fec_enet_get_strings,
+       .get_ethtool_stats      = fec_enet_get_ethtool_stats,
        .get_sset_count         = fec_enet_get_sset_count,
 #endif
+       .get_ts_info            = fec_enet_get_ts_info,
 };
 
 static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
@@ -2055,18 +2081,23 @@ static void fec_enet_free_buffers(struct net_device *ndev)
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {
                skb = fep->rx_skbuff[i];
-
-               if (bdp->cbd_bufaddr)
+               fep->rx_skbuff[i] = NULL;
+               if (skb) {
                        dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
                                        FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-               if (skb)
                        dev_kfree_skb(skb);
+               }
                bdp = fec_enet_get_nextdesc(bdp, fep);
        }
 
        bdp = fep->tx_bd_base;
-       for (i = 0; i < fep->tx_ring_size; i++)
+       for (i = 0; i < fep->tx_ring_size; i++) {
                kfree(fep->tx_bounce[i]);
+               fep->tx_bounce[i] = NULL;
+               skb = fep->tx_skbuff[i];
+               fep->tx_skbuff[i] = NULL;
+               dev_kfree_skb(skb);
+       }
 }
 
 static int fec_enet_alloc_buffers(struct net_device *ndev)
@@ -2078,21 +2109,23 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
 
        bdp = fep->rx_bd_base;
        for (i = 0; i < fep->rx_ring_size; i++) {
+               dma_addr_t addr;
+
                skb = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
-               if (!skb) {
-                       fec_enet_free_buffers(ndev);
-                       return -ENOMEM;
-               }
-               fep->rx_skbuff[i] = skb;
+               if (!skb)
+                       goto err_alloc;
 
-               bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, skb->data,
+               addr = dma_map_single(&fep->pdev->dev, skb->data,
                                FEC_ENET_RX_FRSIZE, DMA_FROM_DEVICE);
-               if (dma_mapping_error(&fep->pdev->dev, bdp->cbd_bufaddr)) {
-                       fec_enet_free_buffers(ndev);
+               if (dma_mapping_error(&fep->pdev->dev, addr)) {
+                       dev_kfree_skb(skb);
                        if (net_ratelimit())
                                netdev_err(ndev, "Rx DMA memory map failed\n");
-                       return -ENOMEM;
+                       goto err_alloc;
                }
+
+               fep->rx_skbuff[i] = skb;
+               bdp->cbd_bufaddr = addr;
                bdp->cbd_sc = BD_ENET_RX_EMPTY;
 
                if (fep->bufdesc_ex) {
@@ -2110,6 +2143,8 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
        bdp = fep->tx_bd_base;
        for (i = 0; i < fep->tx_ring_size; i++) {
                fep->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+               if (!fep->tx_bounce[i])
+                       goto err_alloc;
 
                bdp->cbd_sc = 0;
                bdp->cbd_bufaddr = 0;
@@ -2127,6 +2162,10 @@ static int fec_enet_alloc_buffers(struct net_device *ndev)
        bdp->cbd_sc |= BD_SC_WRAP;
 
        return 0;
+
+ err_alloc:
+       fec_enet_free_buffers(ndev);
+       return -ENOMEM;
 }
 
 static int
@@ -2155,10 +2194,10 @@ fec_enet_open(struct net_device *ndev)
                return ret;
        }
 
+       fec_restart(ndev);
        napi_enable(&fep->napi);
        phy_start(fep->phy_dev);
        netif_start_queue(ndev);
-       fep->opened = 1;
        return 0;
 }
 
@@ -2167,17 +2206,17 @@ fec_enet_close(struct net_device *ndev)
 {
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       /* Don't know what to do yet. */
-       napi_disable(&fep->napi);
-       fep->opened = 0;
-       netif_stop_queue(ndev);
-       fec_stop(ndev);
+       phy_stop(fep->phy_dev);
 
-       if (fep->phy_dev) {
-               phy_stop(fep->phy_dev);
-               phy_disconnect(fep->phy_dev);
+       if (netif_device_present(ndev)) {
+               napi_disable(&fep->napi);
+               netif_tx_disable(ndev);
+               fec_stop(ndev);
        }
 
+       phy_disconnect(fep->phy_dev);
+       fep->phy_dev = NULL;
+
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
        fec_enet_free_buffers(ndev);
@@ -2304,12 +2343,21 @@ static void fec_poll_controller(struct net_device *dev)
 }
 #endif
 
+#define FEATURES_NEED_QUIESCE NETIF_F_RXCSUM
+
 static int fec_set_features(struct net_device *netdev,
        netdev_features_t features)
 {
        struct fec_enet_private *fep = netdev_priv(netdev);
        netdev_features_t changed = features ^ netdev->features;
 
+       /* Quiesce the device if necessary */
+       if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(netdev);
+               fec_stop(netdev);
+       }
+
        netdev->features = features;
 
        /* Receive checksum has been changed */
@@ -2318,14 +2366,14 @@ static int fec_set_features(struct net_device *netdev,
                        fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
                else
                        fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+       }
 
-               if (netif_running(netdev)) {
-                       fec_stop(netdev);
-                       fec_restart(netdev, fep->phy_dev->duplex);
-                       netif_wake_queue(netdev);
-               } else {
-                       fec_restart(netdev, fep->phy_dev->duplex);
-               }
+       /* Resume the device after updates */
+       if (netif_running(netdev) && changed & FEATURES_NEED_QUIESCE) {
+               fec_restart(netdev);
+               netif_wake_queue(netdev);
+               netif_tx_unlock_bh(netdev);
+               napi_enable(&fep->napi);
        }
 
        return 0;
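
FEATURES_NEED_QUIESCE makes the stop/restart conditional: only toggling
a feature in that mask forces the quiesce cycle above, while any other
feature change merely updates netdev->features. Extending it later stays
a one-line change (illustrative only; NETIF_F_LOOPBACK is not a feature
this driver currently offers):

    #define FEATURES_NEED_QUIESCE   (NETIF_F_RXCSUM | NETIF_F_LOOPBACK)
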
@@ -2426,7 +2474,7 @@ static int fec_enet_init(struct net_device *ndev)
 
        ndev->hw_features = ndev->features;
 
-       fec_restart(ndev, 0);
+       fec_restart(ndev);
 
        return 0;
 }
@@ -2609,7 +2657,7 @@ fec_probe(struct platform_device *pdev)
        if (fep->bufdesc_ex && fep->ptp_clock)
                netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
 
-       INIT_DELAYED_WORK(&(fep->delay_work.delay_work), fec_enet_work);
+       INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
        return 0;
 
 failed_register:
@@ -2634,7 +2682,7 @@ fec_drv_remove(struct platform_device *pdev)
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
-       cancel_delayed_work_sync(&(fep->delay_work.delay_work));
+       cancel_work_sync(&fep->tx_timeout_work);
        unregister_netdev(ndev);
        fec_enet_mii_remove(fep);
        del_timer_sync(&fep->time_keep);
@@ -2648,17 +2696,22 @@ fec_drv_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_PM_SLEEP
-static int
-fec_suspend(struct device *dev)
+static int __maybe_unused fec_suspend(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
 
+       rtnl_lock();
        if (netif_running(ndev)) {
-               fec_stop(ndev);
+               phy_stop(fep->phy_dev);
+               napi_disable(&fep->napi);
+               netif_tx_lock_bh(ndev);
                netif_device_detach(ndev);
+               netif_tx_unlock_bh(ndev);
+               fec_stop(ndev);
        }
+       rtnl_unlock();
+
        fec_enet_clk_enable(ndev, false);
        pinctrl_pm_select_sleep_state(&fep->pdev->dev);
 
@@ -2668,8 +2721,7 @@ fec_suspend(struct device *dev)
        return 0;
 }
 
-static int
-fec_resume(struct device *dev)
+static int __maybe_unused fec_resume(struct device *dev)
 {
        struct net_device *ndev = dev_get_drvdata(dev);
        struct fec_enet_private *fep = netdev_priv(ndev);
@@ -2686,10 +2738,16 @@ fec_resume(struct device *dev)
        if (ret)
                goto failed_clk;
 
+       rtnl_lock();
        if (netif_running(ndev)) {
-               fec_restart(ndev, fep->full_duplex);
+               fec_restart(ndev);
+               netif_tx_lock_bh(ndev);
                netif_device_attach(ndev);
+               netif_tx_unlock_bh(ndev);
+               napi_enable(&fep->napi);
+               phy_start(fep->phy_dev);
        }
+       rtnl_unlock();
 
        return 0;
 
@@ -2698,7 +2756,6 @@ failed_clk:
                regulator_disable(fep->reg_phy);
        return ret;
 }
-#endif /* CONFIG_PM_SLEEP */
 
 static SIMPLE_DEV_PM_OPS(fec_pm_ops, fec_suspend, fec_resume);
 
index fab39e2..8ceaf7a 100644
@@ -2396,7 +2396,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
                if (netif_msg_ifup(ugeth))
                        pr_err("Bad number of Rx threads value\n");
                return -EINVAL;
-               break;
        }
 
        switch (ug_info->numThreadsTx) {
@@ -2419,7 +2418,6 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
                if (netif_msg_ifup(ugeth))
                        pr_err("Bad number of Tx threads value\n");
                return -EINVAL;
-               break;
        }
 
        /* Calculate rx_extended_features */
@@ -2990,11 +2988,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
        if (ug_info->rxExtendedFiltering) {
                size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
                if (ug_info->largestexternallookupkeysize ==
-                   QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
+                   QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
                        size +=
                            THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
                if (ug_info->largestexternallookupkeysize ==
-                   QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
+                   QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
                        size +=
                            THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
        }
index 0c9d55c..6e7db66 100644
@@ -46,7 +46,7 @@ struct tgec_mdio_controller {
 #define MDIO_DATA_BSY          (1 << 31)
 
 /*
- * Wait untill the MDIO bus is free
+ * Wait until the MDIO bus is free
  */
 static int xgmac_wait_until_free(struct device *dev,
                                 struct tgec_mdio_controller __iomem *regs)
@@ -163,7 +163,7 @@ static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
        /* Return all Fs if nothing was there */
        if (in_be32(&regs->mdio_stat) & MDIO_STAT_RD_ER) {
                dev_err(&bus->dev,
-                       "Error while reading PHY%d reg at %d.%d\n",
+                       "Error while reading PHY%d reg at %d.%hhu\n",
                        phy_id, dev_addr, regnum);
                return 0xffff;
        }
index d50f78a..cca5bca 100644
@@ -1316,7 +1316,6 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
        case e1000_82547:
        case e1000_82547_rev_2:
                return e1000_integrated_phy_loopback(adapter);
-               break;
        default:
                /* Default PHY loopback work is to read the MII
                 * control register and assert bit 14 (loopback mode).
@@ -1325,7 +1324,6 @@ static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
                phy_reg |= MII_CR_LOOPBACK;
                e1000_write_phy_reg(hw, PHY_CTRL, phy_reg);
                return 0;
-               break;
        }
 
        return 8;
@@ -1344,7 +1342,6 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
                case e1000_82545_rev_3:
                case e1000_82546_rev_3:
                        return e1000_set_phy_loopback(adapter);
-                       break;
                default:
                        rctl = er32(RCTL);
                        rctl |= E1000_RCTL_LBM_TCVR;
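
These e1000/e1000e hunks, like the ucc_geth ones earlier, all delete the
same dead construct: a break placed directly after an unconditional
return, which can never execute and only trips static checkers. In
miniature (hypothetical tags):

    switch (type) {
    case LOOPBACK_FIBER:
            return set_fiber_loopback(adapter);
            break;  /* unreachable: the return above always exits */
    default:
            return -EINVAL;
    }
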
index e9b07cc..1acf503 100644
@@ -902,7 +902,6 @@ static s32 e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
        default:
                e_dbg("Flow control param set incorrectly\n");
                return -E1000_ERR_CONFIG;
-               break;
        }
 
        /* Since auto-negotiation is enabled, take the link out of reset (the
@@ -5041,7 +5040,6 @@ static s32 e1000_get_cable_length(struct e1000_hw *hw, u16 *min_length,
                        break;
                default:
                        return -E1000_ERR_PHY;
-                       break;
                }
        } else if (hw->phy_type == e1000_phy_igp) {     /* For IGP PHY */
                u16 cur_agc_value;
index 218481e..dc79ed8 100644
@@ -95,7 +95,6 @@ static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        /* This can only be done after all function pointers are setup. */
@@ -422,7 +421,6 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
                break;
        case e1000_82573:
                return e1000e_get_phy_id(hw);
-               break;
        case e1000_82574:
        case e1000_82583:
                ret_val = e1e_rphy(hw, MII_PHYSID1, &phy_id);
@@ -440,7 +438,6 @@ static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        return 0;
@@ -1458,7 +1455,6 @@ static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        if (ret_val)
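
A note on the recurring e1000/e1000e hunks above and below: they all make the same mechanical cleanup. A break that directly follows a return in a switch arm can never execute, so it is deleted with no behavior change. A minimal userspace sketch of the shape being cleaned up (the enum and return values are illustrative, not taken from the driver):

    #include <stdio.h>

    enum mac_type { MAC_82545, MAC_82546, MAC_OTHER };

    /* Each case returns directly, so a trailing break could never
     * execute; that is exactly the dead code the hunks remove.
     */
    static int loopback_mode(enum mac_type t)
    {
        switch (t) {
        case MAC_82545:
        case MAC_82546:
            return 1;   /* PHY loopback; no break needed after return */
        default:
            return 0;   /* transceiver loopback */
        }
    }

    int main(void)
    {
        printf("loopback mode: %d\n", loopback_mode(MAC_82545));
        return 0;
    }
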
index d18e892..bb7ab3c 100644 (file)
 #define E1000_TIPG_IPGR2_SHIFT  20
 
 #define MAX_JUMBO_FRAME_SIZE    0x3F00
+#define E1000_TX_PTR_GAP               0x1F
 
 /* Extended Configuration Control and Size */
 #define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
index 815e26c..865ce45 100644 (file)
@@ -1521,11 +1521,9 @@ static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
                switch (hw->mac.type) {
                case e1000_80003es2lan:
                        return e1000_set_es2lan_mac_loopback(adapter);
-                       break;
                case e1000_82571:
                case e1000_82572:
                        return e1000_set_82571_fiber_loopback(adapter);
-                       break;
                default:
                        rctl = er32(RCTL);
                        rctl |= E1000_RCTL_LBM_TCVR;
index 8894ab8..48b74a5 100644 (file)
@@ -572,7 +572,6 @@ static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_PHY;
-               break;
        }
 
        return 0;
@@ -2445,7 +2444,7 @@ s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
                        return ret_val;
                e1e_rphy(hw, PHY_REG(776, 20), &data);
                data &= ~(0x3FF << 2);
-               data |= (0x1A << 2);
+               data |= (E1000_TX_PTR_GAP << 2);
                ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
                if (ret_val)
                        return ret_val;
@@ -4606,14 +4605,23 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
 
                        /* Disable LPLU if both link partners support 100BaseT
                         * EEE and 100Full is advertised on both ends of the
-                        * link.
+                        * link, and enable Auto Enable LPI since there will
+                        * be no driver to enable LPI while in Sx.
                         */
                        if ((eee_advert & I82579_EEE_100_SUPPORTED) &&
                            (dev_spec->eee_lp_ability &
                             I82579_EEE_100_SUPPORTED) &&
-                           (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+                           (hw->phy.autoneg_advertised & ADVERTISE_100_FULL)) {
                                phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
                                              E1000_PHY_CTRL_NOND0A_LPLU);
+
+                               /* Set Auto Enable LPI after link up */
+                               e1e_rphy_locked(hw,
+                                               I217_LPI_GPIO_CTRL, &phy_reg);
+                               phy_reg |= I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+                               e1e_wphy_locked(hw,
+                                               I217_LPI_GPIO_CTRL, phy_reg);
+                       }
                }
 
                /* For i217 Intel Rapid Start Technology support,
@@ -4710,6 +4718,11 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
                        return;
                }
 
+               /* Clear Auto Enable LPI after link up */
+               e1e_rphy_locked(hw, I217_LPI_GPIO_CTRL, &phy_reg);
+               phy_reg &= ~I217_LPI_GPIO_CTRL_AUTO_EN_LPI;
+               e1e_wphy_locked(hw, I217_LPI_GPIO_CTRL, phy_reg);
+
                if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
                        /* Restore clear on SMB if no manageability engine
                         * is present
index 5515126..8066a49 100644 (file)
 #define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_MASK     0x3F00
 #define I217_INBAND_CTRL_LINK_STAT_TX_TIMEOUT_SHIFT    8
 
+/* Low Power Idle GPIO Control */
+#define I217_LPI_GPIO_CTRL                     PHY_REG(772, 18)
+#define I217_LPI_GPIO_CTRL_AUTO_EN_LPI         0x0800
+
 /* PHY Low Power Idle Control */
 #define I82579_LPI_CTRL                                PHY_REG(772, 20)
 #define I82579_LPI_CTRL_100_ENABLE             0x2000
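
The suspend and resume hunks form a matched pair around the new I217_LPI_GPIO_CTRL bit: e1000_suspend_workarounds_ich8lan() sets I217_LPI_GPIO_CTRL_AUTO_EN_LPI so the PHY can enter LPI with no driver present in Sx, and e1000_resume_workarounds_pchlan() clears it again. Both sides use a read-modify-write so neighbouring bits in the register survive. A minimal sketch of that pairing, with a plain integer standing in for the locked PHY accessors:

    #include <stdio.h>

    #define AUTO_EN_LPI 0x0800      /* I217_LPI_GPIO_CTRL_AUTO_EN_LPI above */

    int main(void)
    {
        unsigned int phy_reg = 0x1234;  /* illustrative register contents */

        phy_reg |= AUTO_EN_LPI;         /* suspend path: hardware auto-LPI on */
        printf("suspended: 0x%04X\n", phy_reg);

        phy_reg &= ~AUTO_EN_LPI;        /* resume path: driver takes over again */
        printf("resumed:   0x%04X\n", phy_reg);
        return 0;
    }
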
index 8c386f3..30b74d5 100644 (file)
@@ -787,7 +787,6 @@ static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
        default:
                e_dbg("Flow control param set incorrectly\n");
                return -E1000_ERR_CONFIG;
-               break;
        }
 
        ew32(TXCW, txcw);
index cb37ff1..5885603 100644 (file)
@@ -327,9 +327,12 @@ bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
        } else if ((hw->mac.type == e1000_82574) ||
                   (hw->mac.type == e1000_82583)) {
                u16 data;
+               s32 ret_val;
 
                factps = er32(FACTPS);
-               e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+               ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+               if (ret_val)
+                       return false;
 
                if (!(factps & E1000_FACTPS_MNGCG) &&
                    ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
index 201cc93..65c3aef 100644 (file)
@@ -6033,6 +6033,28 @@ release:
        return retval;
 }
 
+static void e1000e_flush_lpic(struct pci_dev *pdev)
+{
+       struct net_device *netdev = pci_get_drvdata(pdev);
+       struct e1000_adapter *adapter = netdev_priv(netdev);
+       struct e1000_hw *hw = &adapter->hw;
+       u32 ret_val;
+
+       pm_runtime_get_sync(netdev->dev.parent);
+
+       ret_val = hw->phy.ops.acquire(hw);
+       if (ret_val)
+               goto fl_out;
+
+       pr_info("EEE TX LPI TIMER: %08X\n",
+               er32(LPIC) >> E1000_LPIC_LPIET_SHIFT);
+
+       hw->phy.ops.release(hw);
+
+fl_out:
+       pm_runtime_put_sync(netdev->dev.parent);
+}
+
 static int e1000e_pm_freeze(struct device *dev)
 {
        struct net_device *netdev = pci_get_drvdata(to_pci_dev(dev));
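
The new e1000e_flush_lpic() follows the usual kernel bracketing idiom: take the outer runtime-PM reference unconditionally, and if the inner PHY acquire fails, jump to a label that undoes only what was actually taken. A freestanding sketch of that control flow (all names here are illustrative stand-ins):

    #include <stdio.h>

    static int acquire_phy(void)  { return 0; }   /* 0 == success, illustrative */
    static void release_phy(void) { }
    static void pm_get(void)      { }
    static void pm_put(void)      { }

    static void flush_lpic_shape(void)
    {
        pm_get();                   /* outer resource, always paired with pm_put */

        if (acquire_phy())          /* inner resource may fail ... */
            goto out;               /* ... then skip only its release */

        printf("read the LPI timer here\n");
        release_phy();
    out:
        pm_put();
    }

    int main(void)
    {
        flush_lpic_shape();
        return 0;
    }
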
@@ -6333,6 +6355,8 @@ static int e1000e_pm_suspend(struct device *dev)
 {
        struct pci_dev *pdev = to_pci_dev(dev);
 
+       e1000e_flush_lpic(pdev);
+
        e1000e_pm_freeze(dev);
 
        return __e1000_shutdown(pdev, false);
@@ -6357,9 +6381,14 @@ static int e1000e_pm_runtime_idle(struct device *dev)
        struct pci_dev *pdev = to_pci_dev(dev);
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct e1000_adapter *adapter = netdev_priv(netdev);
+       u16 eee_lp;
 
-       if (!e1000e_has_link(adapter))
+       eee_lp = adapter->hw.dev_spec.ich8lan.eee_lp_ability;
+
+       if (!e1000e_has_link(adapter)) {
+               adapter->hw.dev_spec.ich8lan.eee_lp_ability = eee_lp;
                pm_schedule_suspend(dev, 5 * MSEC_PER_SEC);
+       }
 
        return -EBUSY;
 }
@@ -6411,6 +6440,8 @@ static int e1000e_pm_runtime_suspend(struct device *dev)
 
 static void e1000_shutdown(struct pci_dev *pdev)
 {
+       e1000e_flush_lpic(pdev);
+
        e1000e_pm_freeze(&pdev->dev);
 
        __e1000_shutdown(pdev, false);
@@ -6708,6 +6739,7 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        int bars, i, err, pci_using_dac;
        u16 eeprom_data = 0;
        u16 eeprom_apme_mask = E1000_EEPROM_APME;
+       s32 rval = 0;
 
        if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
                aspm_disable_flag = PCIE_LINK_STATE_L0S;
@@ -6940,15 +6972,19 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        } else if (adapter->flags & FLAG_APME_IN_CTRL3) {
                if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
                    (adapter->hw.bus.func == 1))
-                       e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_B,
-                                      1, &eeprom_data);
+                       rval = e1000_read_nvm(&adapter->hw,
+                                             NVM_INIT_CONTROL3_PORT_B,
+                                             1, &eeprom_data);
                else
-                       e1000_read_nvm(&adapter->hw, NVM_INIT_CONTROL3_PORT_A,
-                                      1, &eeprom_data);
+                       rval = e1000_read_nvm(&adapter->hw,
+                                             NVM_INIT_CONTROL3_PORT_A,
+                                             1, &eeprom_data);
        }
 
        /* fetch WoL from EEPROM */
-       if (eeprom_data & eeprom_apme_mask)
+       if (rval)
+               e_dbg("NVM read error getting WoL initial values: %d\n", rval);
+       else if (eeprom_data & eeprom_apme_mask)
                adapter->eeprom_wol |= E1000_WUFC_MAG;
 
        /* now that we have the eeprom settings, apply the special cases
@@ -6967,7 +7003,12 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                device_wakeup_enable(&pdev->dev);
 
        /* save off EEPROM version number */
-       e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+       rval = e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+       if (rval) {
+               e_dbg("NVM read error getting EEPROM version: %d\n", rval);
+               adapter->eeprom_vers = 0;
+       }
 
        /* reset the hardware with the new settings */
        e1000e_reset(adapter);
index b1f212b..fa6b103 100644 (file)
@@ -327,8 +327,10 @@ s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
 
                ew32(EERD, eerd);
                ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
-               if (ret_val)
+               if (ret_val) {
+                       e_dbg("NVM read error: %d\n", ret_val);
                        break;
+               }
 
                data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
        }
index d9eb80a..4b94ddb 100644 (file)
@@ -44,3 +44,4 @@ i40e-objs := i40e_main.o \
        i40e_virtchnl_pf.o
 
 i40e-$(CONFIG_I40E_DCB) += i40e_dcb.o i40e_dcb_nl.o
+i40e-$(CONFIG_FCOE:m=y) += i40e_fcoe.o
index 6598584..801da39 100644 (file)
@@ -54,6 +54,9 @@
 #include <linux/ptp_clock_kernel.h>
 #include "i40e_type.h"
 #include "i40e_prototype.h"
+#ifdef I40E_FCOE
+#include "i40e_fcoe.h"
+#endif
 #include "i40e_virtchnl.h"
 #include "i40e_virtchnl_pf.h"
 #include "i40e_txrx.h"
 #define I40E_MAX_QUEUES_PER_TC        64 /* should be a power of 2 */
 #define I40E_FDIR_RING                0
 #define I40E_FDIR_RING_COUNT          32
+#ifdef I40E_FCOE
+#define I40E_DEFAULT_FCOE             8 /* default number of QPs for FCoE */
+#define I40E_MINIMUM_FCOE             1 /* minimum number of QPs for FCoE */
+#endif /* I40E_FCOE */
 #define I40E_MAX_AQ_BUF_SIZE          4096
 #define I40E_AQ_LEN                   32
 #define I40E_AQ_WORK_LIMIT            16
 #define I40E_MAX_USER_PRIORITY        8
 #define I40E_DEFAULT_MSG_ENABLE       4
+#define I40E_QUEUE_WAIT_RETRY_LIMIT   10
 
 #define I40E_NVM_VERSION_LO_SHIFT  0
 #define I40E_NVM_VERSION_LO_MASK   (0xff << I40E_NVM_VERSION_LO_SHIFT)
@@ -133,7 +141,9 @@ enum i40e_state_t {
        __I40E_EMP_RESET_REQUESTED,
        __I40E_FILTER_OVERFLOW_PROMISC,
        __I40E_SUSPENDED,
+       __I40E_PTP_TX_IN_PROGRESS,
        __I40E_BAD_EEPROM,
+       __I40E_DOWN_REQUESTED,
 };
 
 enum i40e_interrupt_policy {
@@ -152,7 +162,7 @@ struct i40e_lump_tracking {
 #define I40E_DEFAULT_ATR_SAMPLE_RATE   20
 #define I40E_FDIR_MAX_RAW_PACKET_SIZE  512
 #define I40E_FDIR_BUFFER_FULL_MARGIN   10
-#define I40E_FDIR_BUFFER_HEAD_ROOM     200
+#define I40E_FDIR_BUFFER_HEAD_ROOM     32
 
 enum i40e_fd_stat_idx {
        I40E_FD_STAT_ATR,
@@ -222,6 +232,10 @@ struct i40e_pf {
        u16 num_vmdq_msix;         /* num queue vectors per vmdq pool */
        u16 num_req_vfs;           /* num vfs requested for this vf */
        u16 num_vf_qps;            /* num queue pairs per vf */
+#ifdef I40E_FCOE
+       u16 num_fcoe_qps;          /* num fcoe queues this pf has set up */
+       u16 num_fcoe_msix;         /* num queue vectors per fcoe pool */
+#endif /* I40E_FCOE */
        u16 num_lan_qps;           /* num lan queues this pf has set up */
        u16 num_lan_msix;          /* num queue vectors for the base pf vsi */
        int queues_left;           /* queues left unclaimed */
@@ -262,6 +276,9 @@ struct i40e_pf {
 #define I40E_FLAG_VMDQ_ENABLED                 (u64)(1 << 7)
 #define I40E_FLAG_FDIR_REQUIRES_REINIT         (u64)(1 << 8)
 #define I40E_FLAG_NEED_LINK_UPDATE             (u64)(1 << 9)
+#ifdef I40E_FCOE
+#define I40E_FLAG_FCOE_ENABLED                 (u64)(1 << 11)
+#endif /* I40E_FCOE */
 #define I40E_FLAG_IN_NETPOLL                   (u64)(1 << 12)
 #define I40E_FLAG_16BYTE_RX_DESC_ENABLED       (u64)(1 << 13)
 #define I40E_FLAG_CLEAN_ADMINQ                 (u64)(1 << 14)
@@ -277,11 +294,16 @@ struct i40e_pf {
 #ifdef CONFIG_I40E_VXLAN
 #define I40E_FLAG_VXLAN_FILTER_SYNC            (u64)(1 << 27)
 #endif
+#define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
 #define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
 
+#ifdef I40E_FCOE
+       struct i40e_fcoe fcoe;
+
+#endif /* I40E_FCOE */
        bool stat_offsets_loaded;
        struct i40e_hw_port_stats stats;
        struct i40e_hw_port_stats stats_offsets;
@@ -348,6 +370,7 @@ struct i40e_pf {
        u32 rx_hwtstamp_cleared;
        bool ptp_tx;
        bool ptp_rx;
+       u16 rss_table_size;
 };
 
 struct i40e_mac_filter {
@@ -359,6 +382,7 @@ struct i40e_mac_filter {
        bool is_vf;             /* filter belongs to a VF */
        bool is_netdev;         /* filter belongs to a netdev */
        bool changed;           /* filter needs to be sync'd to the HW */
+       bool is_laa;            /* filter is a Locally Administered Address */
 };
 
 struct i40e_veb {
@@ -402,6 +426,11 @@ struct i40e_vsi {
        struct rtnl_link_stats64 net_stats_offsets;
        struct i40e_eth_stats eth_stats;
        struct i40e_eth_stats eth_stats_offsets;
+#ifdef I40E_FCOE
+       struct i40e_fcoe_stats fcoe_stats;
+       struct i40e_fcoe_stats fcoe_stats_offsets;
+       bool fcoe_stat_offsets_loaded;
+#endif
        u32 tx_restart;
        u32 tx_busy;
        u32 rx_buf_failed;
@@ -578,6 +607,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add);
 void i40e_fdir_check_and_reenable(struct i40e_pf *pf);
 int i40e_get_current_fd_count(struct i40e_pf *pf);
+int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf);
 bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features);
 void i40e_set_ethtool_ops(struct net_device *netdev);
 struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
@@ -591,6 +621,11 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
 int i40e_vsi_release(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_lookup(struct i40e_pf *pf, enum i40e_vsi_type type,
                                 struct i40e_vsi *start_vsi);
+#ifdef I40E_FCOE
+void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+                             struct i40e_vsi_context *ctxt,
+                             u8 enabled_tc, bool is_add);
+#endif
 int i40e_vsi_control_rings(struct i40e_vsi *vsi, bool enable);
 int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count);
 struct i40e_veb *i40e_veb_setup(struct i40e_pf *pf, u16 flags, u16 uplink_seid,
@@ -614,9 +649,24 @@ static inline void i40e_dbg_init(void) {}
 static inline void i40e_dbg_exit(void) {}
 #endif /* CONFIG_DEBUG_FS*/
 void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector);
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector);
 void i40e_irq_dynamic_disable_icr0(struct i40e_pf *pf);
 void i40e_irq_dynamic_enable_icr0(struct i40e_pf *pf);
+#ifdef I40E_FCOE
+struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+                                            struct net_device *netdev,
+                                            struct rtnl_link_stats64 *storage);
+int i40e_set_mac(struct net_device *netdev, void *p);
+void i40e_set_rx_mode(struct net_device *netdev);
+#endif
 int i40e_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+#ifdef I40E_FCOE
+void i40e_tx_timeout(struct net_device *netdev);
+int i40e_vlan_rx_add_vid(struct net_device *netdev,
+                        __always_unused __be16 proto, u16 vid);
+int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+                         __always_unused __be16 proto, u16 vid);
+#endif
 int i40e_vsi_open(struct i40e_vsi *vsi);
 void i40e_vlan_stripping_disable(struct i40e_vsi *vsi);
 int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid);
@@ -626,6 +676,26 @@ struct i40e_mac_filter *i40e_put_mac_in_vlan(struct i40e_vsi *vsi, u8 *macaddr,
 bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
 struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, u8 *macaddr,
                                      bool is_vf, bool is_netdev);
+#ifdef I40E_FCOE
+int i40e_open(struct net_device *netdev);
+int i40e_close(struct net_device *netdev);
+int i40e_setup_tc(struct net_device *netdev, u8 tc);
+void i40e_netpoll(struct net_device *netdev);
+int i40e_fcoe_enable(struct net_device *netdev);
+int i40e_fcoe_disable(struct net_device *netdev);
+int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt);
+u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf);
+void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi);
+void i40e_fcoe_vsi_setup(struct i40e_pf *pf);
+int i40e_init_pf_fcoe(struct i40e_pf *pf);
+int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi);
+void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi);
+int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb);
+void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc, u8 prog_id);
+#endif /* I40E_FCOE */
 void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
 #ifdef CONFIG_I40E_DCB
 void i40e_dcbnl_flush_apps(struct i40e_pf *pf,
index 7a02749..b29c157 100644 (file)
@@ -38,8 +38,8 @@ static void i40e_resume_aq(struct i40e_hw *hw);
  **/
 static inline bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
 {
-       return (desc->opcode == i40e_aqc_opc_nvm_erase) ||
-              (desc->opcode == i40e_aqc_opc_nvm_update);
+       return (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_erase)) ||
+               (desc->opcode == cpu_to_le16(i40e_aqc_opc_nvm_update));
 }
 
 /**
@@ -55,16 +55,24 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
+               hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+               hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
+               hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+               hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
+               hw->aq.asq.bal  = I40E_PF_ATQBAL;
+               hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
+               hw->aq.arq.bal  = I40E_PF_ARQBAL;
+               hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
 }
 
@@ -296,27 +304,18 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the transmit queue */
-               wr32(hw, I40E_VF_ATQBAH1,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQBAL1,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
-                                         I40E_VF_ATQLEN1_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ATQBAL1);
-       } else {
-               /* configure the transmit queue */
-               wr32(hw, I40E_PF_ATQBAH,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQBAL,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
-                                         I40E_PF_ATQLEN_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ATQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.asq.head, 0);
+       wr32(hw, hw->aq.asq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+                                 I40E_PF_ATQLEN_ATQENABLE_MASK));
+       wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+       wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.asq.bal);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -334,30 +333,21 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the receive queue */
-               wr32(hw, I40E_VF_ARQBAH1,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQBAL1,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
-                                         I40E_VF_ARQLEN1_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ARQBAL1);
-       } else {
-               /* configure the receive queue */
-               wr32(hw, I40E_PF_ARQBAH,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQBAL,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
-                                         I40E_PF_ARQLEN_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ARQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.arq.head, 0);
+       wr32(hw, hw->aq.arq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+                                 I40E_PF_ARQLEN_ARQENABLE_MASK));
+       wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+       wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.arq.bal);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -499,6 +489,8 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
+       wr32(hw, hw->aq.asq.bal, 0);
+       wr32(hw, hw->aq.asq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.asq_mutex);
@@ -530,6 +522,8 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
+       wr32(hw, hw->aq.arq.bal, 0);
+       wr32(hw, hw->aq.arq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.arq_mutex);
@@ -577,6 +571,9 @@ i40e_status i40e_init_adminq(struct i40e_hw *hw)
        /* Set up register offsets */
        i40e_adminq_init_regs(hw);
 
+       /* setup ASQ command write back timeout */
+       hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
@@ -677,6 +674,10 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "%s: ntc %d head %d.\n", __func__, ntc,
+                          rd32(hw, hw->aq.asq.head));
+
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
@@ -736,6 +737,15 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
+       u32  val = 0;
+
+       val = rd32(hw, hw->aq.asq.head);
+       if (val >= hw->aq.num_asq_entries) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: head overrun at %d\n", val);
+               status = I40E_ERR_QUEUE_EMPTY;
+               goto asq_send_command_exit;
+       }
 
        if (hw->aq.asq.count == 0) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
@@ -829,6 +839,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
        }
 
        /* bump the tail */
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -852,7 +863,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                        /* ugh! delay while spin_lock */
                        udelay(delay_len);
                        total_delay += delay_len;
-               } while (total_delay <  I40E_ASQ_CMD_TIMEOUT);
+               } while (total_delay < hw->aq.asq_cmd_timeout);
        }
 
        /* if ready, copy the desc back to temp */
@@ -866,6 +877,7 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);
+
                        /* strip off FW internal code */
                        retval &= 0xff;
                }
@@ -877,8 +889,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
        }
 
-       if (i40e_is_nvm_update_op(desc))
-               hw->aq.nvm_busy = true;
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                  "AQTX: desc and buffer writeback:\n");
+       i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
 
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
@@ -889,6 +902,9 @@ i40e_status i40e_asq_send_command(struct i40e_hw *hw,
                status = I40E_ERR_ADMIN_QUEUE_TIMEOUT;
        }
 
+       if (!status && i40e_is_nvm_update_op(desc))
+               hw->aq.nvm_busy = true;
+
 asq_send_command_error:
        mutex_unlock(&hw->aq.asq_mutex);
 asq_send_command_exit:
@@ -951,10 +967,6 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;
-       i40e_debug_aq(hw,
-                     I40E_DEBUG_AQ_COMMAND,
-                     (void *)desc,
-                     hw->aq.arq.r.arq_bi[desc_idx].va);
 
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
@@ -965,17 +977,17 @@ i40e_status i40e_clean_arq_element(struct i40e_hw *hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
-       } else {
-               e->desc = *desc;
-               datalen = le16_to_cpu(desc->datalen);
-               e->msg_size = min(datalen, e->msg_size);
-               if (e->msg_buf != NULL && (e->msg_size != 0))
-                       memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
-                              e->msg_size);
        }
 
-       if (i40e_is_nvm_update_op(&e->desc))
-               hw->aq.nvm_busy = false;
+       e->desc = *desc;
+       datalen = le16_to_cpu(desc->datalen);
+       e->msg_size = min(datalen, e->msg_size);
+       if (e->msg_buf != NULL && (e->msg_size != 0))
+               memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+                      e->msg_size);
+
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+       i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
 
        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
@@ -1006,6 +1018,14 @@ clean_arq_element_out:
                *pending = (ntc > ntu ? hw->aq.arq.count : 0) + (ntu - ntc);
        mutex_unlock(&hw->aq.arq_mutex);
 
+       if (i40e_is_nvm_update_op(&e->desc)) {
+               hw->aq.nvm_busy = false;
+               if (hw->aq.nvm_release_on_done) {
+                       i40e_release_nvm(hw);
+                       hw->aq.nvm_release_on_done = false;
+               }
+       }
+
        return ret_code;
 }
 
index b1552fb..ba38a89 100644 (file)
@@ -56,6 +56,8 @@ struct i40e_adminq_ring {
        u32 head;
        u32 tail;
        u32 len;
+       u32 bah;
+       u32 bal;
 };
 
 /* ASQ transaction details */
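
Storing the BAH/BAL offsets in struct i40e_adminq_ring is what lets the i40e_config_asq_regs()/i40e_config_arq_regs() rewrite above drop the PF-versus-VF branches: i40e_adminq_init_regs() picks the register set once, and everything downstream just dereferences the ring. A small sketch of that table-driven pattern (the offset values are illustrative):

    #include <stdio.h>

    struct adminq_ring {
        unsigned int bal;       /* base-address-low offset, chosen at init */
    };

    /* One code path serves both PF and VF once the offsets are data. */
    static void config_ring(const struct adminq_ring *ring)
    {
        printf("program base-address-low at offset 0x%08X\n", ring->bal);
    }

    int main(void)
    {
        struct adminq_ring pf = { .bal = 0x00080000 };  /* illustrative PF offset */
        struct adminq_ring vf = { .bal = 0x00007C00 };  /* illustrative VF offset */

        config_ring(&pf);
        config_ring(&vf);
        return 0;
    }
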
@@ -82,6 +84,7 @@ struct i40e_arq_event_info {
 struct i40e_adminq_info {
        struct i40e_adminq_ring arq;    /* receive queue */
        struct i40e_adminq_ring asq;    /* send queue */
+       u32 asq_cmd_timeout;            /* send queue cmd write back timeout */
        u16 num_arq_entries;            /* receive queue depth */
        u16 num_asq_entries;            /* send queue depth */
        u16 arq_buf_size;               /* receive queue buffer size */
@@ -91,6 +94,7 @@ struct i40e_adminq_info {
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        bool nvm_busy;
+       bool nvm_release_on_done;
 
        struct mutex asq_mutex; /* Send queue lock */
        struct mutex arq_mutex; /* Receive queue lock */
@@ -100,6 +104,41 @@ struct i40e_adminq_info {
        enum i40e_admin_queue_err arq_last_status;
 };
 
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * @aq_rc: AdminQ error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+{
+       static const int aq_to_posix[] = {
+               0,           /* I40E_AQ_RC_OK */
+               -EPERM,      /* I40E_AQ_RC_EPERM */
+               -ENOENT,     /* I40E_AQ_RC_ENOENT */
+               -ESRCH,      /* I40E_AQ_RC_ESRCH */
+               -EINTR,      /* I40E_AQ_RC_EINTR */
+               -EIO,        /* I40E_AQ_RC_EIO */
+               -ENXIO,      /* I40E_AQ_RC_ENXIO */
+               -E2BIG,      /* I40E_AQ_RC_E2BIG */
+               -EAGAIN,     /* I40E_AQ_RC_EAGAIN */
+               -ENOMEM,     /* I40E_AQ_RC_ENOMEM */
+               -EACCES,     /* I40E_AQ_RC_EACCES */
+               -EFAULT,     /* I40E_AQ_RC_EFAULT */
+               -EBUSY,      /* I40E_AQ_RC_EBUSY */
+               -EEXIST,     /* I40E_AQ_RC_EEXIST */
+               -EINVAL,     /* I40E_AQ_RC_EINVAL */
+               -ENOTTY,     /* I40E_AQ_RC_ENOTTY */
+               -ENOSPC,     /* I40E_AQ_RC_ENOSPC */
+               -ENOSYS,     /* I40E_AQ_RC_ENOSYS */
+               -ERANGE,     /* I40E_AQ_RC_ERANGE */
+               -EPIPE,      /* I40E_AQ_RC_EFLUSHED */
+               -ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
+               -EROFS,      /* I40E_AQ_RC_EMODE */
+               -EFBIG,      /* I40E_AQ_RC_EFBIG */
+       };
+
+       /* aq_rc comes from firmware; don't index past the table */
+       if (aq_rc >= ARRAY_SIZE(aq_to_posix))
+               return -ERANGE;
+
+       return aq_to_posix[aq_rc];
+}
+
 /* general information */
 #define I40E_AQ_LARGE_BUF      512
 #define I40E_ASQ_CMD_TIMEOUT   100000  /* usecs */
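
i40e_aq_rc_to_posix() exists so callers can translate a firmware AdminQ return code straight into a negative errno suitable for user space. A hedged userspace sketch of such a caller; the index value 12 corresponds to I40E_AQ_RC_EBUSY purely by the table ordering shown above:

    #include <errno.h>
    #include <stdio.h>

    /* Mirror of the mapping above: the AdminQ code is the array index. */
    static const int aq_to_posix[] = {
        0, -EPERM, -ENOENT, -ESRCH, -EINTR, -EIO, -ENXIO, -E2BIG,
        -EAGAIN, -ENOMEM, -EACCES, -EFAULT, -EBUSY, -EEXIST, -EINVAL,
        -ENOTTY, -ENOSPC, -ENOSYS, -ERANGE, -EPIPE, -ESPIPE, -EROFS,
        -EFBIG,
    };

    int main(void)
    {
        unsigned int aq_rc = 12;    /* I40E_AQ_RC_EBUSY by table position */

        if (aq_rc < sizeof(aq_to_posix) / sizeof(aq_to_posix[0]))
            printf("errno = %d\n", aq_to_posix[aq_rc]);     /* -16 (-EBUSY) */
        return 0;
    }
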
index 6e65f19..df43e7c 100644 (file)
@@ -554,7 +554,6 @@ i40e_status i40e_init_shared_code(struct i40e_hw *hw)
                break;
        default:
                return I40E_ERR_DEVICE_NOT_SUPPORTED;
-               break;
        }
 
        hw->phy.get_link_info = true;
@@ -654,6 +653,31 @@ i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
        return status;
 }
 
+/**
+ * i40e_get_port_mac_addr - get Port MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to Port MAC address
+ *
+ * Reads the adapter's Port MAC address
+ **/
+i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+       struct i40e_aqc_mac_address_read_data addrs;
+       i40e_status status;
+       u16 flags = 0;
+
+       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+       if (status)
+               return status;
+
+       if (flags & I40E_AQC_PORT_ADDR_VALID)
+               memcpy(mac_addr, &addrs.port_mac, sizeof(addrs.port_mac));
+       else
+               status = I40E_ERR_INVALID_MAC_ADDR;
+
+       return status;
+}
+
 /**
  * i40e_pre_tx_queue_cfg - pre tx queue configure
  * @hw: pointer to the HW structure
@@ -669,8 +693,10 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
        u32 reg_block = 0;
        u32 reg_val;
 
-       if (abs_queue_idx >= 128)
+       if (abs_queue_idx >= 128) {
                reg_block = abs_queue_idx / 128;
+               abs_queue_idx %= 128;
+       }
 
        reg_val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
        reg_val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
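
The added abs_queue_idx %= 128 is the substance of this fix: a global queue number has to be split into a 128-entry register block and an index within that block before it lands in the QINDX field; previously the raw index leaked through for queues 128 and above. Worked example: absolute queue 300 belongs in block 300 / 128 = 2 at index 300 % 128 = 44.

    #include <stdio.h>

    int main(void)
    {
        unsigned int abs_queue_idx = 300;   /* illustrative global queue number */
        unsigned int reg_block = 0;

        if (abs_queue_idx >= 128) {
            reg_block = abs_queue_idx / 128;    /* which TXPRE_QDIS register */
            abs_queue_idx %= 128;               /* index within that register */
        }
        printf("block %u, index %u\n", reg_block, abs_queue_idx);   /* 2, 44 */
        return 0;
    }
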
@@ -683,6 +709,33 @@ void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable)
 
        wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), reg_val);
 }
+#ifdef I40E_FCOE
+
+/**
+ * i40e_get_san_mac_addr - get SAN MAC address
+ * @hw: pointer to the HW structure
+ * @mac_addr: pointer to SAN MAC address
+ *
+ * Reads the adapter's SAN MAC address from NVM
+ **/
+i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr)
+{
+       struct i40e_aqc_mac_address_read_data addrs;
+       i40e_status status;
+       u16 flags = 0;
+
+       status = i40e_aq_mac_address_read(hw, &flags, &addrs, NULL);
+       if (status)
+               return status;
+
+       if (flags & I40E_AQC_SAN_ADDR_VALID)
+               memcpy(mac_addr, &addrs.pf_san_mac, sizeof(addrs.pf_san_mac));
+       else
+               status = I40E_ERR_INVALID_MAC_ADDR;
+
+       return status;
+}
+#endif
 
 /**
  * i40e_get_media_type - Gets media type
@@ -810,6 +863,99 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
        return 0;
 }
 
+/**
+ * i40e_clear_hw - clear out any left over hw state
+ * @hw: pointer to the hw struct
+ *
+ * Clear queues and interrupts, typically called at init time,
+ * but after the capabilities have been found so we know how many
+ * queues and msix vectors have been allocated.
+ **/
+void i40e_clear_hw(struct i40e_hw *hw)
+{
+       u32 num_queues, base_queue;
+       u32 num_pf_int;
+       u32 num_vf_int;
+       u32 num_vfs;
+       u32 i, j;
+       u32 val;
+       u32 eol = 0x7ff;
+
+       /* get number of interrupts, queues, and vfs */
+       val = rd32(hw, I40E_GLPCI_CNF2);
+       num_pf_int = (val & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
+                    I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
+       num_vf_int = (val & I40E_GLPCI_CNF2_MSI_X_VF_N_MASK) >>
+                    I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT;
+
+       val = rd32(hw, I40E_PFLAN_QALLOC);
+       base_queue = (val & I40E_PFLAN_QALLOC_FIRSTQ_MASK) >>
+                    I40E_PFLAN_QALLOC_FIRSTQ_SHIFT;
+       j = (val & I40E_PFLAN_QALLOC_LASTQ_MASK) >>
+           I40E_PFLAN_QALLOC_LASTQ_SHIFT;
+       if (val & I40E_PFLAN_QALLOC_VALID_MASK)
+               num_queues = (j - base_queue) + 1;
+       else
+               num_queues = 0;
+
+       val = rd32(hw, I40E_PF_VT_PFALLOC);
+       i = (val & I40E_PF_VT_PFALLOC_FIRSTVF_MASK) >>
+           I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT;
+       j = (val & I40E_PF_VT_PFALLOC_LASTVF_MASK) >>
+           I40E_PF_VT_PFALLOC_LASTVF_SHIFT;
+       if (val & I40E_PF_VT_PFALLOC_VALID_MASK)
+               num_vfs = (j - i) + 1;
+       else
+               num_vfs = 0;
+
+       /* stop all the interrupts */
+       wr32(hw, I40E_PFINT_ICR0_ENA, 0);
+       val = 0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       for (i = 0; i < num_pf_int - 2; i++)
+               wr32(hw, I40E_PFINT_DYN_CTLN(i), val);
+
+       /* Set the FIRSTQ_INDX field to 0x7FF in PFINT_LNKLSTx */
+       val = eol << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_LNKLST0, val);
+       for (i = 0; i < num_pf_int - 2; i++)
+               wr32(hw, I40E_PFINT_LNKLSTN(i), val);
+       val = eol << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT;
+       for (i = 0; i < num_vfs; i++)
+               wr32(hw, I40E_VPINT_LNKLST0(i), val);
+       for (i = 0; i < num_vf_int - 2; i++)
+               wr32(hw, I40E_VPINT_LNKLSTN(i), val);
+
+       /* warn the HW of the coming Tx disables */
+       for (i = 0; i < num_queues; i++) {
+               u32 abs_queue_idx = base_queue + i;
+               u32 reg_block = 0;
+
+               if (abs_queue_idx >= 128) {
+                       reg_block = abs_queue_idx / 128;
+                       abs_queue_idx %= 128;
+               }
+
+               val = rd32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block));
+               val &= ~I40E_GLLAN_TXPRE_QDIS_QINDX_MASK;
+               val |= (abs_queue_idx << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT);
+               val |= I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK;
+
+               wr32(hw, I40E_GLLAN_TXPRE_QDIS(reg_block), val);
+       }
+       udelay(400);
+
+       /* stop all the queues */
+       for (i = 0; i < num_queues; i++) {
+               wr32(hw, I40E_QINT_TQCTL(i), 0);
+               wr32(hw, I40E_QTX_ENA(i), 0);
+               wr32(hw, I40E_QINT_RQCTL(i), 0);
+               wr32(hw, I40E_QRX_ENA(i), 0);
+       }
+
+       /* short wait for all queue disables to settle */
+       udelay(50);
+}
+
 /**
  * i40e_clear_pxe_mode - clear pxe operations mode
  * @hw: pointer to the hw struct
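
i40e_clear_hw() leans on one idiom throughout: mask off a packed field, shift it down, and where a first/last pair is given, derive the count as (last - first) + 1 when the valid bit is set. A self-contained sketch of that extraction; the register value and field layout below are invented for illustration:

    #include <stdio.h>

    #define FIRSTQ_SHIFT    0
    #define FIRSTQ_MASK     (0x7FFu << FIRSTQ_SHIFT)
    #define LASTQ_SHIFT     16
    #define LASTQ_MASK      (0x7FFu << LASTQ_SHIFT)
    #define VALID_MASK      (1u << 31)

    int main(void)
    {
        unsigned int val = VALID_MASK | (40u << LASTQ_SHIFT) | (8u << FIRSTQ_SHIFT);
        unsigned int first = (val & FIRSTQ_MASK) >> FIRSTQ_SHIFT;
        unsigned int last = (val & LASTQ_MASK) >> LASTQ_SHIFT;
        unsigned int count = (val & VALID_MASK) ? (last - first) + 1 : 0;

        printf("queues %u..%u -> %u allocated\n", first, last, count);  /* 8..40 -> 33 */
        return 0;
    }
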
@@ -941,6 +1087,164 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
 
 /* Admin command wrappers */
 
+/**
+ * i40e_aq_get_phy_capabilities
+ * @hw: pointer to the hw struct
+ * @qualified_modules: report Qualified Modules
+ * @report_init: report init capabilities (active are default)
+ * @abilities: structure for PHY capabilities to be filled
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Returns the various PHY abilities supported on the Port.
+ **/
+i40e_status i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+                       bool qualified_modules, bool report_init,
+                       struct i40e_aq_get_phy_abilities_resp *abilities,
+                       struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       i40e_status status;
+       u16 abilities_size = sizeof(struct i40e_aq_get_phy_abilities_resp);
+
+       if (!abilities)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_get_phy_abilities);
+
+       desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_BUF);
+       if (abilities_size > I40E_AQ_LARGE_BUF)
+               desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+       if (qualified_modules)
+               desc.params.external.param0 |=
+                       cpu_to_le32(I40E_AQ_PHY_REPORT_QUALIFIED_MODULES);
+
+       if (report_init)
+               desc.params.external.param0 |=
+                       cpu_to_le32(I40E_AQ_PHY_REPORT_INITIAL_VALUES);
+
+       status = i40e_asq_send_command(hw, &desc, abilities, abilities_size,
+                                      cmd_details);
+
+       if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
+               status = I40E_ERR_UNKNOWN_PHY;
+
+       return status;
+}
+
+/**
+ * i40e_aq_set_phy_config
+ * @hw: pointer to the hw struct
+ * @config: structure with PHY configuration to be set
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set the various PHY configuration parameters
+ * supported on the Port. One or more of the Set PHY config parameters may be
+ * ignored in an MFP mode as the PF may not have the privilege to set some
+ * of the PHY Config parameters. This status will be indicated by the
+ * command response.
+ **/
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+                               struct i40e_aq_set_phy_config *config,
+                               struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aq_set_phy_config *cmd =
+                       (struct i40e_aq_set_phy_config *)&desc.params.raw;
+       enum i40e_status_code status;
+
+       if (!config)
+               return I40E_ERR_PARAM;
+
+       i40e_fill_default_direct_cmd_desc(&desc,
+                                         i40e_aqc_opc_set_phy_config);
+
+       *cmd = *config;
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
+/**
+ * i40e_set_fc
+ * @hw: pointer to the hw struct
+ * @aq_failures: buffer to return details of any failed AdminQ commands
+ * @atomic_restart: whether to restart the link automatically on a config change
+ *
+ * Set the requested flow control mode using set_phy_config.
+ **/
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+                                 bool atomic_restart)
+{
+       enum i40e_fc_mode fc_mode = hw->fc.requested_mode;
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_aq_set_phy_config config;
+       enum i40e_status_code status;
+       u8 pause_mask = 0x0;
+
+       *aq_failures = 0x0;
+
+       switch (fc_mode) {
+       case I40E_FC_FULL:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+               break;
+       case I40E_FC_RX_PAUSE:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_RX;
+               break;
+       case I40E_FC_TX_PAUSE:
+               pause_mask |= I40E_AQ_PHY_FLAG_PAUSE_TX;
+               break;
+       default:
+               break;
+       }
+
+       /* Get the current phy config */
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+                                             NULL);
+       if (status) {
+               *aq_failures |= I40E_SET_FC_AQ_FAIL_GET;
+               return status;
+       }
+
+       memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+       /* clear the old pause settings */
+       config.abilities = abilities.abilities & ~(I40E_AQ_PHY_FLAG_PAUSE_TX) &
+                          ~(I40E_AQ_PHY_FLAG_PAUSE_RX);
+       /* set the new abilities */
+       config.abilities |= pause_mask;
+       /* If the abilities have changed, then set the new config */
+       if (config.abilities != abilities.abilities) {
+               /* Auto restart link so settings take effect */
+               if (atomic_restart)
+                       config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+               /* Copy over all the old settings */
+               config.phy_type = abilities.phy_type;
+               config.link_speed = abilities.link_speed;
+               config.eee_capability = abilities.eee_capability;
+               config.eeer = abilities.eeer_val;
+               config.low_power_ctrl = abilities.d3_lpan;
+               status = i40e_aq_set_phy_config(hw, &config, NULL);
+
+               if (status)
+                       *aq_failures |= I40E_SET_FC_AQ_FAIL_SET;
+       }
+       /* Update the link info */
+       status = i40e_update_link_info(hw, true);
+       if (status) {
+               /* Wait a little bit (on 40G cards it sometimes takes a really
+                * long time for link to come back from the atomic reset)
+                * and try once more
+                */
+               msleep(1000);
+               status = i40e_update_link_info(hw, true);
+       }
+       if (status)
+               *aq_failures |= I40E_SET_FC_AQ_FAIL_UPDATE;
+
+       return status;
+}
+
 /**
  * i40e_aq_clear_pxe_mode
  * @hw: pointer to the hw struct
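
i40e_set_fc() above is a classic read-modify-write against the PHY: fetch the current abilities, strip both pause bits, OR in the requested mask, and only push a new config (optionally with an atomic link restart) when the word actually changed. The pause arithmetic in isolation, with made-up bit values:

    #include <stdio.h>

    #define PAUSE_TX 0x1    /* illustrative stand-in for I40E_AQ_PHY_FLAG_PAUSE_TX */
    #define PAUSE_RX 0x2    /* illustrative stand-in for I40E_AQ_PHY_FLAG_PAUSE_RX */

    /* Clear both pause bits, then set the requested ones. */
    static unsigned int apply_fc(unsigned int abilities, unsigned int pause_mask)
    {
        return (abilities & ~(PAUSE_TX | PAUSE_RX)) | pause_mask;
    }

    int main(void)
    {
        unsigned int current = 0x31;    /* TX pause plus unrelated ability bits */
        unsigned int next = apply_fc(current, PAUSE_RX);

        if (next != current)            /* only touch the PHY when it changes */
            printf("write new config 0x%02X\n", next);  /* 0x32 */
        return 0;
    }
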
@@ -971,12 +1275,14 @@ i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
 /**
  * i40e_aq_set_link_restart_an
  * @hw: pointer to the hw struct
+ * @enable_link: if true: enable link, if false: disable link
  * @cmd_details: pointer to command details structure or NULL
  *
  * Sets up the link and restarts the Auto-Negotiation over the link.
  **/
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-                               struct i40e_asq_cmd_details *cmd_details)
+                                       bool enable_link,
+                                       struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
        struct i40e_aqc_set_link_restart_an *cmd =
@@ -987,6 +1293,10 @@ i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
                                          i40e_aqc_opc_set_link_restart_an);
 
        cmd->command = I40E_AQ_PHY_RESTART_AN;
+       if (enable_link)
+               cmd->command |= I40E_AQ_PHY_LINK_ENABLE;
+       else
+               cmd->command &= ~I40E_AQ_PHY_LINK_ENABLE;
 
        status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
 
@@ -1011,6 +1321,7 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
                (struct i40e_aqc_get_link_status *)&desc.params.raw;
        struct i40e_link_status *hw_link_info = &hw->phy.link_info;
        i40e_status status;
+       bool tx_pause, rx_pause;
        u16 command_flags;
 
        i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_link_status);
@@ -1040,6 +1351,18 @@ i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
        hw_link_info->max_frame_size = le16_to_cpu(resp->max_frame_size);
        hw_link_info->pacing = resp->config & I40E_AQ_CONFIG_PACING_MASK;
 
+       /* update fc info */
+       tx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_TX);
+       rx_pause = !!(resp->an_info & I40E_AQ_LINK_PAUSE_RX);
+       if (tx_pause && rx_pause)
+               hw->fc.current_mode = I40E_FC_FULL;
+       else if (tx_pause)
+               hw->fc.current_mode = I40E_FC_TX_PAUSE;
+       else if (rx_pause)
+               hw->fc.current_mode = I40E_FC_RX_PAUSE;
+       else
+               hw->fc.current_mode = I40E_FC_NONE;
+
        if (resp->config & I40E_AQ_CONFIG_CRC_ENA)
                hw_link_info->crc_enable = true;
        else
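
The new flow-control decode collapses the two advertised pause bits into the four standard modes. The full decision table, rendered as a runnable sketch (the enum names stand in for the i40e_fc_mode values):

    #include <stdio.h>

    enum fc_mode { FC_NONE, FC_RX_PAUSE, FC_TX_PAUSE, FC_FULL };

    static enum fc_mode decode_fc(int tx_pause, int rx_pause)
    {
        if (tx_pause && rx_pause)
            return FC_FULL;
        else if (tx_pause)
            return FC_TX_PAUSE;
        else if (rx_pause)
            return FC_RX_PAUSE;
        return FC_NONE;
    }

    int main(void)
    {
        /* Whole truth table: 0/0 NONE, 0/1 RX, 1/0 TX, 1/1 FULL */
        for (int tx = 0; tx <= 1; tx++)
            for (int rx = 0; rx <= 1; rx++)
                printf("tx=%d rx=%d -> mode %d\n", tx, rx, decode_fc(tx, rx));
        return 0;
    }
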
@@ -1061,6 +1384,35 @@ aq_get_link_info_exit:
        return status;
 }
 
+/**
+ * i40e_update_link_info
+ * @hw: pointer to the hw struct
+ * @enable_lse: enable/disable LinkStatusEvent reporting
+ *
+ * Updates the link status and auto-negotiation state for the adapter
+ **/
+i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse)
+{
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       i40e_status status;
+
+       status = i40e_aq_get_link_info(hw, enable_lse, NULL, NULL);
+       if (status)
+               return status;
+
+       status = i40e_aq_get_phy_capabilities(hw, false, false,
+                                             &abilities, NULL);
+       if (status)
+               return status;
+
+       if (abilities.abilities & I40E_AQ_PHY_AN_ENABLED)
+               hw->phy.link_info.an_enabled = true;
+       else
+               hw->phy.link_info.an_enabled = false;
+
+       return status;
+}
+
 /**
  * i40e_aq_add_vsi
  * @hw: pointer to the hw struct
@@ -1649,6 +2001,35 @@ i40e_status i40e_aq_send_msg_to_vf(struct i40e_hw *hw, u16 vfid,
        return status;
 }
 
+/**
+ * i40e_aq_debug_write_register
+ * @hw: pointer to the hw struct
+ * @reg_addr: register address
+ * @reg_val: register value
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Write to a register using the admin queue commands
+ **/
+i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+                                       u32 reg_addr, u64 reg_val,
+                                       struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_debug_reg_read_write *cmd =
+               (struct i40e_aqc_debug_reg_read_write *)&desc.params.raw;
+       i40e_status status;
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_debug_write_reg);
+
+       cmd->address = cpu_to_le32(reg_addr);
+       cmd->value_high = cpu_to_le32((u32)(reg_val >> 32));
+       cmd->value_low = cpu_to_le32((u32)(reg_val & 0xFFFFFFFF));
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+       return status;
+}
+
 /**
  * i40e_aq_set_hmc_resource_profile
  * @hw: pointer to the hw struct
@@ -1796,6 +2177,47 @@ i40e_aq_read_nvm_exit:
        return status;
 }
 
+/**
+ * i40e_aq_erase_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset within the module, in 4 KB units from the module's beginning
+ * @length: length of the section to be erased, in 4 KB units
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Erase the NVM sector using the admin queue commands
+ **/
+i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+                             u32 offset, u16 length, bool last_command,
+                             struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_update *cmd =
+               (struct i40e_aqc_nvm_update *)&desc.params.raw;
+       i40e_status status;
+
+       /* The highest byte of the offset must be zero. */
+       if (offset & 0xFF000000) {
+               status = I40E_ERR_PARAM;
+               goto i40e_aq_erase_nvm_exit;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_erase);
+
+       /* If this is the last command in a series, set the proper flag. */
+       if (last_command)
+               cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+       cmd->module_pointer = module_pointer;
+       cmd->offset = cpu_to_le32(offset);
+       cmd->length = cpu_to_le16(length);
+
+       status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+i40e_aq_erase_nvm_exit:
+       return status;
+}
+
 #define I40E_DEV_FUNC_CAP_SWITCH_MODE  0x01
 #define I40E_DEV_FUNC_CAP_MGMT_MODE    0x02
 #define I40E_DEV_FUNC_CAP_NPAR         0x03
@@ -1839,7 +2261,6 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
        struct i40e_aqc_list_capabilities_element_resp *cap;
        u32 number, logical_id, phys_id;
        struct i40e_hw_capabilities *p;
-       u32 reg_val;
        u32 i = 0;
        u16 id;
 
@@ -1910,11 +2331,7 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
                        break;
                case I40E_DEV_FUNC_CAP_RSS:
                        p->rss = true;
-                       reg_val = rd32(hw, I40E_PFQF_CTL_0);
-                       if (reg_val & I40E_PFQF_CTL_0_HASHLUTSIZE_MASK)
-                               p->rss_table_size = number;
-                       else
-                               p->rss_table_size = 128;
+                       p->rss_table_size = number;
                        p->rss_table_entry_width = logical_id;
                        break;
                case I40E_DEV_FUNC_CAP_RX_QUEUES:
@@ -2030,6 +2447,53 @@ exit:
        return status;
 }
 
+/**
+ * i40e_aq_update_nvm
+ * @hw: pointer to the hw struct
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: byte offset from the module beginning
+ * @length: length of the section to be written (in bytes from the offset)
+ * @data: command buffer (size [bytes] = length)
+ * @last_command: tells if this is the last command in a series
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Update the NVM using the admin queue commands
+ **/
+i40e_status i40e_aq_update_nvm(struct i40e_hw *hw, u8 module_pointer,
+                              u32 offset, u16 length, void *data,
+                              bool last_command,
+                              struct i40e_asq_cmd_details *cmd_details)
+{
+       struct i40e_aq_desc desc;
+       struct i40e_aqc_nvm_update *cmd =
+               (struct i40e_aqc_nvm_update *)&desc.params.raw;
+       i40e_status status;
+
+       /* The highest byte of the offset must be zero. */
+       if (offset & 0xFF000000) {
+               status = I40E_ERR_PARAM;
+               goto i40e_aq_update_nvm_exit;
+       }
+
+       i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_nvm_update);
+
+       /* If this is the last command in a series, set the proper flag. */
+       if (last_command)
+               cmd->command_flags |= I40E_AQ_NVM_LAST_CMD;
+       cmd->module_pointer = module_pointer;
+       cmd->offset = cpu_to_le32(offset);
+       cmd->length = cpu_to_le16(length);
+
+       desc.flags |= cpu_to_le16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+       if (length > I40E_AQ_LARGE_BUF)
+               desc.flags |= cpu_to_le16((u16)I40E_AQ_FLAG_LB);
+
+       status = i40e_asq_send_command(hw, &desc, data, length, cmd_details);
+
+i40e_aq_update_nvm_exit:
+       return status;
+}
+
 /**
  * i40e_aq_get_lldp_mib
  * @hw: pointer to the hw struct
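
Both i40e_aq_erase_nvm() and i40e_aq_update_nvm() reject any offset with a nonzero top byte, which, per the in-code comment, bounds usable offsets to 24 bits, i.e. everything below 16 MiB. A quick check of that boundary:

    #include <stdio.h>

    int main(void)
    {
        /* 0x00FFFFFF is the last acceptable offset; 0x01000000 (16 MiB)
         * is the first one the high-byte test refuses.
         */
        unsigned int offsets[] = { 0x00FFFFFF, 0x01000000 };

        for (int i = 0; i < 2; i++)
            printf("0x%08X -> %s\n", offsets[i],
                   (offsets[i] & 0xFF000000) ? "rejected (I40E_ERR_PARAM)" : "ok");
        return 0;
    }
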
index cffdfc2..5a0cabe 100644 (file)
@@ -697,6 +697,25 @@ static void i40e_dbg_dump_vsi_seid(struct i40e_pf *pf, int seid)
                         vsi->bw_ets_limit_credits[i],
                         vsi->bw_ets_max_quanta[i]);
        }
+#ifdef I40E_FCOE
+       if (vsi->type == I40E_VSI_FCOE) {
+               dev_info(&pf->pdev->dev,
+                        "    fcoe_stats: rx_packets = %llu, rx_dwords = %llu, rx_dropped = %llu\n",
+                        vsi->fcoe_stats.rx_fcoe_packets,
+                        vsi->fcoe_stats.rx_fcoe_dwords,
+                        vsi->fcoe_stats.rx_fcoe_dropped);
+               dev_info(&pf->pdev->dev,
+                        "    fcoe_stats: tx_packets = %llu, tx_dwords = %llu\n",
+                        vsi->fcoe_stats.tx_fcoe_packets,
+                        vsi->fcoe_stats.tx_fcoe_dwords);
+               dev_info(&pf->pdev->dev,
+                        "    fcoe_stats: bad_crc = %llu, last_error = %llu\n",
+                        vsi->fcoe_stats.fcoe_bad_fccrc,
+                        vsi->fcoe_stats.fcoe_last_error);
+               dev_info(&pf->pdev->dev, "    fcoe_stats: ddp_count = %llu\n",
+                        vsi->fcoe_stats.fcoe_ddp_count);
+       }
+#endif
 }
 
 /**
@@ -1238,7 +1257,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
        } else if (strncmp(cmd_buf, "add pvid", 8) == 0) {
                i40e_status ret;
                u16 vid;
-               int v;
+               unsigned int v;
 
                cnt = sscanf(&cmd_buf[8], "%i %u", &vsi_seid, &v);
                if (cnt != 2) {
@@ -1254,7 +1273,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
-               vid = (unsigned)v;
+               vid = v;
                ret = i40e_vsi_add_pvid(vsi, vid);
                if (!ret)
                        dev_info(&pf->pdev->dev,
@@ -1743,6 +1762,9 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, false);
        } else if (strncmp(cmd_buf, "fd-atr on", 9) == 0) {
                i40e_dbg_cmd_fd_ctrl(pf, I40E_FLAG_FD_ATR_ENABLED, true);
+       } else if (strncmp(cmd_buf, "fd current cnt", 14) == 0) {
+               dev_info(&pf->pdev->dev, "FD current total filter count for this interface: %d\n",
+                        i40e_get_current_fd_count(pf));
        } else if (strncmp(cmd_buf, "lldp", 4) == 0) {
                if (strncmp(&cmd_buf[5], "stop", 4) == 0) {
                        int ret;
@@ -1830,7 +1852,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
 
                        ret = i40e_aq_get_lldp_mib(&pf->hw,
                                        I40E_AQ_LLDP_BRIDGE_TYPE_NEAREST_BRIDGE,
-                                       I40E_AQ_LLDP_MIB_LOCAL,
+                                       I40E_AQ_LLDP_MIB_REMOTE,
                                        buff, I40E_LLDPDU_SIZE,
                                        &llen, &rlen, NULL);
                        if (ret) {
@@ -1962,6 +1984,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                dev_info(&pf->pdev->dev, "  rem fd_filter <dest q_index> <flex_off> <pctype> <dest_vsi> <dest_ctl> <fd_status> <cnt_index> <fd_id> <packet_len> <packet>\n");
                dev_info(&pf->pdev->dev, "  fd-atr off\n");
                dev_info(&pf->pdev->dev, "  fd-atr on\n");
+               dev_info(&pf->pdev->dev, "  fd current cnt\n");
                dev_info(&pf->pdev->dev, "  lldp start\n");
                dev_info(&pf->pdev->dev, "  lldp stop\n");
                dev_info(&pf->pdev->dev, "  lldp get local\n");
index 4a488ff..681a9e8 100644
@@ -155,6 +155,19 @@ static struct i40e_stats i40e_gstrings_stats[] = {
        I40E_PF_STAT("rx_lpi_count", stats.rx_lpi_count),
 };
 
+#ifdef I40E_FCOE
+static const struct i40e_stats i40e_gstrings_fcoe_stats[] = {
+       I40E_VSI_STAT("fcoe_bad_fccrc", fcoe_stats.fcoe_bad_fccrc),
+       I40E_VSI_STAT("rx_fcoe_dropped", fcoe_stats.rx_fcoe_dropped),
+       I40E_VSI_STAT("rx_fcoe_packets", fcoe_stats.rx_fcoe_packets),
+       I40E_VSI_STAT("rx_fcoe_dwords", fcoe_stats.rx_fcoe_dwords),
+       I40E_VSI_STAT("fcoe_ddp_count", fcoe_stats.fcoe_ddp_count),
+       I40E_VSI_STAT("fcoe_last_error", fcoe_stats.fcoe_last_error),
+       I40E_VSI_STAT("tx_fcoe_packets", fcoe_stats.tx_fcoe_packets),
+       I40E_VSI_STAT("tx_fcoe_dwords", fcoe_stats.tx_fcoe_dwords),
+};
+
+#endif /* I40E_FCOE */
 #define I40E_QUEUE_STATS_LEN(n) \
        (((struct i40e_netdev_priv *)netdev_priv((n)))->vsi->num_queue_pairs \
            * 2 /* Tx and Rx together */                                     \
@@ -162,9 +175,17 @@ static struct i40e_stats i40e_gstrings_stats[] = {
 #define I40E_GLOBAL_STATS_LEN  ARRAY_SIZE(i40e_gstrings_stats)
 #define I40E_NETDEV_STATS_LEN   ARRAY_SIZE(i40e_gstrings_net_stats)
 #define I40E_MISC_STATS_LEN    ARRAY_SIZE(i40e_gstrings_misc_stats)
+#ifdef I40E_FCOE
+#define I40E_FCOE_STATS_LEN    ARRAY_SIZE(i40e_gstrings_fcoe_stats)
+#define I40E_VSI_STATS_LEN(n)  (I40E_NETDEV_STATS_LEN + \
+                                I40E_FCOE_STATS_LEN + \
+                                I40E_MISC_STATS_LEN + \
+                                I40E_QUEUE_STATS_LEN((n)))
+#else
 #define I40E_VSI_STATS_LEN(n)   (I40E_NETDEV_STATS_LEN + \
                                 I40E_MISC_STATS_LEN + \
                                 I40E_QUEUE_STATS_LEN((n)))
+#endif /* I40E_FCOE */
 #define I40E_PFC_STATS_LEN ( \
                (FIELD_SIZEOF(struct i40e_pf, stats.priority_xoff_rx) + \
                 FIELD_SIZEOF(struct i40e_pf, stats.priority_xon_rx) + \
@@ -215,52 +236,135 @@ static int i40e_get_settings(struct net_device *netdev,
        /* hardware is either in 40G mode or 10G mode
         * NOTE: this section initializes supported and advertising
         */
+       if (!link_up) {
+               /* Link is down, so fall back on the device ID to decide
+                * what to report; this is mostly a guess that may change
+                * once link comes up
+                */
+               switch (hw->device_id) {
+               case I40E_DEV_ID_QSFP_A:
+               case I40E_DEV_ID_QSFP_B:
+               case I40E_DEV_ID_QSFP_C:
+                       /* pluggable QSFP */
+                       ecmd->supported = SUPPORTED_40000baseSR4_Full |
+                                         SUPPORTED_40000baseCR4_Full |
+                                         SUPPORTED_40000baseLR4_Full;
+                       ecmd->advertising = ADVERTISED_40000baseSR4_Full |
+                                           ADVERTISED_40000baseCR4_Full |
+                                           ADVERTISED_40000baseLR4_Full;
+                       break;
+               case I40E_DEV_ID_KX_B:
+                       /* backplane 40G */
+                       ecmd->supported = SUPPORTED_40000baseKR4_Full;
+                       ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+                       break;
+               case I40E_DEV_ID_KX_C:
+                       /* backplane 10G */
+                       ecmd->supported = SUPPORTED_10000baseKR_Full;
+                       ecmd->advertising = ADVERTISED_10000baseKR_Full;
+                       break;
+               default:
+                       /* all the rest are 10G/1G */
+                       ecmd->supported = SUPPORTED_10000baseT_Full |
+                                         SUPPORTED_1000baseT_Full;
+                       ecmd->advertising = ADVERTISED_10000baseT_Full |
+                                           ADVERTISED_1000baseT_Full;
+                       break;
+               }
+
+               /* skip phy_type use as it is zero when link is down */
+               goto no_valid_phy_type;
+       }
+
        switch (hw_link_info->phy_type) {
        case I40E_PHY_TYPE_40GBASE_CR4:
        case I40E_PHY_TYPE_40GBASE_CR4_CU:
-               ecmd->supported = SUPPORTED_40000baseCR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseCR4_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseCR4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseCR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_KR4:
-               ecmd->supported = SUPPORTED_40000baseKR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseKR4_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_40000baseKR4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_40000baseKR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_SR4:
+       case I40E_PHY_TYPE_XLPPI:
+       case I40E_PHY_TYPE_XLAUI:
                ecmd->supported = SUPPORTED_40000baseSR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseSR4_Full;
                break;
        case I40E_PHY_TYPE_40GBASE_LR4:
                ecmd->supported = SUPPORTED_40000baseLR4_Full;
-               ecmd->advertising = ADVERTISED_40000baseLR4_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_KX4:
-               ecmd->supported = SUPPORTED_10000baseKX4_Full;
-               ecmd->advertising = ADVERTISED_10000baseKX4_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseKX4_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseKX4_Full;
                break;
        case I40E_PHY_TYPE_10GBASE_KR:
-               ecmd->supported = SUPPORTED_10000baseKR_Full;
-               ecmd->advertising = ADVERTISED_10000baseKR_Full;
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseKR_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseKR_Full;
                break;
-       default:
-               if (i40e_is_40G_device(hw->device_id)) {
-                       ecmd->supported = SUPPORTED_40000baseSR4_Full;
-                       ecmd->advertising = ADVERTISED_40000baseSR4_Full;
-               } else {
-                       ecmd->supported = SUPPORTED_10000baseT_Full;
-                       ecmd->advertising = ADVERTISED_10000baseT_Full;
-               }
+       case I40E_PHY_TYPE_10GBASE_SR:
+       case I40E_PHY_TYPE_10GBASE_LR:
+               ecmd->supported = SUPPORTED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_10GBASE_CR1_CU:
+       case I40E_PHY_TYPE_10GBASE_CR1:
+       case I40E_PHY_TYPE_10GBASE_T:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_10000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_10000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_XAUI:
+       case I40E_PHY_TYPE_XFI:
+       case I40E_PHY_TYPE_SFI:
+       case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+               ecmd->supported = SUPPORTED_10000baseT_Full;
                break;
+       case I40E_PHY_TYPE_1000BASE_KX:
+       case I40E_PHY_TYPE_1000BASE_T:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full;
+               break;
+       case I40E_PHY_TYPE_100BASE_TX:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_100baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_100baseT_Full;
+               break;
+       case I40E_PHY_TYPE_SGMII:
+               ecmd->supported = SUPPORTED_Autoneg |
+                                 SUPPORTED_1000baseT_Full |
+                                 SUPPORTED_100baseT_Full;
+               ecmd->advertising = ADVERTISED_Autoneg |
+                                   ADVERTISED_1000baseT_Full |
+                                   ADVERTISED_100baseT_Full;
+               break;
+       default:
+               /* if we got here and link is up something bad is afoot */
+               WARN_ON(link_up);
        }
 
-       ecmd->supported |= SUPPORTED_Autoneg;
-       ecmd->advertising |= ADVERTISED_Autoneg;
+no_valid_phy_type:
+       /* report autoneg state based on whether autoneg completed */
        ecmd->autoneg = ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
                          AUTONEG_ENABLE : AUTONEG_DISABLE);
 
        switch (hw->phy.media_type) {
        case I40E_MEDIA_TYPE_BACKPLANE:
-               ecmd->supported |= SUPPORTED_Backplane;
-               ecmd->advertising |= ADVERTISED_Backplane;
+               ecmd->supported |= SUPPORTED_Autoneg |
+                                  SUPPORTED_Backplane;
+               ecmd->advertising |= ADVERTISED_Autoneg |
+                                    ADVERTISED_Backplane;
                ecmd->port = PORT_NONE;
                break;
        case I40E_MEDIA_TYPE_BASET:
@@ -276,7 +380,6 @@ static int i40e_get_settings(struct net_device *netdev,
                break;
        case I40E_MEDIA_TYPE_FIBER:
                ecmd->supported |= SUPPORTED_FIBRE;
-               ecmd->advertising |= ADVERTISED_FIBRE;
                ecmd->port = PORT_FIBRE;
                break;
        case I40E_MEDIA_TYPE_UNKNOWN:
@@ -287,6 +390,25 @@ static int i40e_get_settings(struct net_device *netdev,
 
        ecmd->transceiver = XCVR_EXTERNAL;
 
+       ecmd->supported |= SUPPORTED_Pause;
+
+       switch (hw->fc.current_mode) {
+       case I40E_FC_FULL:
+               ecmd->advertising |= ADVERTISED_Pause;
+               break;
+       case I40E_FC_TX_PAUSE:
+               ecmd->advertising |= ADVERTISED_Asym_Pause;
+               break;
+       case I40E_FC_RX_PAUSE:
+               ecmd->advertising |= (ADVERTISED_Pause |
+                                     ADVERTISED_Asym_Pause);
+               break;
+       default:
+               ecmd->advertising &= ~(ADVERTISED_Pause |
+                                      ADVERTISED_Asym_Pause);
+               break;
+       }
+
        if (link_up) {
                switch (link_speed) {
                case I40E_LINK_SPEED_40GB:
@@ -296,6 +418,9 @@ static int i40e_get_settings(struct net_device *netdev,
                case I40E_LINK_SPEED_10GB:
                        ethtool_cmd_speed_set(ecmd, SPEED_10000);
                        break;
+               case I40E_LINK_SPEED_1GB:
+                       ethtool_cmd_speed_set(ecmd, SPEED_1000);
+                       break;
                default:
                        break;
                }
@@ -308,6 +433,182 @@ static int i40e_get_settings(struct net_device *netdev,
        return 0;
 }
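
Editor's note: everything i40e_get_settings() reports is surfaced to userspace through the ETHTOOL_GSET ioctl, which is what `ethtool <dev>` prints. A minimal userspace sketch (interface name eth0 is an assumption):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&ecmd;

	/* the driver's .get_settings handler fills ecmd in */
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
		printf("speed: %u Mb/s, autoneg: %u\n",
		       ethtool_cmd_speed(&ecmd), ecmd.autoneg);
	return 0;
}
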
 
+/**
+ * i40e_set_settings - Set Speed and Duplex
+ * @netdev: network interface device structure
+ * @ecmd: ethtool command
+ *
+ * Set speed/duplex per media_types advertised/forced
+ **/
+static int i40e_set_settings(struct net_device *netdev,
+                            struct ethtool_cmd *ecmd)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_aq_get_phy_abilities_resp abilities;
+       struct i40e_aq_set_phy_config config;
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       struct ethtool_cmd safe_ecmd;
+       i40e_status status = 0;
+       bool change = false;
+       int err = 0;
+       u8 autoneg;
+       u32 advertise;
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       if (hw->phy.media_type != I40E_MEDIA_TYPE_BASET &&
+           hw->phy.media_type != I40E_MEDIA_TYPE_FIBER &&
+           hw->phy.media_type != I40E_MEDIA_TYPE_BACKPLANE)
+               return -EOPNOTSUPP;
+
+       /* get our own copy of the bits to check against */
+       memset(&safe_ecmd, 0, sizeof(struct ethtool_cmd));
+       i40e_get_settings(netdev, &safe_ecmd);
+
+       /* save autoneg and speed out of ecmd */
+       autoneg = ecmd->autoneg;
+       advertise = ecmd->advertising;
+
+       /* set autoneg and speed back to what they currently are */
+       ecmd->autoneg = safe_ecmd.autoneg;
+       ecmd->advertising = safe_ecmd.advertising;
+
+       ecmd->cmd = safe_ecmd.cmd;
+       /* If ecmd and safe_ecmd still differ at this point, the caller
+        * is trying to set something that we do not support
+        */
+       if (memcmp(ecmd, &safe_ecmd, sizeof(struct ethtool_cmd)))
+               return -EOPNOTSUPP;
+
+       while (test_bit(__I40E_CONFIG_BUSY, &vsi->state))
+               usleep_range(1000, 2000);
+
+       /* Get the current phy config */
+       status = i40e_aq_get_phy_capabilities(hw, false, false, &abilities,
+                                             NULL);
+       if (status)
+               return -EAGAIN;
+
+       /* Copy link_speed and abilities to config in case they are not
+        * set below
+        */
+       memset(&config, 0, sizeof(struct i40e_aq_set_phy_config));
+       config.link_speed = abilities.link_speed;
+       config.abilities = abilities.abilities;
+
+       /* Check autoneg */
+       if (autoneg == AUTONEG_ENABLE) {
+               /* If autoneg is not supported, return error */
+               if (!(safe_ecmd.supported & SUPPORTED_Autoneg)) {
+                       netdev_info(netdev, "Autoneg not supported on this phy\n");
+                       return -EINVAL;
+               }
+               /* If autoneg was not already enabled */
+               if (!(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED)) {
+                       config.abilities = abilities.abilities |
+                                          I40E_AQ_PHY_ENABLE_AN;
+                       change = true;
+               }
+       } else {
+               /* If autoneg is supported, 10GBASE_T is the only PHY
+                * that can disable it; return an error otherwise
+                */
+               if (safe_ecmd.supported & SUPPORTED_Autoneg &&
+                   hw->phy.link_info.phy_type != I40E_PHY_TYPE_10GBASE_T) {
+                       netdev_info(netdev, "Autoneg cannot be disabled on this phy\n");
+                       return -EINVAL;
+               }
+               /* If autoneg is currently enabled */
+               if (hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) {
+                       config.abilities = abilities.abilities &
+                                          ~I40E_AQ_PHY_ENABLE_AN;
+                       change = true;
+               }
+       }
+
+       if (advertise & ~safe_ecmd.supported)
+               return -EINVAL;
+
+       if (advertise & ADVERTISED_100baseT_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_100MB)) {
+                       config.link_speed |= I40E_LINK_SPEED_100MB;
+                       change = true;
+               }
+       if (advertise & ADVERTISED_1000baseT_Full ||
+           advertise & ADVERTISED_1000baseKX_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_1GB)) {
+                       config.link_speed |= I40E_LINK_SPEED_1GB;
+                       change = true;
+               }
+       if (advertise & ADVERTISED_10000baseT_Full ||
+           advertise & ADVERTISED_10000baseKX4_Full ||
+           advertise & ADVERTISED_10000baseKR_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_10GB)) {
+                       config.link_speed |= I40E_LINK_SPEED_10GB;
+                       change = true;
+               }
+       if (advertise & ADVERTISED_40000baseKR4_Full ||
+           advertise & ADVERTISED_40000baseCR4_Full ||
+           advertise & ADVERTISED_40000baseSR4_Full ||
+           advertise & ADVERTISED_40000baseLR4_Full)
+               if (!(abilities.link_speed & I40E_LINK_SPEED_40GB)) {
+                       config.link_speed |= I40E_LINK_SPEED_40GB;
+                       change = true;
+               }
+
+       if (change) {
+               /* copy over the rest of the abilities */
+               config.phy_type = abilities.phy_type;
+               config.eee_capability = abilities.eee_capability;
+               config.eeer = abilities.eeer_val;
+               config.low_power_ctrl = abilities.d3_lpan;
+
+               /* If link is up, restart link and autoneg so changes take effect */
+               if (hw->phy.link_info.link_info & I40E_AQ_LINK_UP)
+                       config.abilities |= I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+
+               /* make the aq call */
+               status = i40e_aq_set_phy_config(hw, &config, NULL);
+               if (status) {
+                       netdev_info(netdev, "Set phy config failed with error %d.\n",
+                                   status);
+                       return -EAGAIN;
+               }
+
+               status = i40e_update_link_info(hw, true);
+               if (status)
+                       netdev_info(netdev, "Updating link info failed with error %d\n",
+                                   status);
+
+       } else {
+               netdev_info(netdev, "Nothing changed, exiting without setting anything.\n");
+       }
+
+       return err;
+}
+
+static int i40e_nway_reset(struct net_device *netdev)
+{
+       /* restart autonegotiation */
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       bool link_up = hw->phy.link_info.link_info & I40E_AQ_LINK_UP;
+       i40e_status ret = 0;
+
+       ret = i40e_aq_set_link_restart_an(hw, link_up, NULL);
+       if (ret) {
+               netdev_info(netdev, "link restart failed, aq_err=%d\n",
+                           pf->hw.aq.asq_last_status);
+               return -EIO;
+       }
+
+       return 0;
+}
+
 /**
  * i40e_get_pauseparam -  Get Flow Control status
  * Return tx/rx-pause status
@@ -334,6 +635,85 @@ static void i40e_get_pauseparam(struct net_device *netdev,
        }
 }
 
+/**
+ * i40e_set_pauseparam - Set Flow Control parameter
+ * @netdev: network interface device structure
+ * @pause: requested tx/rx flow control parameters
+ **/
+static int i40e_set_pauseparam(struct net_device *netdev,
+                              struct ethtool_pauseparam *pause)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_link_status *hw_link_info = &hw->phy.link_info;
+       bool link_up = hw_link_info->link_info & I40E_AQ_LINK_UP;
+       i40e_status status;
+       u8 aq_failures;
+       int err = 0;
+
+       if (vsi != pf->vsi[pf->lan_vsi])
+               return -EOPNOTSUPP;
+
+       if (pause->autoneg != ((hw_link_info->an_info & I40E_AQ_AN_COMPLETED) ?
+           AUTONEG_ENABLE : AUTONEG_DISABLE)) {
+               netdev_info(netdev, "To change autoneg please use: ethtool -s <dev> autoneg <on|off>\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* If we have link and don't have autoneg */
+       if (!test_bit(__I40E_DOWN, &pf->state) &&
+           !(hw_link_info->an_info & I40E_AQ_AN_COMPLETED)) {
+               /* warn that the change may not actually take effect */
+               netdev_info(netdev, "Autoneg did not complete so changing settings may not result in an actual change.\n");
+       }
+
+       if (hw->fc.current_mode == I40E_FC_PFC) {
+               netdev_info(netdev, "Priority flow control enabled. Cannot set link flow control.\n");
+               return -EOPNOTSUPP;
+       }
+
+       if (pause->rx_pause && pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_FULL;
+       else if (pause->rx_pause && !pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_RX_PAUSE;
+       else if (!pause->rx_pause && pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_TX_PAUSE;
+       else if (!pause->rx_pause && !pause->tx_pause)
+               hw->fc.requested_mode = I40E_FC_NONE;
+       else
+               return -EINVAL;
+
+       /* Set the fc mode and restart autoneg only if link is up */
+       status = i40e_set_fc(hw, &aq_failures, link_up);
+
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_GET) {
+               netdev_info(netdev, "Set fc failed on the get_phy_capabilities call with error %d and status %d\n",
+                           status, hw->aq.asq_last_status);
+               err = -EAGAIN;
+       }
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_SET) {
+               netdev_info(netdev, "Set fc failed on the set_phy_config call with error %d and status %d\n",
+                           status, hw->aq.asq_last_status);
+               err = -EAGAIN;
+       }
+       if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE) {
+               netdev_info(netdev, "Set fc failed on the update_link_info call with error %d and status %d\n",
+                           status, hw->aq.asq_last_status);
+               err = -EAGAIN;
+       }
+
+       if (!test_bit(__I40E_DOWN, &pf->state)) {
+               /* Give it a little more time to try to come back */
+               msleep(75);
+               if (!test_bit(__I40E_DOWN, &pf->state))
+                       return i40e_nway_reset(netdev);
+       }
+
+       return err;
+}
+
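
Editor's note: this handler backs `ethtool -A <dev> rx <on|off> tx <on|off>`; the rx_pause/tx_pause pair maps one-to-one onto the four I40E_FC_* requested modes in the ladder above, and requests whose autoneg field disagrees with the link's autoneg state are rejected. A minimal userspace sketch of the same request (the helper and device name are invented for illustration):

#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

/* request rx-only flow control (I40E_FC_RX_PAUSE in the driver) */
static int set_rx_pause(int fd, const char *dev)
{
	struct ethtool_pauseparam pp = {
		.cmd = ETHTOOL_SPAUSEPARAM,
		.rx_pause = 1,
		.tx_pause = 0,
	};
	struct ifreq ifr;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, dev, IFNAMSIZ - 1);
	ifr.ifr_data = (void *)&pp;
	return ioctl(fd, SIOCETHTOOL, &ifr);
}
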
 static u32 i40e_get_msglevel(struct net_device *netdev)
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
@@ -404,10 +784,33 @@ static int i40e_get_eeprom(struct net_device *netdev,
        u8 *eeprom_buff;
        u16 i, sectors;
        bool last;
+       u32 magic;
+
 #define I40E_NVM_SECTOR_SIZE  4096
        if (eeprom->len == 0)
                return -EINVAL;
 
+       /* check for NVMUpdate access method */
+       magic = hw->vendor_id | (hw->device_id << 16);
+       if (eeprom->magic && eeprom->magic != magic) {
+               int errno;
+
+               /* make sure it is the right magic for NVMUpdate */
+               if ((eeprom->magic >> 16) != hw->device_id)
+                       return -EINVAL;
+
+               ret_val = i40e_nvmupd_command(hw,
+                                             (struct i40e_nvm_access *)eeprom,
+                                             bytes, &errno);
+               if (ret_val)
+                       dev_info(&pf->pdev->dev,
+                                "NVMUpdate read failed err=%d status=0x%x\n",
+                                ret_val, hw->aq.asq_last_status);
+
+               return errno;
+       }
+
+       /* normal ethtool get_eeprom support */
        eeprom->magic = hw->vendor_id | (hw->device_id << 16);
 
        eeprom_buff = kzalloc(eeprom->len, GFP_KERNEL);
@@ -434,7 +837,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
                ret_val = i40e_aq_read_nvm(hw, 0x0,
                                eeprom->offset + (I40E_NVM_SECTOR_SIZE * i),
                                len,
-                               eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
+                               (u8 *)eeprom_buff + (I40E_NVM_SECTOR_SIZE * i),
                                last, NULL);
                if (ret_val) {
                        dev_info(&pf->pdev->dev,
@@ -446,7 +849,7 @@ static int i40e_get_eeprom(struct net_device *netdev,
 
 release_nvm:
        i40e_release_nvm(hw);
-       memcpy(bytes, eeprom_buff, eeprom->len);
+       memcpy(bytes, (u8 *)eeprom_buff, eeprom->len);
 free_buff:
        kfree(eeprom_buff);
        return ret_val;
@@ -466,6 +869,39 @@ static int i40e_get_eeprom_len(struct net_device *netdev)
        return val;
 }
 
+static int i40e_set_eeprom(struct net_device *netdev,
+                          struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_hw *hw = &np->vsi->back->hw;
+       struct i40e_pf *pf = np->vsi->back;
+       int ret_val = 0;
+       int errno;
+       u32 magic;
+
+       /* normal ethtool set_eeprom is not supported */
+       magic = hw->vendor_id | (hw->device_id << 16);
+       if (eeprom->magic == magic)
+               return -EOPNOTSUPP;
+
+       /* check for NVMUpdate access method */
+       if (!eeprom->magic || (eeprom->magic >> 16) != hw->device_id)
+               return -EINVAL;
+
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state) ||
+           test_bit(__I40E_RESET_INTR_RECEIVED, &pf->state))
+               return -EBUSY;
+
+       ret_val = i40e_nvmupd_command(hw, (struct i40e_nvm_access *)eeprom,
+                                     bytes, &errno);
+       if (ret_val)
+               dev_info(&pf->pdev->dev,
+                        "NVMUpdate write failed err=%d status=0x%x\n",
+                        ret_val, hw->aq.asq_last_status);
+
+       return errno;
+}
+
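
Editor's note: both eeprom paths key off the same magic convention: plain ethtool requests use vendor_id | (device_id << 16), while the NVMUpdate tool keeps the device id in the upper 16 bits but differs in the low 16. A sketch of that classification, factored into a helper for illustration only (this helper does not exist in the driver):

static bool i40e_magic_is_nvmupd(struct i40e_hw *hw, u32 magic)
{
	u32 ethtool_magic = hw->vendor_id | ((u32)hw->device_id << 16);

	if (magic == ethtool_magic)
		return false;	/* plain ethtool -e/-E request */

	/* NVMUpdate requests still carry the device id up top */
	return (magic >> 16) == hw->device_id;
}
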
 static void i40e_get_drvinfo(struct net_device *netdev,
                             struct ethtool_drvinfo *drvinfo)
 {
@@ -697,6 +1133,13 @@ static void i40e_get_ethtool_stats(struct net_device *netdev,
                data[i++] = (i40e_gstrings_misc_stats[j].sizeof_stat ==
                            sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
        }
+#ifdef I40E_FCOE
+       for (j = 0; j < I40E_FCOE_STATS_LEN; j++) {
+               p = (char *)vsi + i40e_gstrings_fcoe_stats[j].stat_offset;
+               data[i++] = (i40e_gstrings_fcoe_stats[j].sizeof_stat ==
+                       sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+       }
+#endif
        rcu_read_lock();
        for (j = 0; j < vsi->num_queue_pairs; j++) {
                tx_ring = ACCESS_ONCE(vsi->tx_rings[j]);
@@ -778,6 +1221,13 @@ static void i40e_get_strings(struct net_device *netdev, u32 stringset,
                                 i40e_gstrings_misc_stats[i].stat_string);
                        p += ETH_GSTRING_LEN;
                }
+#ifdef I40E_FCOE
+               for (i = 0; i < I40E_FCOE_STATS_LEN; i++) {
+                       snprintf(p, ETH_GSTRING_LEN, "%s",
+                                i40e_gstrings_fcoe_stats[i].stat_string);
+                       p += ETH_GSTRING_LEN;
+               }
+#endif
                for (i = 0; i < vsi->num_queue_pairs; i++) {
                        snprintf(p, ETH_GSTRING_LEN, "tx-%u.tx_packets", i);
                        p += ETH_GSTRING_LEN;
@@ -1021,24 +1471,6 @@ static int i40e_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
        return 0;
 }
 
-static int i40e_nway_reset(struct net_device *netdev)
-{
-       /* restart autonegotiation */
-       struct i40e_netdev_priv *np = netdev_priv(netdev);
-       struct i40e_pf *pf = np->vsi->back;
-       struct i40e_hw *hw = &pf->hw;
-       i40e_status ret = 0;
-
-       ret = i40e_aq_set_link_restart_an(hw, NULL);
-       if (ret) {
-               netdev_info(netdev, "link restart failed, aq_err=%d\n",
-                           pf->hw.aq.asq_last_status);
-               return -EIO;
-       }
-
-       return 0;
-}
-
 static int i40e_set_phys_id(struct net_device *netdev,
                            enum ethtool_phys_id_state state)
 {
@@ -1105,17 +1537,36 @@ static int i40e_set_coalesce(struct net_device *netdev,
        if (ec->tx_max_coalesced_frames_irq || ec->rx_max_coalesced_frames_irq)
                vsi->work_limit = ec->tx_max_coalesced_frames_irq;
 
+       vector = vsi->base_vector;
        if ((ec->rx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
-           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+           (ec->rx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
                vsi->rx_itr_setting = ec->rx_coalesce_usecs;
-       else
+       } else if (ec->rx_coalesce_usecs == 0) {
+               vsi->rx_itr_setting = ec->rx_coalesce_usecs;
+               i40e_irq_dynamic_disable(vsi, vector);
+               if (ec->use_adaptive_rx_coalesce)
+                       netif_info(pf, drv, netdev,
+                                  "Rx-usecs=0, need to disable adaptive-Rx for a complete disable\n");
+       } else {
+               netif_info(pf, drv, netdev,
+                          "Invalid value, Rx-usecs range is 0, 8-8160\n");
                return -EINVAL;
+       }
 
        if ((ec->tx_coalesce_usecs >= (I40E_MIN_ITR << 1)) &&
-           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1)))
+           (ec->tx_coalesce_usecs <= (I40E_MAX_ITR << 1))) {
                vsi->tx_itr_setting = ec->tx_coalesce_usecs;
-       else
+       } else if (ec->tx_coalesce_usecs == 0) {
+               vsi->tx_itr_setting = ec->tx_coalesce_usecs;
+               i40e_irq_dynamic_disable(vsi, vector);
+               if (ec->use_adaptive_tx_coalesce)
+                       netif_info(pf, drv, netdev,
+                                  "Tx-usecs=0, need to disable adaptive-Tx for a complete disable\n");
+       } else {
+               netif_info(pf, drv, netdev,
+                          "Invalid value, Tx-usecs range is 0, 8-8160\n");
                return -EINVAL;
+       }
 
        if (ec->use_adaptive_rx_coalesce)
                vsi->rx_itr_setting |= I40E_ITR_DYNAMIC;
@@ -1127,7 +1578,6 @@ static int i40e_set_coalesce(struct net_device *netdev,
        else
                vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;
 
-       vector = vsi->base_vector;
        for (i = 0; i < vsi->num_q_vectors; i++, vector++) {
                q_vector = vsi->q_vectors[i];
                q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
@@ -1731,6 +2181,7 @@ static int i40e_set_channels(struct net_device *dev,
 
 static const struct ethtool_ops i40e_ethtool_ops = {
        .get_settings           = i40e_get_settings,
+       .set_settings           = i40e_set_settings,
        .get_drvinfo            = i40e_get_drvinfo,
        .get_regs_len           = i40e_get_regs_len,
        .get_regs               = i40e_get_regs,
@@ -1738,11 +2189,13 @@ static const struct ethtool_ops i40e_ethtool_ops = {
        .get_link               = ethtool_op_get_link,
        .get_wol                = i40e_get_wol,
        .set_wol                = i40e_set_wol,
+       .set_eeprom             = i40e_set_eeprom,
        .get_eeprom_len         = i40e_get_eeprom_len,
        .get_eeprom             = i40e_get_eeprom,
        .get_ringparam          = i40e_get_ringparam,
        .set_ringparam          = i40e_set_ringparam,
        .get_pauseparam         = i40e_get_pauseparam,
+       .set_pauseparam         = i40e_set_pauseparam,
        .get_msglevel           = i40e_get_msglevel,
        .set_msglevel           = i40e_set_msglevel,
        .get_rxnfc              = i40e_get_rxnfc,
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.c b/drivers/net/ethernet/intel/i40e/i40e_fcoe.c
new file mode 100644
index 0000000..6938fc1
--- /dev/null
@@ -0,0 +1,1561 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+
+#include <linux/if_ether.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/fc/fc_fs.h>
+#include <scsi/fc/fc_fip.h>
+#include <scsi/fc/fc_fcoe.h>
+#include <scsi/libfc.h>
+#include <scsi/libfcoe.h>
+
+#include "i40e.h"
+#include "i40e_fcoe.h"
+
+/**
+ * i40e_rx_is_fip - returns true if the rx packet type is FIP
+ * @ptype: the packet type field from rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fip(u16 ptype)
+{
+       return ptype == I40E_RX_PTYPE_L2_FIP_PAY2;
+}
+
+/**
+ * i40e_rx_is_fcoe - returns true if the rx packet type is FCoE
+ * @ptype: the packet type field from rx descriptor write-back
+ **/
+static inline bool i40e_rx_is_fcoe(u16 ptype)
+{
+       return (ptype >= I40E_RX_PTYPE_L2_FCOE_PAY3) &&
+              (ptype <= I40E_RX_PTYPE_L2_FCOE_VFT_FCOTHER);
+}
+
+/**
+ * i40e_fcoe_sof_is_class2 - returns true if this is a FC Class 2 SOF
+ * @sof: the FCoE start of frame delimiter
+ **/
+static inline bool i40e_fcoe_sof_is_class2(u8 sof)
+{
+       return (sof == FC_SOF_I2) || (sof == FC_SOF_N2);
+}
+
+/**
+ * i40e_fcoe_sof_is_class3 - returns true if this is a FC Class 3 SOF
+ * @sof: the FCoE start of frame delimiter
+ **/
+static inline bool i40e_fcoe_sof_is_class3(u8 sof)
+{
+       return (sof == FC_SOF_I3) || (sof == FC_SOF_N3);
+}
+
+/**
+ * i40e_fcoe_sof_is_supported - returns true if the FC SOF is supported by HW
+ * @sof: the input SOF value from the frame
+ **/
+static inline bool i40e_fcoe_sof_is_supported(u8 sof)
+{
+       return i40e_fcoe_sof_is_class2(sof) ||
+              i40e_fcoe_sof_is_class3(sof);
+}
+
+/**
+ * i40e_fcoe_fc_sof - pull the SOF from FCoE header in the frame
+ * @skb: the frame whose SOF is to be pulled from
+ * @sof: where the pulled SOF value is stored
+ **/
+static inline int i40e_fcoe_fc_sof(struct sk_buff *skb, u8 *sof)
+{
+       *sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
+
+       if (!i40e_fcoe_sof_is_supported(*sof))
+               return -EINVAL;
+       return 0;
+}
+
+/**
+ * i40e_fcoe_eof_is_supported - returns true if the EOF is supported by HW
+ * @eof:     the input EOF value from the frame
+ **/
+static inline bool i40e_fcoe_eof_is_supported(u8 eof)
+{
+       return (eof == FC_EOF_N) || (eof == FC_EOF_T) ||
+              (eof == FC_EOF_NI) || (eof == FC_EOF_A);
+}
+
+/**
+ * i40e_fcoe_fc_eof - pull EOF from FCoE trailer in the frame
+ * @skb: the frame whose EOF is to be pulled from
+ * @eof: where the pulled EOF value is stored
+ **/
+static inline int i40e_fcoe_fc_eof(struct sk_buff *skb, u8 *eof)
+{
+       /* the first byte of the last dword is EOF */
+       skb_copy_bits(skb, skb->len - 4, eof, 1);
+
+       if (!i40e_fcoe_eof_is_supported(*eof))
+               return -EINVAL;
+       return 0;
+}
+
+/**
+ * i40e_fcoe_ctxt_eof - convert input FC EOF for descriptor programming
+ * @eof: the input eof value from the frame
+ *
+ * The FC EOF is converted to the value understood by HW for descriptor
+ * programming. Never call this w/o calling i40e_fcoe_eof_is_supported()
+ * first.
+ **/
+static inline u32 i40e_fcoe_ctxt_eof(u8 eof)
+{
+       switch (eof) {
+       case FC_EOF_N:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_N;
+       case FC_EOF_T:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_T;
+       case FC_EOF_NI:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_NI;
+       case FC_EOF_A:
+               return I40E_TX_DESC_CMD_L4T_EOFT_EOF_A;
+       default:
+               /* FIXME: still returns 0 */
+               pr_err("Unrecognized EOF %x\n", eof);
+               return 0;
+       }
+}
+
+/**
+ * i40e_fcoe_xid_is_valid - returns true if the exchange id is valid
+ * @xid: the exchange id
+ **/
+static inline bool i40e_fcoe_xid_is_valid(u16 xid)
+{
+       return (xid != FC_XID_UNKNOWN) && (xid < I40E_FCOE_DDP_MAX);
+}
+
+/**
+ * i40e_fcoe_ddp_unmap - unmap the sglist associated with a DDP context
+ * @pf: pointer to pf
+ * @ddp: sw DDP context
+ *
+ * Unmap the scatter-gather list associated with the given SW DDP
+ * context and free its DMA-pool buffer, if any.
+ **/
+static inline void i40e_fcoe_ddp_unmap(struct i40e_pf *pf,
+                                      struct i40e_fcoe_ddp *ddp)
+{
+       if (test_and_set_bit(__I40E_FCOE_DDP_UNMAPPED, &ddp->flags))
+               return;
+
+       if (ddp->sgl) {
+               dma_unmap_sg(&pf->pdev->dev, ddp->sgl, ddp->sgc,
+                            DMA_FROM_DEVICE);
+               ddp->sgl = NULL;
+               ddp->sgc = 0;
+       }
+
+       if (ddp->pool) {
+               dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
+               ddp->pool = NULL;
+       }
+}
+
+/**
+ * i40e_fcoe_ddp_clear - clear the given SW DDP context
+ * @ddp: SW DDP context
+ **/
+static inline void i40e_fcoe_ddp_clear(struct i40e_fcoe_ddp *ddp)
+{
+       memset(ddp, 0, sizeof(struct i40e_fcoe_ddp));
+       ddp->xid = FC_XID_UNKNOWN;
+       ddp->flags = __I40E_FCOE_DDP_NONE;
+}
+
+/**
+ * i40e_fcoe_progid_is_fcoe - check if the prog_id is for FCoE
+ * @id: the prog id for the programming status Rx descriptor write-back
+ **/
+static inline bool i40e_fcoe_progid_is_fcoe(u8 id)
+{
+       return (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
+              (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS);
+}
+
+/**
+ * i40e_fcoe_fc_get_xid - get xid from the frame header
+ * @fh: the fc frame header
+ *
+ * If the incoming frame belongs to an exchange that we originated
+ * (FC_FC_EX_CTX set in the F_CTL field), the originator exchange id
+ * (ox_id) identifies it; otherwise the responder exchange id (rx_id)
+ * does.
+ *
+ * Returns ox_id if exchange originator, rx_id if responder
+ **/
+static inline u16 i40e_fcoe_fc_get_xid(struct fc_frame_header *fh)
+{
+       u32 f_ctl = ntoh24(fh->fh_f_ctl);
+
+       return (f_ctl & FC_FC_EX_CTX) ?
+               be16_to_cpu(fh->fh_ox_id) :
+               be16_to_cpu(fh->fh_rx_id);
+}
+
+/**
+ * i40e_fcoe_fc_frame_header - get fc frame header from skb
+ * @skb: packet
+ *
+ * This checks if there is a VLAN header and returns the data
+ * pointer to the start of the fc_frame_header.
+ *
+ * Returns pointer to the fc_frame_header
+ **/
+static inline struct fc_frame_header *i40e_fcoe_fc_frame_header(
+       struct sk_buff *skb)
+{
+       void *fh = skb->data + sizeof(struct fcoe_hdr);
+
+       if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
+               fh += sizeof(struct vlan_hdr);
+
+       return (struct fc_frame_header *)fh;
+}
+
+/**
+ * i40e_fcoe_ddp_put - release the DDP context for a given exchange id
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id that corresponding DDP context will be released
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
+ * and it is expected to be called by ULD, i.e., FCP layer of libfc
+ * to release the corresponding ddp context when the I/O is done.
+ *
+ * Returns: data length already DDPed, in bytes
+ **/
+static int i40e_fcoe_ddp_put(struct net_device *netdev, u16 xid)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       int len = 0;
+       struct i40e_fcoe_ddp *ddp = &fcoe->ddp[xid];
+
+       if (!fcoe || !ddp)
+               goto out;
+
+       if (test_bit(__I40E_FCOE_DDP_DONE, &ddp->flags))
+               len = ddp->len;
+       i40e_fcoe_ddp_unmap(pf, ddp);
+out:
+       return len;
+}
+
+/**
+ * i40e_init_pf_fcoe - sets up the HW for FCoE
+ * @pf: pointer to pf
+ *
+ * Returns 0 if FCoE is supported, otherwise the error code
+ **/
+int i40e_init_pf_fcoe(struct i40e_pf *pf)
+{
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+       pf->num_fcoe_qps = 0;
+       pf->fcoe_hmc_cntx_num = 0;
+       pf->fcoe_hmc_filt_num = 0;
+
+       if (!pf->hw.func_caps.fcoe) {
+               dev_info(&pf->pdev->dev, "FCoE capability is disabled\n");
+               return 0;
+       }
+
+       if (!pf->hw.func_caps.dcb) {
+               dev_warn(&pf->pdev->dev,
+                        "Hardware is not DCB capable, not enabling FCoE.\n");
+               return 0;
+       }
+
+       /* enable FCoE hash filter */
+       val = rd32(hw, I40E_PFQF_HENA(1));
+       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_OX - 32);
+       val |= 1 << (I40E_FILTER_PCTYPE_FCOE_RX - 32);
+       val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
+       wr32(hw, I40E_PFQF_HENA(1), val);
+
+       /* enable flag */
+       pf->flags |= I40E_FLAG_FCOE_ENABLED;
+       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
+
+       /* Reserve 4K DDP contexts and 20K filter size for FCoE */
+       pf->fcoe_hmc_cntx_num = (1 << I40E_DMA_CNTX_SIZE_4K) *
+                                I40E_DMA_CNTX_BASE_SIZE;
+       pf->fcoe_hmc_filt_num = pf->fcoe_hmc_cntx_num +
+                               (1 << I40E_HASH_FILTER_SIZE_16K) *
+                               I40E_HASH_FILTER_BASE_SIZE;
+
+       /* FCoE object: max 16K filter buckets and 4K DMA contexts */
+       pf->filter_settings.fcoe_filt_num = I40E_HASH_FILTER_SIZE_16K;
+       pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
+
+       /* Setup max frame with FCoE_MTU plus L2 overheads */
+       val = rd32(hw, I40E_GLFCOE_RCTL);
+       val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
+       val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
+                << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
+       wr32(hw, I40E_GLFCOE_RCTL, val);
+
+       dev_info(&pf->pdev->dev, "FCoE is supported.\n");
+       return 0;
+}
+
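
Editor's note: the reservation arithmetic in i40e_init_pf_fcoe() works out as below, assuming the size enums encode log2 multipliers over a 512-entry base (those constant values are not shown in this hunk and are assumed here):

/* assumed encodings (not shown in this hunk):
 *   I40E_DMA_CNTX_SIZE_4K     = 3,  I40E_DMA_CNTX_BASE_SIZE    = 512
 *   I40E_HASH_FILTER_SIZE_16K = 5,  I40E_HASH_FILTER_BASE_SIZE = 512
 * so:
 *   fcoe_hmc_cntx_num = (1 << 3) * 512         =  4096  (4K contexts)
 *   fcoe_hmc_filt_num = 4096 + (1 << 5) * 512  = 20480  (20K filters)
 * matching the "4K DDP contexts and 20K filter size" comment above.
 */
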
+/**
+ * i40e_get_fcoe_tc_map - Return TC map for FCoE APP
+ * @pf: pointer to pf
+ *
+ **/
+u8 i40e_get_fcoe_tc_map(struct i40e_pf *pf)
+{
+       struct i40e_ieee_app_priority_table app;
+       struct i40e_hw *hw = &pf->hw;
+       u8 enabled_tc = 0;
+       u8 tc, i;
+       /* Get the FCoE APP TLV */
+       struct i40e_dcbx_config *dcbcfg = &hw->local_dcbx_config;
+
+       for (i = 0; i < dcbcfg->numapps; i++) {
+               app = dcbcfg->app[i];
+               if (app.selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE &&
+                   app.protocolid == ETH_P_FCOE) {
+                       tc = dcbcfg->etscfg.prioritytable[app.priority];
+                       enabled_tc |= (1 << tc);
+                       break;
+               }
+       }
+
+       /* TC0 if there is no TC defined for FCoE APP TLV */
+       enabled_tc = enabled_tc ? enabled_tc : 0x1;
+
+       return enabled_tc;
+}
+
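
Editor's note: the value returned above is a bitmap of traffic classes (bit n set means TC n carries FCoE per the DCBX APP TLV), not a TC number. A usage sketch, with the helper invented for illustration:

/* Illustrative only: pick the lowest enabled TC out of the FCoE TC map */
static u8 i40e_fcoe_first_tc(struct i40e_pf *pf)
{
	u8 tc_map = i40e_get_fcoe_tc_map(pf);

	/* ffs() is 1-based; the map is never 0 since TC0 is the fallback */
	return (u8)(ffs(tc_map) - 1);
}
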
+/**
+ * i40e_fcoe_vsi_init - prepares the VSI context for creating a FCoE VSI
+ * @vsi: pointer to the associated VSI struct
+ * @ctxt: pointer to the associated VSI context to be passed to HW
+ *
+ * Returns 0 on success or < 0 on error
+ **/
+int i40e_fcoe_vsi_init(struct i40e_vsi *vsi, struct i40e_vsi_context *ctxt)
+{
+       struct i40e_aqc_vsi_properties_data *info = &ctxt->info;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u8 enabled_tc = 0;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
+               dev_err(&pf->pdev->dev,
+                       "FCoE is not enabled for this device\n");
+               return -EPERM;
+       }
+
+       /* initialize the hardware for FCoE */
+       ctxt->pf_num = hw->pf_id;
+       ctxt->vf_num = 0;
+       ctxt->uplink_seid = vsi->uplink_seid;
+       ctxt->connection_type = 0x1;
+       ctxt->flags = I40E_AQ_VSI_TYPE_PF;
+
+       /* FCoE VSI would need the following sections */
+       info->valid_sections |= cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID |
+                                           I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+
+       /* FCoE VSI does not need these sections */
+       info->valid_sections &= cpu_to_le16(~(I40E_AQ_VSI_PROP_SECURITY_VALID |
+                                           I40E_AQ_VSI_PROP_VLAN_VALID |
+                                           I40E_AQ_VSI_PROP_CAS_PV_VALID |
+                                           I40E_AQ_VSI_PROP_INGRESS_UP_VALID |
+                                           I40E_AQ_VSI_PROP_EGRESS_UP_VALID));
+
+       enabled_tc = i40e_get_fcoe_tc_map(pf);
+       i40e_vsi_setup_queue_map(vsi, ctxt, enabled_tc, true);
+
+       /* set up queue option section: only enable FCoE */
+       info->queueing_opt_flags = I40E_AQ_VSI_QUE_OPT_FCOE_ENA;
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_enable - this is the implementation of ndo_fcoe_enable,
+ * indicating the upper FCoE protocol stack is ready to use FCoE
+ * offload features.
+ *
+ * @netdev: pointer to the netdev that FCoE is created on
+ *
+ * Returns 0 on success
+ *
+ * Called with RTNL held.
+ *
+ **/
+int i40e_fcoe_enable(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
+               netdev_err(netdev, "HW does not support FCoE.\n");
+               return -ENODEV;
+       }
+
+       if (vsi->type != I40E_VSI_FCOE) {
+               netdev_err(netdev, "interface does not support FCoE.\n");
+               return -EBUSY;
+       }
+
+       atomic_inc(&fcoe->refcnt);
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_disable - disables FCoE for the upper FCoE protocol stack
+ * @netdev: pointer to the netdev that FCoE is created on
+ *
+ * Returns 0 on success
+ *
+ **/
+int i40e_fcoe_disable(struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED)) {
+               netdev_err(netdev, "device does not support FCoE\n");
+               return -ENODEV;
+       }
+       if (vsi->type != I40E_VSI_FCOE)
+               return -EBUSY;
+
+       if (!atomic_dec_and_test(&fcoe->refcnt))
+               return -EINVAL;
+
+       netdev_info(netdev, "FCoE disabled\n");
+
+       return 0;
+}
+
+/**
+ * i40e_fcoe_dma_pool_free - free the per cpu pool for FCoE DDP
+ * @fcoe: the FCoE sw object
+ * @dev: the device that the pool is associated with
+ * @cpu: the cpu for this pool
+ *
+ **/
+static void i40e_fcoe_dma_pool_free(struct i40e_fcoe *fcoe,
+                                   struct device *dev,
+                                   unsigned int cpu)
+{
+       struct i40e_fcoe_ddp_pool *ddp_pool;
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+       if (!ddp_pool->pool) {
+               dev_warn(dev, "DDP pool already freed for cpu %d\n", cpu);
+               return;
+       }
+       dma_pool_destroy(ddp_pool->pool);
+       ddp_pool->pool = NULL;
+}
+
+/**
+ * i40e_fcoe_dma_pool_create - per cpu pool for FCoE DDP
+ * @fcoe: the FCoE sw object
+ * @dev: the device that the pool is associated with
+ * @cpu: the cpu for this pool
+ *
+ * Returns 0 on success or non-zero on failure
+ *
+ **/
+static int i40e_fcoe_dma_pool_create(struct i40e_fcoe *fcoe,
+                                    struct device *dev,
+                                    unsigned int cpu)
+{
+       struct i40e_fcoe_ddp_pool *ddp_pool;
+       struct dma_pool *pool;
+       char pool_name[32];
+
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, cpu);
+       if (ddp_pool && ddp_pool->pool) {
+               dev_warn(dev, "DDP pool already allocated for cpu %d\n", cpu);
+               return 0;
+       }
+       snprintf(pool_name, sizeof(pool_name), "i40e_fcoe_ddp_%d", cpu);
+       pool = dma_pool_create(pool_name, dev, I40E_FCOE_DDP_PTR_MAX,
+                              I40E_FCOE_DDP_PTR_ALIGN, PAGE_SIZE);
+       if (!pool) {
+               dev_err(dev, "dma_pool_create %s failed\n", pool_name);
+               return -ENOMEM;
+       }
+       ddp_pool->pool = pool;
+       return 0;
+}
+
+/**
+ * i40e_fcoe_free_ddp_resources - release FCoE DDP resources
+ * @vsi: the vsi FCoE is associated with
+ *
+ **/
+void i40e_fcoe_free_ddp_resources(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       int cpu, i;
+
+       /* do nothing if not FCoE VSI */
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+       /* do nothing if no DDP pools were allocated */
+       if (!fcoe->ddp_pool)
+               return;
+
+       for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
+               i40e_fcoe_ddp_put(vsi->netdev, i);
+
+       for_each_possible_cpu(cpu)
+               i40e_fcoe_dma_pool_free(fcoe, &pf->pdev->dev, cpu);
+
+       free_percpu(fcoe->ddp_pool);
+       fcoe->ddp_pool = NULL;
+
+       netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources released\n",
+                   vsi->id, vsi->seid);
+}
+
+/**
+ * i40e_fcoe_setup_ddp_resources - allocate per cpu DDP resources
+ * @vsi: the VSI FCoE is associated with
+ *
+ * Returns 0 on success or non-zero on failure
+ *
+ **/
+int i40e_fcoe_setup_ddp_resources(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct device *dev = &pf->pdev->dev;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       unsigned int cpu;
+       int i;
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return -ENODEV;
+
+       /* do nothing if no DDP pools were allocated */
+       if (fcoe->ddp_pool)
+               return -EEXIST;
+
+       /* allocate per CPU memory to track DDP pools */
+       fcoe->ddp_pool = alloc_percpu(struct i40e_fcoe_ddp_pool);
+       if (!fcoe->ddp_pool) {
+               dev_err(&pf->pdev->dev, "failed to allocate percpu DDP\n");
+               return -ENOMEM;
+       }
+
+       /* allocate pci pool for each cpu */
+       for_each_possible_cpu(cpu) {
+               if (!i40e_fcoe_dma_pool_create(fcoe, dev, cpu))
+                       continue;
+
+               dev_err(dev, "failed to alloc DDP pool on cpu:%d\n", cpu);
+               i40e_fcoe_free_ddp_resources(vsi);
+               return -ENOMEM;
+       }
+
+       /* initialize the sw context */
+       for (i = 0; i < I40E_FCOE_DDP_MAX; i++)
+               i40e_fcoe_ddp_clear(&fcoe->ddp[i]);
+
+       netdev_info(vsi->netdev, "VSI %d,%d FCoE DDP resources allocated\n",
+                   vsi->id, vsi->seid);
+
+       return 0;
+}
+
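
Editor's note: with the per-cpu pools in place, a DDP setup path would look up the pool for the local cpu before carving a user descriptor buffer out of it. A fragment-level sketch of that pattern, assuming the field names used in this file (locking and error handling elided):

/* inside a hypothetical DDP setup path */
struct i40e_fcoe_ddp_pool *ddp_pool;
dma_addr_t udp;
void *udl;

ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &udp);
put_cpu();
if (!udl)
	return 0;	/* no DDP on this I/O, fall back to normal receive */
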
+/**
+ * i40e_fcoe_handle_status - check the Programming Status for FCoE
+ * @rx_ring: the Rx ring for this descriptor
+ * @rx_desc: the Rx descriptor for Programming Status, not a packet descriptor.
+ * @prog_id: the prog id from the Programming Status descriptor write-back
+ *
+ * Check if this is the Rx Programming Status descriptor write-back for FCoE.
+ * This is used to verify if the context/filter programming or invalidation
+ * requested by SW to the HW is successful or not and take actions accordingly.
+ **/
+void i40e_fcoe_handle_status(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc, u8 prog_id)
+{
+       struct i40e_pf *pf = rx_ring->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct i40e_fcoe_ddp *ddp;
+       u32 error;
+       u16 xid;
+       u64 qw;
+
+       /* we only care for FCoE here */
+       if (!i40e_fcoe_progid_is_fcoe(prog_id))
+               return;
+
+       xid = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param) &
+             (I40E_FCOE_DDP_MAX - 1);
+
+       if (!i40e_fcoe_xid_is_valid(xid))
+               return;
+
+       ddp = &fcoe->ddp[xid];
+       WARN_ON(xid != ddp->xid);
+
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       error = (qw & I40E_RX_PROG_STATUS_DESC_QW1_ERROR_MASK) >>
+               I40E_RX_PROG_STATUS_DESC_QW1_ERROR_SHIFT;
+
+       /* DDP context programming status: failure or success */
+       if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) {
+               if (I40E_RX_PROG_FCOE_ERROR_TBL_FULL(error)) {
+                       dev_err(&pf->pdev->dev, "xid %x ddp->xid %x TABLE FULL\n",
+                               xid, ddp->xid);
+                       ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT;
+               }
+               if (I40E_RX_PROG_FCOE_ERROR_CONFLICT(error)) {
+                       dev_err(&pf->pdev->dev, "xid %x ddp->xid %x CONFLICT\n",
+                               xid, ddp->xid);
+                       ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT;
+               }
+       }
+
+       /* DDP context invalidation status: failure or success */
+       if (prog_id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS) {
+               if (I40E_RX_PROG_FCOE_ERROR_INVLFAIL(error)) {
+                       dev_err(&pf->pdev->dev, "xid %x ddp->xid %x INVALIDATION FAILURE\n",
+                               xid, ddp->xid);
+                       ddp->prerr |= I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT;
+               }
+               /* clear the flag so we can retry invalidation */
+               clear_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags);
+       }
+
+       /* unmap DMA */
+       i40e_fcoe_ddp_unmap(pf, ddp);
+       i40e_fcoe_ddp_clear(ddp);
+}
+
+/**
+ * i40e_fcoe_handle_offload - check ddp status and mark it done
+ * @rx_ring: the Rx ring this descriptor came from
+ * @rx_desc: advanced rx descriptor
+ * @skb: the skb holding the received data
+ *
+ * This checks ddp status.
+ *
+ * Returns: < 0 indicates an error or not an FCoE ddp, 0 indicates
+ * not passing the skb to ULD, > 0 indicates the length of data
+ * already DDPed.
+ *
+ **/
+int i40e_fcoe_handle_offload(struct i40e_ring *rx_ring,
+                            union i40e_rx_desc *rx_desc,
+                            struct sk_buff *skb)
+{
+       struct i40e_pf *pf = rx_ring->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct fc_frame_header *fh = NULL;
+       struct i40e_fcoe_ddp *ddp = NULL;
+       u32 status, fltstat;
+       u32 error, fcerr;
+       int rc = -EINVAL;
+       u16 ptype;
+       u16 xid;
+       u64 qw;
+
+       /* check this rxd is for programming status */
+       qw = le64_to_cpu(rx_desc->wb.qword1.status_error_len);
+       /* packet descriptor, check packet type */
+       ptype = (qw & I40E_RXD_QW1_PTYPE_MASK) >> I40E_RXD_QW1_PTYPE_SHIFT;
+       if (!i40e_rx_is_fcoe(ptype))
+               goto out_no_ddp;
+
+       error = (qw & I40E_RXD_QW1_ERROR_MASK) >> I40E_RXD_QW1_ERROR_SHIFT;
+       fcerr = (error >> I40E_RX_DESC_ERROR_L3L4E_SHIFT) &
+                I40E_RX_DESC_FCOE_ERROR_MASK;
+
+       /* check stateless offload error */
+       if (unlikely(fcerr == I40E_RX_DESC_ERROR_L3L4E_PROT)) {
+               dev_err(&pf->pdev->dev, "Protocol Error\n");
+               skb->ip_summed = CHECKSUM_NONE;
+       } else {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
+       }
+
+       /* check hw status on ddp */
+       status = (qw & I40E_RXD_QW1_STATUS_MASK) >> I40E_RXD_QW1_STATUS_SHIFT;
+       fltstat = (status >> I40E_RX_DESC_STATUS_FLTSTAT_SHIFT) &
+                  I40E_RX_DESC_FLTSTAT_FCMASK;
+
+       /* now we are ready to check DDP */
+       fh = i40e_fcoe_fc_frame_header(skb);
+       xid = i40e_fcoe_fc_get_xid(fh);
+       if (!i40e_fcoe_xid_is_valid(xid))
+               goto out_no_ddp;
+
+       /* non DDP normal receive, return to the protocol stack */
+       if (fltstat == I40E_RX_DESC_FLTSTAT_NOMTCH)
+               goto out_no_ddp;
+
+       /* do we have a sw ddp context setup ? */
+       ddp = &fcoe->ddp[xid];
+       if (!ddp->sgl)
+               goto out_no_ddp;
+
+       /* fetch xid from hw rxd wb, which should match up the sw ctxt */
+       xid = le16_to_cpu(rx_desc->wb.qword0.lo_dword.mirr_fcoe.fcoe_ctx_id);
+       if (ddp->xid != xid) {
+               dev_err(&pf->pdev->dev, "xid 0x%x does not match ctx_xid 0x%x\n",
+                       ddp->xid, xid);
+               goto out_put_ddp;
+       }
+
+       /* the same exchange has already errored out */
+       if (ddp->fcerr) {
+               dev_err(&pf->pdev->dev, "xid 0x%x fcerr 0x%x reported fcerr 0x%x\n",
+                       xid, ddp->fcerr, fcerr);
+               goto out_put_ddp;
+       }
+
+       /* fcoe param is valid by now with correct DDPed length */
+       ddp->len = le32_to_cpu(rx_desc->wb.qword0.hi_dword.fcoe_param);
+       ddp->fcerr = fcerr;
+       /* header posting only, useful only for target mode and debugging */
+       if (fltstat == I40E_RX_DESC_FLTSTAT_DDP) {
+               /* For target mode, we get the header of the last packet,
+                * but it lacks the FCoE trailer fields, i.e., the CRC and
+                * EOF Ordered Set, since they are offloaded by the HW; fill
+                * them in correspondingly to allow the packet to pass
+                * through to the upper protocol stack.
+                */
+               u32 f_ctl = ntoh24(fh->fh_f_ctl);
+
+               if ((f_ctl & FC_FC_END_SEQ) &&
+                   (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA)) {
+                       struct fcoe_crc_eof *crc = NULL;
+
+                       crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
+                       crc->fcoe_eof = FC_EOF_T;
+               } else {
+                       /* otherwise, drop the header only frame */
+                       rc = 0;
+                       goto out_no_ddp;
+               }
+       }
+
+out_put_ddp:
+       /* either we got RSP or we have an error, unmap DMA in both cases */
+       i40e_fcoe_ddp_unmap(pf, ddp);
+       if (ddp->len && !ddp->fcerr) {
+               int pkts;
+
+               rc = ddp->len;
+               i40e_fcoe_ddp_clear(ddp);
+               ddp->len = rc;
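+               /* no per-frame skbs exist for DDPed payload, so approximate
+                * the packet count for the stats with a nominal 2KB frame
+                */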
+               pkts = DIV_ROUND_UP(rc, 2048);
+               rx_ring->stats.bytes += rc;
+               rx_ring->stats.packets += pkts;
+               rx_ring->q_vector->rx.total_bytes += rc;
+               rx_ring->q_vector->rx.total_packets += pkts;
+               set_bit(__I40E_FCOE_DDP_DONE, &ddp->flags);
+       }
+
+out_no_ddp:
+       return rc;
+}
+
+/**
+ * i40e_fcoe_ddp_setup - called to set up ddp context
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ * @target_mode: indicates this is a DDP request for target
+ *
+ * Returns : 1 for success and 0 for no DDP on this I/O
+ **/
+static int i40e_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
+                              struct scatterlist *sgl, unsigned int sgc,
+                              int target_mode)
+{
+       static const unsigned int bufflen = I40E_FCOE_DDP_BUF_MIN;
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_fcoe_ddp_pool *ddp_pool;
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       unsigned int i, j, dmacount;
+       struct i40e_fcoe_ddp *ddp;
+       unsigned int firstoff = 0;
+       unsigned int thisoff = 0;
+       unsigned int thislen = 0;
+       struct scatterlist *sg;
+       dma_addr_t addr = 0;
+       unsigned int len;
+
+       if (xid >= I40E_FCOE_DDP_MAX) {
+               dev_warn(&pf->pdev->dev, "xid=0x%x out-of-range\n", xid);
+               return 0;
+       }
+
+       /* no DDP if we are already down or resetting */
+       if (test_bit(__I40E_DOWN, &pf->state) ||
+           test_bit(__I40E_NEEDS_RESTART, &pf->state)) {
+               dev_info(&pf->pdev->dev, "xid=0x%x device in reset/down\n",
+                        xid);
+               return 0;
+       }
+
+       ddp = &fcoe->ddp[xid];
+       if (ddp->sgl) {
+               dev_info(&pf->pdev->dev, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
+                        xid, ddp->sgl, ddp->sgc);
+               return 0;
+       }
+       i40e_fcoe_ddp_clear(ddp);
+
+       if (!fcoe->ddp_pool) {
+               dev_info(&pf->pdev->dev, "No DDP pool, xid 0x%x\n", xid);
+               return 0;
+       }
+
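+       /* get_cpu() disables preemption, so this per-cpu pool reference
+        * stays valid until the matching put_cpu() below
+        */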
+       ddp_pool = per_cpu_ptr(fcoe->ddp_pool, get_cpu());
+       if (!ddp_pool->pool) {
+               dev_info(&pf->pdev->dev, "No percpu ddp pool, xid 0x%x\n", xid);
+               goto out_noddp;
+       }
+
+       /* setup dma from scsi command sgl */
+       dmacount = dma_map_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+       if (dmacount == 0) {
+               dev_info(&pf->pdev->dev, "dma_map_sg for sgl %p, sgc %d failed\n",
+                        sgl, sgc);
+               goto out_noddp_unmap;
+       }
+
+       /* alloc the udl from our ddp pool */
+       ddp->udl = dma_pool_alloc(ddp_pool->pool, GFP_ATOMIC, &ddp->udp);
+       if (!ddp->udl) {
+               dev_info(&pf->pdev->dev,
+                        "Failed to allocate ddp context, xid 0x%x\n", xid);
+               goto out_noddp_unmap;
+       }
+
+       j = 0;
+       ddp->len = 0;
+       for_each_sg(sgl, sg, dmacount, i) {
+               addr = sg_dma_address(sg);
+               len = sg_dma_len(sg);
+               ddp->len += len;
+               while (len) {
+                       /* max number of buffers allowed in one DDP context */
+                       if (j >= I40E_FCOE_DDP_BUFFCNT_MAX) {
+                               dev_info(&pf->pdev->dev,
+                                        "xid=%x:%d,%d,%d:addr=%llx not enough descriptors\n",
+                                        xid, i, j, dmacount, (u64)addr);
+                               goto out_noddp_free;
+                       }
+
+                       /* get the offset and length of the current buffer */
+                       thisoff = addr & ((dma_addr_t)bufflen - 1);
+                       thislen = min_t(unsigned int, (bufflen - thisoff), len);
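+                       /* e.g. with bufflen 4096, addr 0x10010 gives
+                        * thisoff 0x10 and thislen at most 4080 within
+                        * this 4KB-aligned buffer
+                        */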
+                       /* all but the 1st buffer (j == 0)
+                        * must be aligned on bufflen
+                        */
+                       if ((j != 0) && (thisoff))
+                               goto out_noddp_free;
+
+                       /* all but the last buffer
+                        * ((i == (dmacount - 1)) && (thislen == len))
+                        * must end at bufflen
+                        */
+                       if (((i != (dmacount - 1)) || (thislen != len)) &&
+                           ((thislen + thisoff) != bufflen))
+                               goto out_noddp_free;
+
+                       ddp->udl[j] = (u64)(addr - thisoff);
+                       /* only the first buffer may have non-zero offset */
+                       if (j == 0)
+                               firstoff = thisoff;
+                       len -= thislen;
+                       addr += thislen;
+                       j++;
+               }
+       }
+       /* only the last buffer may have non-full bufflen */
+       ddp->lastsize = thisoff + thislen;
+       ddp->firstoff = firstoff;
+       ddp->list_len = j;
+       ddp->pool = ddp_pool->pool;
+       ddp->sgl = sgl;
+       ddp->sgc = sgc;
+       ddp->xid = xid;
+       if (target_mode)
+               set_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
+       set_bit(__I40E_FCOE_DDP_INITALIZED, &ddp->flags);
+
+       put_cpu();
+       return 1; /* Success */
+
+out_noddp_free:
+       dma_pool_free(ddp->pool, ddp->udl, ddp->udp);
+       i40e_fcoe_ddp_clear(ddp);
+
+out_noddp_unmap:
+       dma_unmap_sg(&pf->pdev->dev, sgl, sgc, DMA_FROM_DEVICE);
+out_noddp:
+       put_cpu();
+       return 0;
+}
+
+/**
+ * i40e_fcoe_ddp_get - called to set up ddp context in initiator mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ **/
+static int i40e_fcoe_ddp_get(struct net_device *netdev, u16 xid,
+                            struct scatterlist *sgl, unsigned int sgc)
+{
+       return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
+}
+
+/**
+ * i40e_fcoe_ddp_target - called to set up ddp context in target mode
+ * @netdev: the corresponding net_device
+ * @xid: the exchange id requesting ddp
+ * @sgl: the scatter-gather list for this request
+ * @sgc: the number of scatter-gather items
+ *
+ * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
+ * and is expected to be called from ULD, e.g., FCP layer of libfc
+ * to set up ddp for the corresponding xid of the given sglist for
+ * the corresponding I/O. The DDP in target mode is a write I/O request
+ * from the initiator.
+ *
+ * Returns : 1 for success and 0 for no ddp
+ **/
+static int i40e_fcoe_ddp_target(struct net_device *netdev, u16 xid,
+                               struct scatterlist *sgl, unsigned int sgc)
+{
+       return i40e_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
+}
+
+/**
+ * i40e_fcoe_program_ddp - programs the HW DDP related descriptors
+ * @tx_ring: transmit ring for this packet
+ * @skb:     the packet to be sent out
+ * @ddp: the SW DDP context for this exchange
+ * @sof: the SOF to indicate class of service
+ *
+ * Programs the DDP, queue and filter context descriptors needed to
+ * offload this exchange. DDP is applicable only in case of READ if
+ * initiator or WRITE in case of responder (via checking XFER_RDY).
+ *
+ * Note: caller checks sof and ddp sw context
+ *
+ * Returns : none
+ **/
+static void i40e_fcoe_program_ddp(struct i40e_ring *tx_ring,
+                                 struct sk_buff *skb,
+                                 struct i40e_fcoe_ddp *ddp, u8 sof)
+{
+       struct i40e_fcoe_filter_context_desc *filter_desc = NULL;
+       struct i40e_fcoe_queue_context_desc *queue_desc = NULL;
+       struct i40e_fcoe_ddp_context_desc *ddp_desc = NULL;
+       struct i40e_pf *pf = tx_ring->vsi->back;
+       u16 i = tx_ring->next_to_use;
+       struct fc_frame_header *fh;
+       u64 flags_rsvd_lanq = 0;
+       bool target_mode;
+
+       /* check if abort is still pending */
+       if (test_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags)) {
+               dev_warn(&pf->pdev->dev,
+                        "DDP abort is still pending xid:%hx and ddp->flags:%lx:\n",
+                        ddp->xid, ddp->flags);
+               return;
+       }
+
+       /* set the flag to indicate this is programmed */
+       if (test_and_set_bit(__I40E_FCOE_DDP_PROGRAMMED, &ddp->flags)) {
+               dev_warn(&pf->pdev->dev,
+                        "DDP is already programmed for xid:%hx and ddp->flags:%lx:\n",
+                        ddp->xid, ddp->flags);
+               return;
+       }
+
+       /* Prepare the DDP context descriptor */
+       ddp_desc = I40E_DDP_CONTEXT_DESC(tx_ring, i);
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       ddp_desc->type_cmd_foff_lsize =
+                               cpu_to_le64(I40E_TX_DESC_DTYPE_DDP_CTX  |
+                               ((u64)I40E_FCOE_DDP_CTX_DESC_BSIZE_4K  <<
+                               I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)        |
+                               ((u64)ddp->firstoff                    <<
+                               I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)       |
+                               ((u64)ddp->lastsize                    <<
+                               I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT));
+       ddp_desc->rsvd = cpu_to_le64(0);
+
+       /* target mode needs last packet in the sequence  */
+       target_mode = test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags);
+       if (target_mode)
+               ddp_desc->type_cmd_foff_lsize |=
+                       cpu_to_le64(I40E_FCOE_DDP_CTX_DESC_LASTSEQH);
+
+       /* Prepare queue_context descriptor */
+       queue_desc = I40E_QUEUE_CONTEXT_DESC(tx_ring, i++);
+       if (i == tx_ring->count)
+               i = 0;
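+       /* the DMA index is taken from the xid, while ddp->udp holds the
+        * bus address of the user descriptor list built at DDP setup time
+        */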
+       queue_desc->dmaindx_fbase = cpu_to_le64(ddp->xid | ((u64)ddp->udp));
+       queue_desc->flen_tph = cpu_to_le64(ddp->list_len |
+                               ((u64)(I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC |
+                               I40E_FCOE_QUEUE_CTX_DESC_TPHDATA) <<
+                               I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT));
+
+       /* Prepare filter_context_desc */
+       filter_desc = I40E_FILTER_CONTEXT_DESC(tx_ring, i);
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       fh = (struct fc_frame_header *)skb_transport_header(skb);
+       filter_desc->param = cpu_to_le32(ntohl(fh->fh_parm_offset));
+       filter_desc->seqn = cpu_to_le16(ntohs(fh->fh_seq_cnt));
+       filter_desc->rsvd_dmaindx = cpu_to_le16(ddp->xid <<
+                               I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT);
+
+       flags_rsvd_lanq = I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP;
+       flags_rsvd_lanq |= (u64)(target_mode ?
+                       I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP :
+                       I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT);
+
+       flags_rsvd_lanq |= (u64)((sof == FC_SOF_I2 || sof == FC_SOF_N2) ?
+                       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2 :
+                       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3);
+
+       flags_rsvd_lanq |= ((u64)skb->queue_mapping <<
+                               I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT);
+       filter_desc->flags_rsvd_lanq = cpu_to_le64(flags_rsvd_lanq);
+
+       /* By this time, all offload related descriptors have been programmed */
+       tx_ring->next_to_use = i;
+}
+
+/**
+ * i40e_fcoe_invalidate_ddp - invalidates DDP in case of abort
+ * @tx_ring: transmit ring for this packet
+ * @skb: the packet associated w/ this DDP invalidation, i.e., ABTS
+ * @ddp: the SW DDP context for this DDP
+ *
+ * Programs the Tx context descriptor to do DDP invalidation.
+ **/
+static void i40e_fcoe_invalidate_ddp(struct i40e_ring *tx_ring,
+                                    struct sk_buff *skb,
+                                    struct i40e_fcoe_ddp *ddp)
+{
+       struct i40e_tx_context_desc *context_desc;
+       int i;
+
+       if (test_and_set_bit(__I40E_FCOE_DDP_ABORTED, &ddp->flags))
+               return;
+
+       i = tx_ring->next_to_use;
+       context_desc = I40E_TX_CTXTDESC(tx_ring, i);
+       i++;
+       if (i == tx_ring->count)
+               i = 0;
+
+       context_desc->tunneling_params = cpu_to_le32(0);
+       context_desc->l2tag2 = cpu_to_le16(0);
+       context_desc->rsvd = cpu_to_le16(0);
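+       /* the CMD field carries both the DDP invalidation and the
+        * single-send opcodes for this FCoE context descriptor
+        */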
+       context_desc->type_cmd_tso_mss = cpu_to_le64(
+               I40E_TX_DESC_DTYPE_FCOE_CTX |
+               (I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL <<
+               I40E_TXD_CTX_QW1_CMD_SHIFT) |
+               (I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND <<
+               I40E_TXD_CTX_QW1_CMD_SHIFT));
+       tx_ring->next_to_use = i;
+}
+
+/**
+ * i40e_fcoe_handle_ddp - check whether we should set up or invalidate DDP
+ * @tx_ring: transmit ring for this packet
+ * @skb: the packet to be sent out
+ * @sof: the SOF to indicate class of service
+ *
+ * Determines if this is an ABTS/READ/XFER_RDY and whether there is
+ * a matching SW DDP context for this command. DDP is applicable
+ * only in case of READ if initiator or WRITE in case of
+ * responder (via checking XFER_RDY). In case of an ABTS, just
+ * invalidate the context.
+ **/
+static void i40e_fcoe_handle_ddp(struct i40e_ring *tx_ring,
+                                struct sk_buff *skb, u8 sof)
+{
+       struct i40e_pf *pf = tx_ring->vsi->back;
+       struct i40e_fcoe *fcoe = &pf->fcoe;
+       struct fc_frame_header *fh;
+       struct i40e_fcoe_ddp *ddp;
+       u32 f_ctl;
+       u8 r_ctl;
+       u16 xid;
+
+       fh = (struct fc_frame_header *)skb_transport_header(skb);
+       f_ctl = ntoh24(fh->fh_f_ctl);
+       r_ctl = fh->fh_r_ctl;
+       ddp = NULL;
+
+       if ((r_ctl == FC_RCTL_DD_DATA_DESC) && (f_ctl & FC_FC_EX_CTX)) {
+               /* exchange responder? if so, XFER_RDY for write */
+               xid = ntohs(fh->fh_rx_id);
+               if (i40e_fcoe_xid_is_valid(xid)) {
+                       ddp = &fcoe->ddp[xid];
+                       if ((ddp->xid == xid) &&
+                           (test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
+                               i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
+               }
+       } else if (r_ctl == FC_RCTL_DD_UNSOL_CMD) {
+               /* exchange originator, check READ cmd */
+               xid = ntohs(fh->fh_ox_id);
+               if (i40e_fcoe_xid_is_valid(xid)) {
+                       ddp = &fcoe->ddp[xid];
+                       if ((ddp->xid == xid) &&
+                           (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
+                               i40e_fcoe_program_ddp(tx_ring, skb, ddp, sof);
+               }
+       } else if (r_ctl == FC_RCTL_BA_ABTS) {
+               /* exchange originator, check ABTS */
+               xid = ntohs(fh->fh_ox_id);
+               if (i40e_fcoe_xid_is_valid(xid)) {
+                       ddp = &fcoe->ddp[xid];
+                       if ((ddp->xid == xid) &&
+                           (!test_bit(__I40E_FCOE_DDP_TARGET, &ddp->flags)))
+                               i40e_fcoe_invalidate_ddp(tx_ring, skb, ddp);
+               }
+       }
+}
+
+/**
+ * i40e_fcoe_tso - set up FCoE TSO
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @tx_flags: collected send information
+ * @hdr_len:  the tso header length
+ * @sof: the SOF to indicate class of service
+ *
+ * Note: sof must already have been checked to be either class 2 or class 3
+ * before calling this function.
+ *
+ * Returns 1 to indicate sequence segmentation offload is properly set up,
+ * 0 to indicate no tso is needed, otherwise returns an error code to
+ * drop the frame.
+ **/
+static int i40e_fcoe_tso(struct i40e_ring *tx_ring,
+                        struct sk_buff *skb,
+                        u32 tx_flags, u8 *hdr_len, u8 sof)
+{
+       struct i40e_tx_context_desc *context_desc;
+       u32 cd_type, cd_cmd, cd_tso_len, cd_mss;
+       struct fc_frame_header *fh;
+       u64 cd_type_cmd_tso_mss;
+
+       /* must match gso type as FCoE */
+       if (!skb_is_gso(skb))
+               return 0;
+
+       /* is it the expected gso type for FCoE? */
+       if (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) {
+               netdev_err(skb->dev,
+                          "wrong gso type %d:expecting SKB_GSO_FCOE\n",
+                          skb_shinfo(skb)->gso_type);
+               return -EINVAL;
+       }
+
+       /* header and trailer are inserted by hw */
+       *hdr_len = skb_transport_offset(skb) + sizeof(struct fc_frame_header) +
+                  sizeof(struct fcoe_crc_eof);
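+       /* both are excluded from the TSO length below since the hardware
+        * inserts them for each frame of the sequence
+        */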
+
+       /* check sof to decide a class 2 or 3 TSO */
+       if (likely(i40e_fcoe_sof_is_class3(sof)))
+               cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3;
+       else
+               cd_cmd = I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2;
+
+       /* does the frame carry a relative offset in its param field? */
+       fh = (struct fc_frame_header *)skb_transport_header(skb);
+       if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
+               cd_cmd |= I40E_FCOE_TX_CTX_DESC_RELOFF;
+
+       /* fill the field values */
+       cd_type = I40E_TX_DESC_DTYPE_FCOE_CTX;
+       cd_tso_len = skb->len - *hdr_len;
+       cd_mss = skb_shinfo(skb)->gso_size;
+       cd_type_cmd_tso_mss =
+               ((u64)cd_type  << I40E_TXD_CTX_QW1_DTYPE_SHIFT)     |
+               ((u64)cd_cmd     << I40E_TXD_CTX_QW1_CMD_SHIFT)     |
+               ((u64)cd_tso_len << I40E_TXD_CTX_QW1_TSO_LEN_SHIFT) |
+               ((u64)cd_mss     << I40E_TXD_CTX_QW1_MSS_SHIFT);
+
+       /* grab the next descriptor */
+       context_desc = I40E_TX_CTXTDESC(tx_ring, tx_ring->next_to_use);
+       tx_ring->next_to_use++;
+       if (tx_ring->next_to_use == tx_ring->count)
+               tx_ring->next_to_use = 0;
+
+       context_desc->tunneling_params = 0;
+       context_desc->l2tag2 = cpu_to_le16((tx_flags & I40E_TX_FLAGS_VLAN_MASK)
+                                           >> I40E_TX_FLAGS_VLAN_SHIFT);
+       context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
+
+       return 1;
+}
+
+/**
+ * i40e_fcoe_tx_map - build the tx descriptor
+ * @tx_ring:  ring to send buffer on
+ * @skb:      send buffer
+ * @first:    first buffer info buffer to use
+ * @tx_flags: collected send information
+ * @hdr_len:  ptr to the size of the packet header
+ * @eof:      the frame eof value
+ *
+ * Note, for FCoE, sof and eof are already checked
+ **/
+static void i40e_fcoe_tx_map(struct i40e_ring *tx_ring,
+                            struct sk_buff *skb,
+                            struct i40e_tx_buffer *first,
+                            u32 tx_flags, u8 hdr_len, u8 eof)
+{
+       u32 td_offset = 0;
+       u32 td_cmd = 0;
+       u32 maclen;
+
+       /* insert CRC */
+       td_cmd = I40E_TX_DESC_CMD_ICRC;
+
+       /* setup MACLEN */
+       maclen = skb_network_offset(skb);
+       if (tx_flags & I40E_TX_FLAGS_SW_VLAN)
+               maclen += sizeof(struct vlan_hdr);
+
+       if (skb->protocol == htons(ETH_P_FCOE)) {
+               /* for FCoE, maclen should exclude ether type */
+               maclen -= 2;
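+               /* the two EtherType bytes dropped from MACLEN here are
+                * accounted for in the FCoE header length below instead
+                */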
+               /* setup type as FCoE and EOF insertion */
+               td_cmd |= (I40E_TX_DESC_CMD_FCOET | i40e_fcoe_ctxt_eof(eof));
+               /* setup FCoELEN and FCLEN */
+               td_offset |= ((((sizeof(struct fcoe_hdr) + 2) >> 2) <<
+                               I40E_TX_DESC_LENGTH_IPLEN_SHIFT) |
+                             ((sizeof(struct fc_frame_header) >> 2) <<
+                               I40E_TX_DESC_LENGTH_L4_FC_LEN_SHIFT));
+               /* trim to exclude trailer */
+               pskb_trim(skb, skb->len - sizeof(struct fcoe_crc_eof));
+       }
+
+       /* MACLEN is ether header length in words not bytes */
+       td_offset |= (maclen >> 1) << I40E_TX_DESC_LENGTH_MACLEN_SHIFT;
+
+       return i40e_tx_map(tx_ring, skb, first, tx_flags, hdr_len,
+                          td_cmd, td_offset);
+}
+
+/**
+ * i40e_fcoe_set_skb_header - adjust skb header pointer for FIP/FCoE/FC
+ * @skb: the skb to be adjusted
+ *
+ * Returns 0 if this skb is an FCoE/FIP frame (possibly VLAN carried) and
+ * adjusts the skb header pointers correspondingly; otherwise returns -EINVAL.
+ **/
+static inline int i40e_fcoe_set_skb_header(struct sk_buff *skb)
+{
+       __be16 protocol = skb->protocol;
+
+       skb_reset_mac_header(skb);
+       skb->mac_len = sizeof(struct ethhdr);
+       if (protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *veth = (struct vlan_ethhdr *)eth_hdr(skb);
+
+               protocol = veth->h_vlan_encapsulated_proto;
+               skb->mac_len += sizeof(struct vlan_hdr);
+       }
+
+       /* FCoE or FIP only */
+       if ((protocol != htons(ETH_P_FIP)) &&
+           (protocol != htons(ETH_P_FCOE)))
+               return -EINVAL;
+
+       /* set header to L2 of FCoE/FIP */
+       skb_set_network_header(skb, skb->mac_len);
+       if (protocol == htons(ETH_P_FIP))
+               return 0;
+
+       /* set header to L3 of FC */
+       skb_set_transport_header(skb, skb->mac_len + sizeof(struct fcoe_hdr));
+       return 0;
+}
+
+/**
+ * i40e_fcoe_xmit_frame - transmit buffer
+ * @skb:     send buffer
+ * @netdev:  the fcoe netdev
+ *
+ * Returns NETDEV_TX_OK if sent or dropped, NETDEV_TX_BUSY if the ring is full
+ **/
+static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
+                                       struct net_device *netdev)
+{
+       struct i40e_netdev_priv *np = netdev_priv(skb->dev);
+       struct i40e_vsi *vsi = np->vsi;
+       struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
+       struct i40e_tx_buffer *first;
+       u32 tx_flags = 0;
+       u8 hdr_len = 0;
+       u8 sof = 0;
+       u8 eof = 0;
+       int fso;
+
+       if (i40e_fcoe_set_skb_header(skb))
+               goto out_drop;
+
+       if (!i40e_xmit_descriptor_count(skb, tx_ring))
+               return NETDEV_TX_BUSY;
+
+       /* prepare the xmit flags */
+       if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
+               goto out_drop;
+
+       /* record the location of the first descriptor for this packet */
+       first = &tx_ring->tx_bi[tx_ring->next_to_use];
+
+       /* FIP is regular L2 traffic w/o offload */
+       if (skb->protocol == htons(ETH_P_FIP))
+               goto out_send;
+
+       /* check sof and eof, only supports FC Class 2 or 3 */
+       if (i40e_fcoe_fc_sof(skb, &sof) || i40e_fcoe_fc_eof(skb, &eof)) {
+               netdev_err(netdev, "SOF/EOF error:%02x - %02x\n", sof, eof);
+               goto out_drop;
+       }
+
+       /* always do FCCRC for FCoE */
+       tx_flags |= I40E_TX_FLAGS_FCCRC;
+
+       /* check we should do sequence offload */
+       fso = i40e_fcoe_tso(tx_ring, skb, tx_flags, &hdr_len, sof);
+       if (fso < 0)
+               goto out_drop;
+       else if (fso)
+               tx_flags |= I40E_TX_FLAGS_FSO;
+       else
+               i40e_fcoe_handle_ddp(tx_ring, skb, sof);
+
+out_send:
+       /* send out the packet */
+       i40e_fcoe_tx_map(tx_ring, skb, first, tx_flags, hdr_len, eof);
+
+       i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
+       return NETDEV_TX_OK;
+
+out_drop:
+       dev_kfree_skb_any(skb);
+       return NETDEV_TX_OK;
+}
+
+/**
+ * i40e_fcoe_change_mtu - NDO callback to change the Maximum Transmission Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns error as operation not permitted
+ *
+ **/
+static int i40e_fcoe_change_mtu(struct net_device *netdev, int new_mtu)
+{
+       netdev_warn(netdev, "MTU change is not supported on FCoE interfaces\n");
+       return -EPERM;
+}
+
+/**
+ * i40e_fcoe_set_features - set the netdev feature flags
+ * @netdev: ptr to the netdev being adjusted
+ * @features: the feature set that the stack is suggesting
+ *
+ **/
+static int i40e_fcoe_set_features(struct net_device *netdev,
+                                 netdev_features_t features)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_vsi *vsi = np->vsi;
+
+       if (features & NETIF_F_HW_VLAN_CTAG_RX)
+               i40e_vlan_stripping_enable(vsi);
+       else
+               i40e_vlan_stripping_disable(vsi);
+
+       return 0;
+}
+
+static const struct net_device_ops i40e_fcoe_netdev_ops = {
+       .ndo_open               = i40e_open,
+       .ndo_stop               = i40e_close,
+       .ndo_get_stats64        = i40e_get_netdev_stats_struct,
+       .ndo_set_rx_mode        = i40e_set_rx_mode,
+       .ndo_validate_addr      = eth_validate_addr,
+       .ndo_set_mac_address    = i40e_set_mac,
+       .ndo_change_mtu         = i40e_fcoe_change_mtu,
+       .ndo_do_ioctl           = i40e_ioctl,
+       .ndo_tx_timeout         = i40e_tx_timeout,
+       .ndo_vlan_rx_add_vid    = i40e_vlan_rx_add_vid,
+       .ndo_vlan_rx_kill_vid   = i40e_vlan_rx_kill_vid,
+       .ndo_setup_tc           = i40e_setup_tc,
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = i40e_netpoll,
+#endif
+       .ndo_start_xmit         = i40e_fcoe_xmit_frame,
+       .ndo_fcoe_enable        = i40e_fcoe_enable,
+       .ndo_fcoe_disable       = i40e_fcoe_disable,
+       .ndo_fcoe_ddp_setup     = i40e_fcoe_ddp_get,
+       .ndo_fcoe_ddp_done      = i40e_fcoe_ddp_put,
+       .ndo_fcoe_ddp_target    = i40e_fcoe_ddp_target,
+       .ndo_set_features       = i40e_fcoe_set_features,
+};
+
+/**
+ * i40e_fcoe_config_netdev - prepares the netdev for this FCoE VSI
+ * @netdev: the associated net_device
+ * @vsi: pointer to the associated VSI struct
+ *
+ * Returns nothing
+ **/
+void i40e_fcoe_config_netdev(struct net_device *netdev, struct i40e_vsi *vsi)
+{
+       struct i40e_hw *hw = &vsi->back->hw;
+       struct i40e_pf *pf = vsi->back;
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+       netdev->features = (NETIF_F_HW_VLAN_CTAG_TX |
+                           NETIF_F_HW_VLAN_CTAG_RX |
+                           NETIF_F_HW_VLAN_CTAG_FILTER);
+
+       netdev->vlan_features = netdev->features;
+       netdev->vlan_features &= ~(NETIF_F_HW_VLAN_CTAG_TX |
+                                  NETIF_F_HW_VLAN_CTAG_RX |
+                                  NETIF_F_HW_VLAN_CTAG_FILTER);
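+       /* advertise the highest DDP-capable exchange id to the FCoE stack */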
+       netdev->fcoe_ddp_xid = I40E_FCOE_DDP_MAX - 1;
+       netdev->features |= NETIF_F_ALL_FCOE;
+       netdev->vlan_features |= NETIF_F_ALL_FCOE;
+       netdev->hw_features |= netdev->features;
+       netdev->priv_flags |= IFF_UNICAST_FLT;
+       netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+       strlcpy(netdev->name, "fcoe%d", IFNAMSIZ-1);
+       netdev->mtu = FCOE_MTU;
+       SET_NETDEV_DEV(netdev, &pf->pdev->dev);
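+       /* accept the SAN MAC plus the FIP/FCoE multicast groups so that
+        * discovery and VN2VN/point-to-point frames are not dropped
+        */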
+       i40e_add_filter(vsi, hw->mac.san_addr, 0, false, false);
+       i40e_add_filter(vsi, (u8[6]) FC_FCOE_FLOGI_MAC, 0, false, false);
+       i40e_add_filter(vsi, FIP_ALL_FCOE_MACS, 0, false, false);
+       i40e_add_filter(vsi, FIP_ALL_ENODE_MACS, 0, false, false);
+       i40e_add_filter(vsi, FIP_ALL_VN2VN_MACS, 0, false, false);
+       i40e_add_filter(vsi, FIP_ALL_P2P_MACS, 0, false, false);
+
+       /* use san mac */
+       ether_addr_copy(netdev->dev_addr, hw->mac.san_addr);
+       ether_addr_copy(netdev->perm_addr, hw->mac.san_addr);
+       /* fcoe netdev ops */
+       netdev->netdev_ops = &i40e_fcoe_netdev_ops;
+}
+
+/**
+ * i40e_fcoe_vsi_setup - allocate and set up FCoE VSI
+ * @pf: the PF that the VSI is associated with
+ *
+ **/
+void i40e_fcoe_vsi_setup(struct i40e_pf *pf)
+{
+       struct i40e_vsi *vsi;
+       u16 seid;
+       int i;
+
+       if (!(pf->flags & I40E_FLAG_FCOE_ENABLED))
+               return;
+
+       BUG_ON(!pf->vsi[pf->lan_vsi]);
+
+       for (i = 0; i < pf->num_alloc_vsi; i++) {
+               vsi = pf->vsi[i];
+               if (vsi && vsi->type == I40E_VSI_FCOE) {
+                       dev_warn(&pf->pdev->dev,
+                                "FCoE VSI already created\n");
+                       return;
+               }
+       }
+
+       seid = pf->vsi[pf->lan_vsi]->seid;
+       vsi = i40e_vsi_setup(pf, I40E_VSI_FCOE, seid, 0);
+       if (vsi) {
+               dev_dbg(&pf->pdev->dev,
+                       "Successfully created FCoE VSI seid %d id %d uplink_seid %d pf seid %d\n",
+                       vsi->seid, vsi->id, vsi->uplink_seid, seid);
+       } else {
+               dev_info(&pf->pdev->dev, "Failed to create FCoE VSI\n");
+       }
+}
diff --git a/drivers/net/ethernet/intel/i40e/i40e_fcoe.h b/drivers/net/ethernet/intel/i40e/i40e_fcoe.h
new file mode 100644 (file)
index 0000000..21e0f58
--- /dev/null
@@ -0,0 +1,128 @@
+/*******************************************************************************
+ *
+ * Intel Ethernet Controller XL710 Family Linux Driver
+ * Copyright(c) 2013 - 2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program.  If not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ *
+ ******************************************************************************/
+
+#ifndef _I40E_FCOE_H_
+#define _I40E_FCOE_H_
+
+/* FCoE HW context helper macros */
+#define I40E_DDP_CONTEXT_DESC(R, i)     \
+       (&(((struct i40e_fcoe_ddp_context_desc *)((R)->desc))[i]))
+
+#define I40E_QUEUE_CONTEXT_DESC(R, i)   \
+       (&(((struct i40e_fcoe_queue_context_desc *)((R)->desc))[i]))
+
+#define I40E_FILTER_CONTEXT_DESC(R, i)  \
+       (&(((struct i40e_fcoe_filter_context_desc *)((R)->desc))[i]))
+
+
+/* receive queue descriptor filter status for FCoE */
+#define I40E_RX_DESC_FLTSTAT_FCMASK    0x3
+#define I40E_RX_DESC_FLTSTAT_NOMTCH    0x0     /* no ddp context match */
+#define I40E_RX_DESC_FLTSTAT_NODDP     0x1     /* no ddp due to error */
+#define I40E_RX_DESC_FLTSTAT_DDP       0x2     /* DDPed payload, post header */
+#define I40E_RX_DESC_FLTSTAT_FCPRSP    0x3     /* FCP_RSP */
+
+/* receive queue descriptor error codes for FCoE */
+#define I40E_RX_DESC_FCOE_ERROR_MASK           \
+       (I40E_RX_DESC_ERROR_L3L4E_PROT |        \
+        I40E_RX_DESC_ERROR_L3L4E_FC |          \
+        I40E_RX_DESC_ERROR_L3L4E_DMAC_ERR |    \
+        I40E_RX_DESC_ERROR_L3L4E_DMAC_WARN)
+
+/* receive queue descriptor programming error */
+#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL(e)    \
+       (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT) & 0x1)
+
+#define I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)    \
+       (((e) >> I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT) & 0x1)
+
+#define I40E_RX_PROG_FCOE_ERROR_TBL_FULL_BIT   \
+       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_TBL_FULL_SHIFT)
+#define I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT   \
+       (1 << I40E_RX_PROG_STATUS_DESC_FCOE_CONFLICT_SHIFT)
+
+#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL(e)    \
+       I40E_RX_PROG_FCOE_ERROR_CONFLICT(e)
+#define I40E_RX_PROG_FCOE_ERROR_INVLFAIL_BIT   \
+       I40E_RX_PROG_FCOE_ERROR_CONFLICT_BIT
+
+/* FCoE DDP related definitions */
+#define I40E_FCOE_MIN_XID      0x0000  /* the min xid supported by fcoe_sw */
+#define I40E_FCOE_MAX_XID      0x0FFF  /* the max xid supported by fcoe_sw */
+#define I40E_FCOE_DDP_BUFFCNT_MAX      512     /* 9 bits bufcnt */
+#define I40E_FCOE_DDP_PTR_ALIGN                16
+#define I40E_FCOE_DDP_PTR_MAX  (I40E_FCOE_DDP_BUFFCNT_MAX * sizeof(dma_addr_t))
+#define I40E_FCOE_DDP_BUF_MIN  4096
+#define I40E_FCOE_DDP_MAX      2048
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT  8
+
+/* supported netdev features for FCoE */
+#define I40E_FCOE_NETIF_FEATURES (NETIF_F_ALL_FCOE | \
+       NETIF_F_HW_VLAN_CTAG_TX | \
+       NETIF_F_HW_VLAN_CTAG_RX | \
+       NETIF_F_HW_VLAN_CTAG_FILTER)
+
+/* DDP context flags */
+enum i40e_fcoe_ddp_flags {
+       __I40E_FCOE_DDP_NONE = 1,
+       __I40E_FCOE_DDP_TARGET,
+       __I40E_FCOE_DDP_INITALIZED,
+       __I40E_FCOE_DDP_PROGRAMMED,
+       __I40E_FCOE_DDP_DONE,
+       __I40E_FCOE_DDP_ABORTED,
+       __I40E_FCOE_DDP_UNMAPPED,
+};
+
+/* DDP SW context struct */
+struct i40e_fcoe_ddp {
+       int len;
+       u16 xid;
+       u16 firstoff;
+       u16 lastsize;
+       u16 list_len;
+       u8 fcerr;
+       u8 prerr;
+       unsigned long flags;
+       unsigned int sgc;
+       struct scatterlist *sgl;
+       dma_addr_t udp;
+       u64 *udl;
+       struct dma_pool *pool;
+};
+
+struct i40e_fcoe_ddp_pool {
+       struct dma_pool *pool;
+};
+
+struct i40e_fcoe {
+       unsigned long mode;
+       atomic_t refcnt;
+       struct i40e_fcoe_ddp_pool __percpu *ddp_pool;
+       struct i40e_fcoe_ddp ddp[I40E_FCOE_DDP_MAX];
+};
+
+#endif /* _I40E_FCOE_H_ */
index b45d8fe..732a026 100644 (file)
@@ -127,7 +127,7 @@ struct i40e_hmc_info {
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
                (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +146,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
index 870ab1e..4627588 100644 (file)
@@ -417,7 +417,6 @@ static i40e_status i40e_create_lan_hmc_object(struct i40e_hw *hw,
                        default:
                                ret_code = I40E_ERR_INVALID_SD_TYPE;
                                goto exit;
-                               break;
                        }
                }
        }
@@ -502,7 +501,6 @@ try_type_paged:
                hw_dbg(hw, "i40e_configure_lan_hmc: Unknown SD type: %d\n",
                          ret_code);
                goto configure_lan_hmc_out;
-               break;
        }
 
        /* Configure and program the FPM registers so objects can be created */
@@ -746,6 +744,194 @@ static struct i40e_context_ele i40e_hmc_rxq_ce_info[] = {
        { 0 }
 };
 
+/**
+ * i40e_write_byte - replace HMC context byte
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @src: the struct to be read from
+ **/
+static void i40e_write_byte(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *src)
+{
+       u8 src_byte, dest_byte, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = ((u8)1 << ce_info->width) - 1;
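+       /* e.g. a 3-bit field at lsb 13 lands in byte 1 (13 / 8) with
+        * shift_width 5 (13 % 8); mask 0x07 becomes 0xe0 below
+        */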
+
+       src_byte = *from;
+       src_byte &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_byte <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_byte, dest, sizeof(dest_byte));
+
+       dest_byte &= ~mask;     /* get the bits not changing */
+       dest_byte |= src_byte;  /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_byte, sizeof(dest_byte));
+}
+
+/**
+ * i40e_write_word - replace HMC context word
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @src: the struct to be read from
+ **/
+static void i40e_write_word(u8 *hmc_bits,
+                           struct i40e_context_ele *ce_info,
+                           u8 *src)
+{
+       u16 src_word, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le16 dest_word;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+       mask = ((u16)1 << ce_info->width) - 1;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_word = *(u16 *)from;
+       src_word &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_word <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_word, dest, sizeof(dest_word));
+
+       dest_word &= ~(cpu_to_le16(mask));      /* get the bits not changing */
+       dest_word |= cpu_to_le16(src_word);     /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_word, sizeof(dest_word));
+}
+
+/**
+ * i40e_write_dword - replace HMC context dword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @src: the struct to be read from
+ **/
+static void i40e_write_dword(u8 *hmc_bits,
+                            struct i40e_context_ele *ce_info,
+                            u8 *src)
+{
+       u32 src_dword, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le32 dest_dword;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 32 on an x86 machine, then the shift
+        * operation will not work because the SHL instruction's count is
+        * masked to 5 bits, so the shift will do nothing
+        */
+       if (ce_info->width < 32)
+               mask = ((u32)1 << ce_info->width) - 1;
+       else
+               mask = 0xFFFFFFFF;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_dword = *(u32 *)from;
+       src_dword &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_dword <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_dword, dest, sizeof(dest_dword));
+
+       dest_dword &= ~(cpu_to_le32(mask));     /* get the bits not changing */
+       dest_dword |= cpu_to_le32(src_dword);   /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_dword, sizeof(dest_dword));
+}
+
+/**
+ * i40e_write_qword - replace HMC context qword
+ * @hmc_bits: pointer to the HMC memory
+ * @ce_info: a description of the struct to be filled
+ * @src: the struct to be read from
+ **/
+static void i40e_write_qword(u8 *hmc_bits,
+                            struct i40e_context_ele *ce_info,
+                            u8 *src)
+{
+       u64 src_qword, mask;
+       u8 *from, *dest;
+       u16 shift_width;
+       __le64 dest_qword;
+
+       /* copy from the next struct field */
+       from = src + ce_info->offset;
+
+       /* prepare the bits and mask */
+       shift_width = ce_info->lsb % 8;
+
+       /* if the field width is exactly 64 on an x86 machine, then the shift
+        * operation will not work because the SHL instruction's count is
+        * masked to 6 bits, so the shift will do nothing
+        */
+       if (ce_info->width < 64)
+               mask = ((u64)1 << ce_info->width) - 1;
+       else
+               mask = 0xFFFFFFFFFFFFFFFF;
+
+       /* don't swizzle the bits until after the mask because the mask bits
+        * will be in a different bit position on big endian machines
+        */
+       src_qword = *(u64 *)from;
+       src_qword &= mask;
+
+       /* shift to correct alignment */
+       mask <<= shift_width;
+       src_qword <<= shift_width;
+
+       /* get the current bits from the target bit string */
+       dest = hmc_bits + (ce_info->lsb / 8);
+
+       memcpy(&dest_qword, dest, sizeof(dest_qword));
+
+       dest_qword &= ~(cpu_to_le64(mask));     /* get the bits not changing */
+       dest_qword |= cpu_to_le64(src_qword);   /* add in the new bits */
+
+       /* put it all back */
+       memcpy(dest, &dest_qword, sizeof(dest_qword));
+}
+
 /**
  * i40e_clear_hmc_context - zero out the HMC context bits
  * @hw:       the hardware struct
@@ -772,71 +958,28 @@ static i40e_status i40e_set_hmc_context(u8 *context_bytes,
                                        struct i40e_context_ele *ce_info,
                                        u8 *dest)
 {
-       u16 shift_width;
-       u64 bitfield;
-       u8 hi_byte;
-       u8 hi_mask;
-       u64 t_bits;
-       u64 mask;
-       u8 *p;
        int f;
 
        for (f = 0; ce_info[f].width != 0; f++) {
-               /* clear out the field */
-               bitfield = 0;
 
-               /* copy from the next struct field */
-               p = dest + ce_info[f].offset;
+               /* we have to deal with each element of the HMC using the
+                * correct size so that we are correct regardless of the
+                * endianness of the machine
+                */
                switch (ce_info[f].size_of) {
                case 1:
-                       bitfield = *p;
+                       i40e_write_byte(context_bytes, &ce_info[f], dest);
                        break;
                case 2:
-                       bitfield = cpu_to_le16(*(u16 *)p);
+                       i40e_write_word(context_bytes, &ce_info[f], dest);
                        break;
                case 4:
-                       bitfield = cpu_to_le32(*(u32 *)p);
+                       i40e_write_dword(context_bytes, &ce_info[f], dest);
                        break;
                case 8:
-                       bitfield = cpu_to_le64(*(u64 *)p);
+                       i40e_write_qword(context_bytes, &ce_info[f], dest);
                        break;
                }
-
-               /* prepare the bits and mask */
-               shift_width = ce_info[f].lsb % 8;
-               mask = ((u64)1 << ce_info[f].width) - 1;
-
-               /* save upper bytes for special case */
-               hi_mask = (u8)((mask >> 56) & 0xff);
-               hi_byte = (u8)((bitfield >> 56) & 0xff);
-
-               /* shift to correct alignment */
-               mask <<= shift_width;
-               bitfield <<= shift_width;
-
-               /* get the current bits from the target bit string */
-               p = context_bytes + (ce_info[f].lsb / 8);
-               memcpy(&t_bits, p, sizeof(u64));
-
-               t_bits &= ~mask;          /* get the bits not changing */
-               t_bits |= bitfield;       /* add in the new bits */
-
-               /* put it all back */
-               memcpy(p, &t_bits, sizeof(u64));
-
-               /* deal with the special case if needed
-                * example: 62 bit field that starts in bit 5 of first byte
-                *          will overlap 3 bits into byte 9
-                */
-               if ((shift_width + ce_info[f].width) > 64) {
-                       u8 byte;
-
-                       hi_mask >>= (8 - shift_width);
-                       hi_byte >>= (8 - shift_width);
-                       byte = p[8] & ~hi_mask;  /* get the bits not changing */
-                       byte |= hi_byte;         /* add in the new bits */
-                       p[8] = byte;             /* put it back */
-               }
        }
 
        return 0;
index eb65fe2..e74128d 100644 (file)
@@ -32,16 +32,22 @@ struct i40e_hw;
 
 /* HMC element context information */
 
-/* Rx queue context data */
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
 struct i40e_hmc_obj_rxq {
        u16 head;
-       u8  cpuid;
+       u16 cpuid; /* bigger than needed, see above for reason */
        u64 base;
        u16 qlen;
 #define I40E_RXQ_CTX_DBUFF_SHIFT 7
-       u8  dbuff;
+       u16 dbuff; /* bigger than needed, see above for reason */
 #define I40E_RXQ_CTX_HBUFF_SHIFT 6
-       u8  hbuff;
+       u16 hbuff; /* bigger than needed, see above for reason */
        u8  dtype;
        u8  dsize;
        u8  crcstrip;
@@ -50,16 +56,22 @@ struct i40e_hmc_obj_rxq {
        u8  hsplit_0;
        u8  hsplit_1;
        u8  showiv;
-       u16 rxmax;
+       u32 rxmax; /* bigger than needed, see above for reason */
        u8  tphrdesc_ena;
        u8  tphwdesc_ena;
        u8  tphdata_ena;
        u8  tphhead_ena;
-       u8  lrxqthresh;
+       u16 lrxqthresh; /* bigger than needed, see above for reason */
        u8  prefena;    /* NOTE: normally must be set to 1 at init */
 };
 
-/* Tx queue context data */
+/* Tx queue context data
+*
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
        u16 head;
        u8  new_context;
@@ -69,7 +81,7 @@ struct i40e_hmc_obj_txq {
        u8  fd_ena;
        u8  alt_vlan_ena;
        u16 thead_wb;
-       u16 cpuid;
+       u8  cpuid;
        u8  head_wb_ena;
        u16 qlen;
        u8  tphrdesc_ena;
index 275ca9a..51bc030 100644 (file)
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =
 
 #define DRV_VERSION_MAJOR 0
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 10
+#define DRV_VERSION_BUILD 21
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
             __stringify(DRV_VERSION_MINOR) "." \
             __stringify(DRV_VERSION_BUILD)    DRV_KERN
@@ -269,7 +269,11 @@ static void i40e_service_event_schedule(struct i40e_pf *pf)
  * device is munged, not just the one netdev port, so go for the full
  * reset.
  **/
+#ifdef I40E_FCOE
+void i40e_tx_timeout(struct net_device *netdev)
+#else
 static void i40e_tx_timeout(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -278,7 +282,7 @@ static void i40e_tx_timeout(struct net_device *netdev)
        pf->tx_timeout_count++;
 
        if (time_after(jiffies, (pf->tx_timeout_last_recovery + HZ*20)))
-               pf->tx_timeout_recovery_level = 0;
+               pf->tx_timeout_recovery_level = 1;
        pf->tx_timeout_last_recovery = jiffies;
        netdev_info(netdev, "tx_timeout recovery level %d\n",
                    pf->tx_timeout_recovery_level);
@@ -304,8 +308,8 @@ static void i40e_tx_timeout(struct net_device *netdev)
                break;
        default:
                netdev_err(netdev, "tx_timeout recovery unsuccessful\n");
-               set_bit(__I40E_DOWN, &vsi->state);
-               i40e_down(vsi);
+               set_bit(__I40E_DOWN_REQUESTED, &pf->state);
+               set_bit(__I40E_DOWN_REQUESTED, &vsi->state);
                break;
        }
        i40e_service_event_schedule(pf);
@@ -349,9 +353,15 @@ struct rtnl_link_stats64 *i40e_get_vsi_stats_struct(struct i40e_vsi *vsi)
  * Returns the address of the device statistics structure.
  * The statistics are actually updated from the service task.
  **/
+#ifdef I40E_FCOE
+struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
+                                            struct net_device *netdev,
+                                            struct rtnl_link_stats64 *stats)
+#else
 static struct rtnl_link_stats64 *i40e_get_netdev_stats_struct(
                                             struct net_device *netdev,
                                             struct rtnl_link_stats64 *stats)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_ring *tx_ring, *rx_ring;
@@ -444,9 +454,21 @@ void i40e_vsi_reset_stats(struct i40e_vsi *vsi)
  **/
 void i40e_pf_reset_stats(struct i40e_pf *pf)
 {
+       int i;
+
        memset(&pf->stats, 0, sizeof(pf->stats));
        memset(&pf->stats_offsets, 0, sizeof(pf->stats_offsets));
        pf->stat_offsets_loaded = false;
+
+       for (i = 0; i < I40E_MAX_VEB; i++) {
+               if (pf->veb[i]) {
+                       memset(&pf->veb[i]->stats, 0,
+                              sizeof(pf->veb[i]->stats));
+                       memset(&pf->veb[i]->stats_offsets, 0,
+                              sizeof(pf->veb[i]->stats_offsets));
+                       pf->veb[i]->stat_offsets_loaded = false;
+               }
+       }
 }
 
 /**
@@ -624,6 +646,55 @@ static void i40e_update_veb_stats(struct i40e_veb *veb)
        veb->stat_offsets_loaded = true;
 }
 
+#ifdef I40E_FCOE
+/**
+ * i40e_update_fcoe_stats - Update FCoE-specific ethernet statistics counters.
+ * @vsi: the VSI that is capable of doing FCoE
+ **/
+static void i40e_update_fcoe_stats(struct i40e_vsi *vsi)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       struct i40e_fcoe_stats *ofs;
+       struct i40e_fcoe_stats *fs;     /* device's FCoE stats */
+       int idx;
+
+       if (vsi->type != I40E_VSI_FCOE)
+               return;
+
+       idx = (pf->pf_seid - I40E_BASE_PF_SEID) + I40E_FCOE_PF_STAT_OFFSET;
+       fs = &vsi->fcoe_stats;
+       ofs = &vsi->fcoe_stats_offsets;
+
+       i40e_stat_update32(hw, I40E_GL_FCOEPRC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_packets, &fs->rx_fcoe_packets);
+       i40e_stat_update48(hw, I40E_GL_FCOEDWRCH(idx), I40E_GL_FCOEDWRCL(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_dwords, &fs->rx_fcoe_dwords);
+       i40e_stat_update32(hw, I40E_GL_FCOERPDC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->rx_fcoe_dropped, &fs->rx_fcoe_dropped);
+       i40e_stat_update32(hw, I40E_GL_FCOEPTC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->tx_fcoe_packets, &fs->tx_fcoe_packets);
+       i40e_stat_update48(hw, I40E_GL_FCOEDWTCH(idx), I40E_GL_FCOEDWTCL(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->tx_fcoe_dwords, &fs->tx_fcoe_dwords);
+       i40e_stat_update32(hw, I40E_GL_FCOECRC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_bad_fccrc, &fs->fcoe_bad_fccrc);
+       i40e_stat_update32(hw, I40E_GL_FCOELAST(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_last_error, &fs->fcoe_last_error);
+       i40e_stat_update32(hw, I40E_GL_FCOEDDPC(idx),
+                          vsi->fcoe_stat_offsets_loaded,
+                          &ofs->fcoe_ddp_count, &fs->fcoe_ddp_count);
+
+       vsi->fcoe_stat_offsets_loaded = true;
+}
+
+#endif
 /**
  * i40e_update_link_xoff_rx - Update XOFF received in link flow control mode
  * @pf: the corresponding PF
@@ -1052,6 +1123,9 @@ void i40e_update_stats(struct i40e_vsi *vsi)
                i40e_update_pf_stats(pf);
 
        i40e_update_vsi_stats(vsi);
+#ifdef I40E_FCOE
+       i40e_update_fcoe_stats(vsi);
+#endif
 }
 
 /**
@@ -1303,7 +1377,11 @@ void i40e_del_filter(struct i40e_vsi *vsi,
  *
  * Returns 0 on success, negative on failure
  **/
+#ifdef I40E_FCOE
+int i40e_set_mac(struct net_device *netdev, void *p)
+#else
 static int i40e_set_mac(struct net_device *netdev, void *p)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -1315,9 +1393,6 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
 
        netdev_info(netdev, "set mac address=%pM\n", addr->sa_data);
 
-       if (ether_addr_equal(netdev->dev_addr, addr->sa_data))
-               return 0;
-
        if (test_bit(__I40E_DOWN, &vsi->back->state) ||
            test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
                return -EADDRNOTAVAIL;
@@ -1325,7 +1400,7 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
        if (vsi->type == I40E_VSI_MAIN) {
                i40e_status ret;
                ret = i40e_aq_mac_address_write(&vsi->back->hw,
-                                               I40E_AQC_WRITE_TYPE_LAA_ONLY,
+                                               I40E_AQC_WRITE_TYPE_LAA_WOL,
                                                addr->sa_data, NULL);
                if (ret) {
                        netdev_info(netdev,
@@ -1333,22 +1408,27 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
                                    ret);
                        return -EADDRNOTAVAIL;
                }
-
-               ether_addr_copy(vsi->back->hw.mac.addr, addr->sa_data);
        }
 
-       /* In order to be sure to not drop any packets, add the new address
-        * then delete the old one.
-        */
-       f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY, false, false);
-       if (!f)
-               return -ENOMEM;
+       f = i40e_find_mac(vsi, addr->sa_data, false, true);
+       if (!f) {
+               /* To be sure not to drop any packets, add the new
+                * address first, then delete the old one.
+                */
+               f = i40e_add_filter(vsi, addr->sa_data, I40E_VLAN_ANY,
+                                   false, false);
+               if (!f)
+                       return -ENOMEM;
 
-       i40e_sync_vsi_filters(vsi);
-       i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY, false, false);
-       i40e_sync_vsi_filters(vsi);
+               i40e_sync_vsi_filters(vsi);
+               i40e_del_filter(vsi, netdev->dev_addr, I40E_VLAN_ANY,
+                               false, false);
+               i40e_sync_vsi_filters(vsi);
+       }
 
-       ether_addr_copy(netdev->dev_addr, addr->sa_data);
+       f->is_laa = true;
+       if (!ether_addr_equal(netdev->dev_addr, addr->sa_data))
+               ether_addr_copy(netdev->dev_addr, addr->sa_data);
 
        return 0;
 }
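
The filter shuffle above is deliberately make-before-break: the new unicast address is installed and synced before the old one is deleted, so the VSI never runs without a filter and no frames are dropped mid-change. As a rough userspace model of that ordering (add_filter()/del_filter() are invented stand-ins for the i40e helpers):

/* Hedged sketch of the make-before-break MAC swap in i40e_set_mac().
 * A two-slot array models the VSI filter table.
 */
#include <stdio.h>
#include <string.h>

static char active[2][18];              /* toy filter table */

static int add_filter(const char *mac)
{
        for (int i = 0; i < 2; i++)
                if (!active[i][0]) {
                        strncpy(active[i], mac, sizeof(active[i]) - 1);
                        return 0;
                }
        return -1;
}

static void del_filter(const char *mac)
{
        for (int i = 0; i < 2; i++)
                if (!strcmp(active[i], mac))
                        active[i][0] = '\0';
}

static int set_mac(char *cur, const char *next)
{
        if (add_filter(next))           /* add the new address first ... */
                return -1;
        del_filter(cur);                /* ... then drop the old one */
        strcpy(cur, next);
        return 0;
}

int main(void)
{
        char cur[18] = "00:11:22:33:44:55";

        add_filter(cur);
        set_mac(cur, "66:77:88:99:aa:bb");
        printf("current MAC: %s\n", cur);
        return 0;
}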
@@ -1362,10 +1442,17 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
  *
  * Setup VSI queue mapping for enabled traffic classes.
  **/
+#ifdef I40E_FCOE
+void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
+                             struct i40e_vsi_context *ctxt,
+                             u8 enabled_tc,
+                             bool is_add)
+#else
 static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                                     struct i40e_vsi_context *ctxt,
                                     u8 enabled_tc,
                                     bool is_add)
+#endif
 {
        struct i40e_pf *pf = vsi->back;
        u16 sections = 0;
@@ -1411,6 +1498,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
                        case I40E_VSI_MAIN:
                                qcount = min_t(int, pf->rss_size, num_tc_qps);
                                break;
+#ifdef I40E_FCOE
+                       case I40E_VSI_FCOE:
+                               qcount = num_tc_qps;
+                               break;
+#endif
                        case I40E_VSI_FDIR:
                        case I40E_VSI_SRIOV:
                        case I40E_VSI_VMDQ2:
@@ -1477,7 +1569,11 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
  * i40e_set_rx_mode - NDO callback to set the netdev filters
  * @netdev: network interface device structure
  **/
+#ifdef I40E_FCOE
+void i40e_set_rx_mode(struct net_device *netdev)
+#else
 static void i40e_set_rx_mode(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_mac_filter *f, *ftmp;
@@ -2055,8 +2151,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
  *
  * net_device_ops implementation for adding vlan ids
  **/
+#ifdef I40E_FCOE
+int i40e_vlan_rx_add_vid(struct net_device *netdev,
+                        __always_unused __be16 proto, u16 vid)
+#else
 static int i40e_vlan_rx_add_vid(struct net_device *netdev,
                                __always_unused __be16 proto, u16 vid)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2089,8 +2190,13 @@ static int i40e_vlan_rx_add_vid(struct net_device *netdev,
  *
  * net_device_ops implementation for removing vlan ids
  **/
+#ifdef I40E_FCOE
+int i40e_vlan_rx_kill_vid(struct net_device *netdev,
+                         __always_unused __be16 proto, u16 vid)
+#else
 static int i40e_vlan_rx_kill_vid(struct net_device *netdev,
                                 __always_unused __be16 proto, u16 vid)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -2222,6 +2328,9 @@ static int i40e_vsi_setup_rx_resources(struct i40e_vsi *vsi)
 
        for (i = 0; i < vsi->num_queue_pairs && !err; i++)
                err = i40e_setup_rx_descriptors(vsi->rx_rings[i]);
+#ifdef I40E_FCOE
+       i40e_fcoe_setup_ddp_resources(vsi);
+#endif
        return err;
 }
 
@@ -2241,6 +2350,9 @@ static void i40e_vsi_free_rx_resources(struct i40e_vsi *vsi)
        for (i = 0; i < vsi->num_queue_pairs; i++)
                if (vsi->rx_rings[i] && vsi->rx_rings[i]->desc)
                        i40e_free_rx_resources(vsi->rx_rings[i]);
+#ifdef I40E_FCOE
+       i40e_fcoe_free_ddp_resources(vsi);
+#endif
 }
 
 /**
@@ -2282,6 +2394,9 @@ static int i40e_configure_tx_ring(struct i40e_ring *ring)
        tx_ctx.qlen = ring->count;
        tx_ctx.fd_ena = !!(vsi->back->flags & (I40E_FLAG_FD_SB_ENABLED |
                                               I40E_FLAG_FD_ATR_ENABLED));
+#ifdef I40E_FCOE
+       tx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
+#endif
        tx_ctx.timesync_ena = !!(vsi->back->flags & I40E_FLAG_PTP);
        /* FDIR VSI tx ring can still use RS bit and writebacks */
        if (vsi->type != I40E_VSI_FDIR)
@@ -2387,10 +2502,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
 
        rx_ctx.rxmax = min_t(u16, vsi->max_frame,
                                  (chain_len * ring->rx_buf_len));
-       rx_ctx.tphrdesc_ena = 1;
-       rx_ctx.tphwdesc_ena = 1;
-       rx_ctx.tphdata_ena = 1;
-       rx_ctx.tphhead_ena = 1;
        if (hw->revision_id == 0)
                rx_ctx.lrxqthresh = 0;
        else
@@ -2398,6 +2509,9 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
        rx_ctx.crcstrip = 1;
        rx_ctx.l2tsel = 1;
        rx_ctx.showiv = 1;
+#ifdef I40E_FCOE
+       rx_ctx.fc_ena = (vsi->type == I40E_VSI_FCOE);
+#endif
        /* set the prefena field to 1 because the manual says to */
        rx_ctx.prefena = 1;
 
@@ -2482,6 +2596,17 @@ static int i40e_vsi_configure_rx(struct i40e_vsi *vsi)
                break;
        }
 
+#ifdef I40E_FCOE
+       /* setup rx buffer for FCoE */
+       if ((vsi->type == I40E_VSI_FCOE) &&
+           (vsi->back->flags & I40E_FLAG_FCOE_ENABLED)) {
+               vsi->rx_hdr_len = 0;
+               vsi->rx_buf_len = I40E_RXBUFFER_3072;
+               vsi->max_frame = I40E_RXBUFFER_3072;
+               vsi->dtype = I40E_RX_DTYPE_NO_SPLIT;
+       }
+
+#endif /* I40E_FCOE */
        /* round up for the chip's needs */
        vsi->rx_hdr_len = ALIGN(vsi->rx_hdr_len,
                                (1 << I40E_RXQ_CTX_HBUFF_SHIFT));
@@ -2755,6 +2880,22 @@ void i40e_irq_dynamic_enable(struct i40e_vsi *vsi, int vector)
        /* skip the flush */
 }
 
+/**
+ * i40e_irq_dynamic_disable - Disable default interrupt generation settings
+ * @vsi: pointer to a vsi
+ * @vector: disable a particular Hw Interrupt vector
+ **/
+void i40e_irq_dynamic_disable(struct i40e_vsi *vsi, int vector)
+{
+       struct i40e_pf *pf = vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+       u32 val;
+
+       val = I40E_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+       wr32(hw, I40E_PFINT_DYN_CTLN(vector - 1), val);
+       i40e_flush(hw);
+}
+
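Note that i40e_irq_dynamic_disable() never touches an explicit disable bit; it writes a DYN_CTLN value whose INTENA bit is left at zero, with only the ITR index field (I40E_ITR_NONE) populated. A minimal sketch of that write pattern, with illustrative field offsets rather than the real register layout:

/* Hedged sketch of the DYN_CTLN write pattern: the register word is
 * built from bitfields, and leaving INTENA at zero disables the
 * vector. Field positions here are invented for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define DYN_CTLN_INTENA_SHIFT   0
#define DYN_CTLN_ITR_INDX_SHIFT 3
#define ITR_NONE                3       /* "no ITR" index, as in the driver */

int main(void)
{
        uint32_t enable  = (1u << DYN_CTLN_INTENA_SHIFT) |
                           (ITR_NONE << DYN_CTLN_ITR_INDX_SHIFT);
        uint32_t disable = ITR_NONE << DYN_CTLN_ITR_INDX_SHIFT;

        printf("enable word:  0x%08x\n", enable);
        printf("disable word: 0x%08x\n", disable);  /* INTENA bit clear */
        return 0;
}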
 /**
  * i40e_msix_clean_rings - MSIX mode Interrupt Handler
  * @irq: interrupt number
@@ -3057,16 +3198,33 @@ static bool i40e_clean_fdir_tx_irq(struct i40e_ring *tx_ring, int budget)
                /* clear next_to_watch to prevent false hangs */
                tx_buf->next_to_watch = NULL;
 
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
+               /* move past filter desc */
+               tx_buf++;
+               tx_desc++;
+               i++;
+               if (unlikely(!i)) {
+                       i -= tx_ring->count;
+                       tx_buf = tx_ring->tx_bi;
+                       tx_desc = I40E_TX_DESC(tx_ring, 0);
+               }
                /* unmap skb header data */
                dma_unmap_single(tx_ring->dev,
                                 dma_unmap_addr(tx_buf, dma),
                                 dma_unmap_len(tx_buf, len),
                                 DMA_TO_DEVICE);
+               if (tx_buf->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buf->raw_buf);
 
+               tx_buf->raw_buf = NULL;
+               tx_buf->tx_flags = 0;
+               tx_buf->next_to_watch = NULL;
                dma_unmap_len_set(tx_buf, len, 0);
+               tx_desc->buffer_addr = 0;
+               tx_desc->cmd_type_offset_bsz = 0;
 
-
-               /* move to the next desc and buffer to clean */
+               /* move us past the eop_desc for start of next FD desc */
                tx_buf++;
                tx_desc++;
                i++;
@@ -3151,8 +3309,12 @@ static void i40e_vsi_map_rings_to_vectors(struct i40e_vsi *vsi)
 
        /* If we don't have enough vectors for a 1-to-1 mapping, we'll have to
         * group them so there are multiple queues per vector.
+        * It is also important to walk through all the vectors available, so
+        * that any vectors left unused are cleared. This is especially
+        * important when decreasing the number of queues in use.
         */
-       for (; v_start < q_vectors && qp_remaining; v_start++) {
+       for (; v_start < q_vectors; v_start++) {
                struct i40e_q_vector *q_vector = vsi->q_vectors[v_start];
 
                num_ringpairs = DIV_ROUND_UP(qp_remaining, q_vectors - v_start);
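
The DIV_ROUND_UP(qp_remaining, q_vectors - v_start) step hands each vector the ceiling of what is left divided by the vectors still unassigned, so any remainder is front-loaded onto the first vectors. A standalone check of the arithmetic (7 queue pairs over 3 vectors comes out 3/2/2):

/* Hedged model of the ring-pair distribution in
 * i40e_vsi_map_rings_to_vectors().
 */
#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
        int qp_remaining = 7, q_vectors = 3;

        for (int v = 0; v < q_vectors; v++) {
                int num_ringpairs = DIV_ROUND_UP(qp_remaining,
                                                 q_vectors - v);
                printf("vector %d -> %d ring pairs\n", v, num_ringpairs);
                qp_remaining -= num_ringpairs;
        }
        return 0;
}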
@@ -3205,7 +3367,11 @@ static int i40e_vsi_request_irq(struct i40e_vsi *vsi, char *basename)
  * This is used by netconsole to send skbs without having to re-enable
  * interrupts.  It's not called while the normal interrupt routine is executing.
  **/
+#ifdef I40E_FCOE
+void i40e_netpoll(struct net_device *netdev)
+#else
 static void i40e_netpoll(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -3227,6 +3393,35 @@ static void i40e_netpoll(struct net_device *netdev)
 }
 #endif
 
+/**
+ * i40e_pf_txq_wait - Wait for a PF's Tx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Tx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ **/
+static int i40e_pf_txq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 tx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               tx_reg = rd32(&pf->hw, I40E_QTX_ENA(pf_q));
+               if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
+                       break;
+
+               udelay(10);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
+
+       return 0;
+}
+
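i40e_pf_txq_wait() and its Rx twin further down are the same bounded-poll idiom: read the enable register, compare the status bit against the requested state, delay, and give up with -ETIMEDOUT once the retry budget is spent. A self-contained model, with a fake register standing in for QTX_ENA/QRX_ENA:

/* Hedged model of the bounded-poll helper above; in the driver the
 * delay is udelay(10) and the budget is I40E_QUEUE_WAIT_RETRY_LIMIT.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define RETRY_LIMIT 10
#define QENA_STAT   (1u << 2)   /* illustrative status bit */

static unsigned int fake_reg;   /* pretend hardware register */

static int queue_wait(bool enable)
{
        for (int i = 0; i < RETRY_LIMIT; i++) {
                if (enable == !!(fake_reg & QENA_STAT))
                        return 0;
                /* udelay(10) in the real driver */
        }
        return -ETIMEDOUT;
}

int main(void)
{
        fake_reg = QENA_STAT;   /* hardware reports "enabled" */
        printf("wait enable:  %d\n", queue_wait(true));   /* 0 */
        printf("wait disable: %d\n", queue_wait(false));  /* -ETIMEDOUT */
        return 0;
}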
 /**
  * i40e_vsi_control_tx - Start or stop a VSI's rings
  * @vsi: the VSI being configured
@@ -3236,7 +3431,7 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q;
+       int i, j, pf_q, ret = 0;
        u32 tx_reg;
 
        pf_q = vsi->base_queue;
@@ -3269,22 +3464,46 @@ static int i40e_vsi_control_tx(struct i40e_vsi *vsi, bool enable)
                wr32(hw, I40E_QTX_ENA(pf_q), tx_reg);
 
                /* wait for the change to finish */
-               for (j = 0; j < 10; j++) {
-                       tx_reg = rd32(hw, I40E_QTX_ENA(pf_q));
-                       if (enable == !!(tx_reg & I40E_QTX_ENA_QENA_STAT_MASK))
-                               break;
-
-                       udelay(10);
-               }
-               if (j >= 10) {
-                       dev_info(&pf->pdev->dev, "Tx ring %d %sable timeout\n",
-                                pf_q, (enable ? "en" : "dis"));
-                       return -ETIMEDOUT;
+               ret = i40e_pf_txq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "%s: VSI seid %d Tx ring %d %sable timeout\n",
+                                __func__, vsi->seid, pf_q,
+                                (enable ? "en" : "dis"));
+                       break;
                }
        }
 
        if (hw->revision_id == 0)
                mdelay(50);
+       return ret;
+}
+
+/**
+ * i40e_pf_rxq_wait - Wait for a PF's Rx queue to be enabled or disabled
+ * @pf: the PF being configured
+ * @pf_q: the PF queue
+ * @enable: enable or disable state of the queue
+ *
+ * This routine will wait for the given Rx queue of the PF to reach the
+ * enabled or disabled state.
+ * Returns -ETIMEDOUT if the queue fails to reach the requested state after
+ * multiple retries; otherwise returns 0 on success.
+ **/
+static int i40e_pf_rxq_wait(struct i40e_pf *pf, int pf_q, bool enable)
+{
+       int i;
+       u32 rx_reg;
+
+       for (i = 0; i < I40E_QUEUE_WAIT_RETRY_LIMIT; i++) {
+               rx_reg = rd32(&pf->hw, I40E_QRX_ENA(pf_q));
+               if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
+                       break;
+
+               udelay(10);
+       }
+       if (i >= I40E_QUEUE_WAIT_RETRY_LIMIT)
+               return -ETIMEDOUT;
 
        return 0;
 }
@@ -3298,7 +3517,7 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
 {
        struct i40e_pf *pf = vsi->back;
        struct i40e_hw *hw = &pf->hw;
-       int i, j, pf_q;
+       int i, j, pf_q, ret = 0;
        u32 rx_reg;
 
        pf_q = vsi->base_queue;
@@ -3323,22 +3542,17 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
                wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
 
                /* wait for the change to finish */
-               for (j = 0; j < 10; j++) {
-                       rx_reg = rd32(hw, I40E_QRX_ENA(pf_q));
-
-                       if (enable == !!(rx_reg & I40E_QRX_ENA_QENA_STAT_MASK))
-                               break;
-
-                       udelay(10);
-               }
-               if (j >= 10) {
-                       dev_info(&pf->pdev->dev, "Rx ring %d %sable timeout\n",
-                                pf_q, (enable ? "en" : "dis"));
-                       return -ETIMEDOUT;
+               ret = i40e_pf_rxq_wait(pf, pf_q, enable);
+               if (ret) {
+                       dev_info(&pf->pdev->dev,
+                                "%s: VSI seid %d Rx ring %d %sable timeout\n",
+                                __func__, vsi->seid, pf_q,
+                                (enable ? "en" : "dis"));
+                       break;
                }
        }
 
-       return 0;
+       return ret;
 }
 
 /**
@@ -4107,12 +4321,20 @@ static void i40e_dcb_reconfigure(struct i40e_pf *pf)
                        continue;
 
                /* - Enable all TCs for the LAN VSI
+#ifdef I40E_FCOE
+                * - For FCoE VSI only enable the TC configured
+                *   as per the APP TLV
+#endif
                 * - For all others keep them at TC0 for now
                 */
                if (v == pf->lan_vsi)
                        tc_map = i40e_pf_get_tc_map(pf);
                else
                        tc_map = i40e_pf_get_default_tc(pf);
+#ifdef I40E_FCOE
+               if (pf->vsi[v]->type == I40E_VSI_FCOE)
+                       tc_map = i40e_get_fcoe_tc_map(pf);
+#endif /* #ifdef I40E_FCOE */
 
                ret = i40e_vsi_config_tc(pf->vsi[v], tc_map);
                if (ret) {
@@ -4231,8 +4453,12 @@ static void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
 static int i40e_up_complete(struct i40e_vsi *vsi)
 {
        struct i40e_pf *pf = vsi->back;
+       u8 set_fc_aq_fail = 0;
        int err;
 
+       /* force flow control off */
+       i40e_set_fc(&pf->hw, &set_fc_aq_fail, true);
+
        if (pf->flags & I40E_FLAG_MSIX_ENABLED)
                i40e_vsi_configure_msix(vsi);
        else
@@ -4335,7 +4561,11 @@ void i40e_down(struct i40e_vsi *vsi)
  * @netdev: net device to configure
  * @tc: number of traffic classes to enable
  **/
+#ifdef I40E_FCOE
+int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#else
 static int i40e_setup_tc(struct net_device *netdev, u8 tc)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4400,7 +4630,11 @@ exit:
  *
  * Returns 0 on success, negative value on failure
  **/
+#ifdef I40E_FCOE
+int i40e_open(struct net_device *netdev)
+#else
 static int i40e_open(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4536,7 +4770,11 @@ static void i40e_fdir_filter_exit(struct i40e_pf *pf)
  *
  * Returns 0, this is not allowed to fail
  **/
+#ifdef I40E_FCOE
+int i40e_close(struct net_device *netdev)
+#else
 static int i40e_close(struct net_device *netdev)
+#endif
 {
        struct i40e_netdev_priv *np = netdev_priv(netdev);
        struct i40e_vsi *vsi = np->vsi;
@@ -4638,6 +4876,23 @@ void i40e_do_reset(struct i40e_pf *pf, u32 reset_flags)
                        }
                }
 
+               /* no further action needed, so return now */
+               return;
+       } else if (reset_flags & (1 << __I40E_DOWN_REQUESTED)) {
+               int v;
+
+               /* Find the VSI(s) that need to be brought down */
+               dev_info(&pf->pdev->dev, "VSI down requested\n");
+               for (v = 0; v < pf->num_alloc_vsi; v++) {
+                       struct i40e_vsi *vsi = pf->vsi[v];
+                       if (vsi != NULL &&
+                           test_bit(__I40E_DOWN_REQUESTED, &vsi->state)) {
+                               set_bit(__I40E_DOWN, &vsi->state);
+                               i40e_down(vsi);
+                               clear_bit(__I40E_DOWN_REQUESTED, &vsi->state);
+                       }
+               }
+
                /* no further action needed, so return now */
                return;
        } else {
@@ -4845,7 +5100,20 @@ static void i40e_service_event_complete(struct i40e_pf *pf)
 }
 
 /**
- * i40e_get_current_fd_count - Get the count of FD filters programmed in the HW
+ * i40e_get_cur_guaranteed_fd_count - Get the consumed guaranteed FD filters
+ * @pf: board private structure
+ **/
+int i40e_get_cur_guaranteed_fd_count(struct i40e_pf *pf)
+{
+       int val, fcnt_prog;
+
+       val = rd32(&pf->hw, I40E_PFQF_FDSTAT);
+       fcnt_prog = (val & I40E_PFQF_FDSTAT_GUARANT_CNT_MASK);
+       return fcnt_prog;
+}
+
+/**
+ * i40e_get_current_fd_count - Get the count of total FD filters programmed
  * @pf: board private structure
  **/
 int i40e_get_current_fd_count(struct i40e_pf *pf)
@@ -4857,7 +5125,6 @@ int i40e_get_current_fd_count(struct i40e_pf *pf)
                      I40E_PFQF_FDSTAT_BEST_CNT_SHIFT);
        return fcnt_prog;
 }
-
 /**
  * i40e_fdir_check_and_reenable - Function to re-enable FD ATR or SB if disabled
  * @pf: board private structure
@@ -4872,8 +5139,8 @@ void i40e_fdir_check_and_reenable(struct i40e_pf *pf)
        if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
            (pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
-       fcnt_prog = i40e_get_current_fd_count(pf);
-       fcnt_avail = i40e_get_fd_cnt_all(pf);
+       fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+       fcnt_avail = pf->fdir_pf_filter_count;
        if (fcnt_prog < (fcnt_avail - I40E_FDIR_BUFFER_HEAD_ROOM)) {
                if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
                    (pf->auto_disable_flags & I40E_FLAG_FD_SB_ENABLED)) {
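
Note the headroom in the re-enable test: sideband filtering only comes back once the programmed count has fallen I40E_FDIR_BUFFER_HEAD_ROOM below the guaranteed pool, which keeps the feature from flapping right at the capacity line. A small model of that hysteresis (the headroom value below is illustrative):

/* Hedged model of the FD re-enable check in
 * i40e_fdir_check_and_reenable().
 */
#include <stdbool.h>
#include <stdio.h>

#define HEAD_ROOM 32    /* illustrative stand-in */

static bool can_reenable(int fcnt_prog, int fcnt_avail)
{
        return fcnt_prog < (fcnt_avail - HEAD_ROOM);
}

int main(void)
{
        printf("%d\n", can_reenable(100, 128)); /* 0: inside headroom */
        printf("%d\n", can_reenable(64, 128));  /* 1: safe to re-enable */
        return 0;
}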
@@ -4922,6 +5189,9 @@ static void i40e_vsi_link_event(struct i40e_vsi *vsi, bool link_up)
 
        switch (vsi->type) {
        case I40E_VSI_MAIN:
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+#endif
                if (!vsi->netdev || !vsi->netdev_registered)
                        break;
 
@@ -5110,6 +5380,10 @@ static void i40e_reset_subtask(struct i40e_pf *pf)
                reset_flags |= (1 << __I40E_GLOBAL_RESET_REQUESTED);
                clear_bit(__I40E_GLOBAL_RESET_REQUESTED, &pf->state);
        }
+       if (test_bit(__I40E_DOWN_REQUESTED, &pf->state)) {
+               reset_flags |= (1 << __I40E_DOWN_REQUESTED);
+               clear_bit(__I40E_DOWN_REQUESTED, &pf->state);
+       }
 
        /* If there's a recovery already waiting, it takes
         * precedence before starting a new reset sequence.
@@ -5164,7 +5438,7 @@ static void i40e_handle_link_event(struct i40e_pf *pf,
         * then see if the status changed while processing the
         * initial event.
         */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw, true);
        i40e_link_event(pf);
 }
 
@@ -5182,9 +5456,6 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        u32 oldval;
        u32 val;
 
-       if (!test_bit(__I40E_ADMINQ_EVENT_PENDING, &pf->state))
-               return;
-
        /* check for error indications */
        val = rd32(&pf->hw, pf->hw.aq.arq.len);
        oldval = val;
@@ -5228,10 +5499,9 @@ static void i40e_clean_adminq_subtask(struct i40e_pf *pf)
        do {
                event.msg_size = I40E_MAX_AQ_BUF_SIZE; /* reinit each time */
                ret = i40e_clean_arq_element(hw, &event, &pending);
-               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
-                       dev_info(&pf->pdev->dev, "No ARQ event found\n");
+               if (ret == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                        break;
-               } else if (ret) {
+               else if (ret) {
                        dev_info(&pf->pdev->dev, "ARQ event error %d\n", ret);
                        break;
                }
@@ -5463,6 +5733,20 @@ static void i40e_fdir_sb_setup(struct i40e_pf *pf)
        struct i40e_vsi *vsi;
        int i;
 
+       /* quick workaround for an NVM issue that leaves a critical register
+        * uninitialized
+        */
+       if (!rd32(&pf->hw, I40E_GLQF_HKEY(0))) {
+               static const u32 hkey[] = {
+                       0xe640d33f, 0xcdfe98ab, 0x73fa7161, 0x0d7a7d36,
+                       0xeacb7d61, 0xaa4f05b6, 0x9c5c89ed, 0xfc425ddb,
+                       0xa4654832, 0xfc7461d4, 0x8f827619, 0xf5c63c21,
+                       0x95b3a76d};
+
+               for (i = 0; i <= I40E_GLQF_HKEY_MAX_INDEX; i++)
+                       wr32(&pf->hw, I40E_GLQF_HKEY(i), hkey[i]);
+       }
+
        if (!(pf->flags & I40E_FLAG_FD_SB_ENABLED))
                return;
 
@@ -5512,7 +5796,7 @@ static void i40e_fdir_teardown(struct i40e_pf *pf)
  *
  * Close up the VFs and other things in prep for pf Reset.
   **/
-static int i40e_prep_for_reset(struct i40e_pf *pf)
+static void i40e_prep_for_reset(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        i40e_status ret = 0;
@@ -5520,7 +5804,7 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
 
        clear_bit(__I40E_RESET_INTR_RECEIVED, &pf->state);
        if (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state))
-               return 0;
+               return;
 
        dev_dbg(&pf->pdev->dev, "Tearing down internal switch for reset\n");
 
@@ -5537,13 +5821,10 @@ static int i40e_prep_for_reset(struct i40e_pf *pf)
        /* call shutdown HMC */
        if (hw->hmc.hmc_obj) {
                ret = i40e_shutdown_lan_hmc(hw);
-               if (ret) {
+               if (ret)
                        dev_warn(&pf->pdev->dev,
                                 "shutdown_lan_hmc failed: %d\n", ret);
-                       clear_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state);
-               }
        }
-       return ret;
 }
 
 /**
@@ -5629,7 +5910,12 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
                goto end_core_reset;
        }
 #endif /* CONFIG_I40E_DCB */
+#ifdef I40E_FCOE
+       ret = i40e_init_pf_fcoe(pf);
+       if (ret)
+               dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", ret);
 
+#endif
        /* do basic switch setup */
        ret = i40e_setup_pf_switch(pf, reinit);
        if (ret)
@@ -5679,7 +5965,7 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit)
        }
 
        if (pf->vsi[pf->lan_vsi]->uplink_seid == pf->mac_seid) {
-               dev_info(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
+               dev_dbg(&pf->pdev->dev, "attempting to rebuild PF VSI\n");
                /* no VEB, so rebuild only the Main VSI */
                ret = i40e_add_vsi(pf->vsi[pf->lan_vsi]);
                if (ret) {
@@ -5717,11 +6003,8 @@ end_core_reset:
  **/
 static void i40e_handle_reset_warning(struct i40e_pf *pf)
 {
-       i40e_status ret;
-
-       ret = i40e_prep_for_reset(pf);
-       if (!ret)
-               i40e_reset_and_rebuild(pf, false);
+       i40e_prep_for_reset(pf);
+       i40e_reset_and_rebuild(pf, false);
 }
 
 /**
@@ -5734,6 +6017,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
 {
        struct i40e_hw *hw = &pf->hw;
        bool mdd_detected = false;
+       bool pf_mdd_detected = false;
        struct i40e_vf *vf;
        u32 reg;
        int i;
@@ -5744,26 +6028,28 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
        /* find what triggered the MDD event */
        reg = rd32(hw, I40E_GL_MDET_TX);
        if (reg & I40E_GL_MDET_TX_VALID_MASK) {
-               u8 func = (reg & I40E_GL_MDET_TX_FUNCTION_MASK)
-                               >> I40E_GL_MDET_TX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT)
-                               >> I40E_GL_MDET_TX_EVENT_SHIFT;
-               u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK)
-                               >> I40E_GL_MDET_TX_QUEUE_SHIFT;
+               u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_PF_NUM_SHIFT;
+               u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
+                               I40E_GL_MDET_TX_VF_NUM_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+                               I40E_GL_MDET_TX_EVENT_SHIFT;
+               u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+                               I40E_GL_MDET_TX_QUEUE_SHIFT;
                dev_info(&pf->pdev->dev,
-                        "Malicious Driver Detection event 0x%02x on TX queue %d of function 0x%02x\n",
-                        event, queue, func);
+                        "Malicious Driver Detection event 0x%02x on TX queue %d pf number 0x%02x vf number 0x%02x\n",
+                        event, queue, pf_num, vf_num);
                wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
                mdd_detected = true;
        }
        reg = rd32(hw, I40E_GL_MDET_RX);
        if (reg & I40E_GL_MDET_RX_VALID_MASK) {
-               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK)
-                               >> I40E_GL_MDET_RX_FUNCTION_SHIFT;
-               u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT)
-                               >> I40E_GL_MDET_RX_EVENT_SHIFT;
-               u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK)
-                               >> I40E_GL_MDET_RX_QUEUE_SHIFT;
+               u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+                               I40E_GL_MDET_RX_FUNCTION_SHIFT;
+               u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+                               I40E_GL_MDET_RX_EVENT_SHIFT;
+               u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+                               I40E_GL_MDET_RX_QUEUE_SHIFT;
                dev_info(&pf->pdev->dev,
                         "Malicious Driver Detection event 0x%02x on RX queue %d of function 0x%02x\n",
                         event, queue, func);
@@ -5771,6 +6057,30 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
                mdd_detected = true;
        }
 
+       if (mdd_detected) {
+               reg = rd32(hw, I40E_PF_MDET_TX);
+               if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+                       dev_info(&pf->pdev->dev,
+                                "MDD TX event is for this function 0x%08x, requesting PF reset.\n",
+                                reg);
+                       pf_mdd_detected = true;
+               }
+               reg = rd32(hw, I40E_PF_MDET_RX);
+               if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+                       wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+                       dev_info(&pf->pdev->dev,
+                                "MDD RX event is for this function 0x%08x, requesting PF reset.\n",
+                                reg);
+                       pf_mdd_detected = true;
+               }
+               /* Queue belongs to the PF, initiate a reset */
+               if (pf_mdd_detected) {
+                       set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
+                       i40e_service_event_schedule(pf);
+               }
+       }
+
        /* see if one of the VFs needs its hand slapped */
        for (i = 0; i < pf->num_alloc_vfs && mdd_detected; i++) {
                vf = &(pf->vf[i]);
@@ -5860,6 +6170,12 @@ static void i40e_service_task(struct work_struct *work)
                                          service_task);
        unsigned long start_time = jiffies;
 
+       /* don't bother with service tasks if a reset is in progress */
+       if (test_bit(__I40E_RESET_RECOVERY_PENDING, &pf->state)) {
+               i40e_service_event_complete(pf);
+               return;
+       }
+
        i40e_reset_subtask(pf);
        i40e_handle_mdd_event(pf);
        i40e_vc_process_vflr_event(pf);
@@ -5938,6 +6254,15 @@ static int i40e_set_num_rings_in_vsi(struct i40e_vsi *vsi)
                                      I40E_REQ_DESCRIPTOR_MULTIPLE);
                break;
 
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+               vsi->alloc_queue_pairs = pf->num_fcoe_qps;
+               vsi->num_desc = ALIGN(I40E_DEFAULT_NUM_DESCRIPTORS,
+                                     I40E_REQ_DESCRIPTOR_MULTIPLE);
+               vsi->num_q_vectors = pf->num_fcoe_msix;
+               break;
+
+#endif /* I40E_FCOE */
        default:
                WARN_ON(1);
                return -ENODATA;
@@ -6249,6 +6574,9 @@ static int i40e_init_msix(struct i40e_pf *pf)
         *              is governed by number of cpus in the system.
         *      - assumes symmetric Tx/Rx pairing
         *   - The number of VMDq pairs
+#ifdef I40E_FCOE
+        *   - The number of FCOE qps.
+#endif
         * Once we count this up, try the request.
         *
         * If we can't get what we want, we'll simplify to nearly nothing
@@ -6261,6 +6589,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED)
                v_budget++;
 
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+               pf->num_fcoe_msix = pf->num_fcoe_qps;
+               v_budget += pf->num_fcoe_msix;
+       }
+
+#endif
        /* Scale down if necessary, and the rings will share vectors */
        v_budget = min_t(int, v_budget, hw->func_caps.num_msix_vectors);
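
The budget computed here is purely additive: one vector for the admin queue and misc causes, plus the LAN, flow director, VMDq and (when built in) FCoE requests, clamped at the end to what the function capabilities advertise. A back-of-envelope version of that accounting, with made-up component counts:

/* Hedged sketch of the MSI-X budgeting in i40e_init_msix(); the
 * per-feature numbers are illustrative.
 */
#include <stdio.h>

static int min_int(int a, int b) { return a < b ? a : b; }

int main(void)
{
        int lan_qps = 8, fdir = 1, vmdq_msix = 2, fcoe_msix = 1;
        int hw_max_vectors = 9;

        int v_budget = 1 /* misc/adminq vector */ + lan_qps + fdir +
                       vmdq_msix + fcoe_msix;

        /* scale down if necessary; rings then share vectors */
        v_budget = min_int(v_budget, hw_max_vectors);
        printf("requesting %d MSI-X vectors\n", v_budget);
        return 0;
}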
 
@@ -6279,6 +6614,10 @@ static int i40e_init_msix(struct i40e_pf *pf)
                 * of these features based on the policy and at the end disable
                 * the features that did not get any vectors.
                 */
+#ifdef I40E_FCOE
+               pf->num_fcoe_qps = 0;
+               pf->num_fcoe_msix = 0;
+#endif
                pf->num_vmdq_msix = 0;
        }
 
@@ -6309,9 +6648,24 @@ static int i40e_init_msix(struct i40e_pf *pf)
                        pf->num_lan_msix = 1;
                        break;
                case 3:
+#ifdef I40E_FCOE
+                       /* give one vector to FCoE */
+                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+                               pf->num_lan_msix = 1;
+                               pf->num_fcoe_msix = 1;
+                       }
+#else
                        pf->num_lan_msix = 2;
+#endif
                        break;
                default:
+#ifdef I40E_FCOE
+                       /* give one vector to FCoE */
+                       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+                               pf->num_fcoe_msix = 1;
+                               vec--;
+                       }
+#endif
                        pf->num_lan_msix = min_t(int, (vec / 2),
                                                 pf->num_lan_qps);
                        pf->num_vmdq_vsis = min_t(int, (vec - pf->num_lan_msix),
@@ -6325,6 +6679,13 @@ static int i40e_init_msix(struct i40e_pf *pf)
                dev_info(&pf->pdev->dev, "VMDq disabled, not enough MSI-X vectors\n");
                pf->flags &= ~I40E_FLAG_VMDQ_ENABLED;
        }
+#ifdef I40E_FCOE
+
+       if ((pf->flags & I40E_FLAG_FCOE_ENABLED) && (pf->num_fcoe_msix == 0)) {
+               dev_info(&pf->pdev->dev, "FCOE disabled, not enough MSI-X vectors\n");
+               pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+       }
+#endif
        return err;
 }
 
@@ -6408,6 +6769,9 @@ static void i40e_init_interrupt_scheme(struct i40e_pf *pf)
                err = i40e_init_msix(pf);
                if (err) {
                        pf->flags &= ~(I40E_FLAG_MSIX_ENABLED   |
+#ifdef I40E_FCOE
+                                      I40E_FLAG_FCOE_ENABLED   |
+#endif
                                       I40E_FLAG_RSS_ENABLED    |
                                       I40E_FLAG_DCB_CAPABLE    |
                                       I40E_FLAG_SRIOV_ENABLED  |
@@ -6492,6 +6856,7 @@ static int i40e_config_rss(struct i40e_pf *pf)
        u32 lut = 0;
        int i, j;
        u64 hena;
+       u32 reg_val;
 
        /* Fill out hash function seed */
        for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
@@ -6504,8 +6869,19 @@ static int i40e_config_rss(struct i40e_pf *pf)
        wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
        wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
 
+       /* Check capability and set table size and register per hw expectation */
+       reg_val = rd32(hw, I40E_PFQF_CTL_0);
+       if (hw->func_caps.rss_table_size == 512) {
+               reg_val |= I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+               pf->rss_table_size = 512;
+       } else {
+               pf->rss_table_size = 128;
+               reg_val &= ~I40E_PFQF_CTL_0_HASHLUTSIZE_512;
+       }
+       wr32(hw, I40E_PFQF_CTL_0, reg_val);
+
        /* Populate the LUT with the max number of queues in round-robin fashion */
-       for (i = 0, j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
+       for (i = 0, j = 0; i < pf->rss_table_size; i++, j++) {
 
                /* The assumption is that lan qp count will be the highest
                 * qp count for any PF VSI that needs RSS.
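
Filling the LUT round-robin just writes queue indices 0..rss_size-1 over and over across the whole table, so hash buckets land evenly on the active queues. A standalone model (the 16-entry table and 4 queues are illustrative; the driver uses 128 or 512 entries as selected above):

/* Hedged model of the round-robin RSS LUT fill in i40e_config_rss(). */
#include <stdio.h>

int main(void)
{
        int rss_table_size = 16, rss_size = 4;
        int lut[16];

        for (int i = 0, j = 0; i < rss_table_size; i++, j++) {
                if (j == rss_size)
                        j = 0;          /* wrap back to queue 0 */
                lut[i] = j;
                printf("%d%c", lut[i],
                       i == rss_table_size - 1 ? '\n' : ' ');
        }
        return 0;
}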
@@ -6592,13 +6968,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
         * maximum might end up larger than the available queues
         */
        pf->rss_size_max = 0x1 << pf->hw.func_caps.rss_table_entry_width;
+       pf->rss_size = 1;
        pf->rss_size_max = min_t(int, pf->rss_size_max,
                                 pf->hw.func_caps.num_tx_qp);
        if (pf->hw.func_caps.rss) {
                pf->flags |= I40E_FLAG_RSS_ENABLED;
                pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
-       } else {
-               pf->rss_size = 1;
        }
 
        /* MFP mode enabled */
@@ -6634,6 +7009,12 @@ static int i40e_sw_init(struct i40e_pf *pf)
                pf->num_vmdq_qps = I40E_DEFAULT_QUEUES_PER_VMDQ;
        }
 
+#ifdef I40E_FCOE
+       err = i40e_init_pf_fcoe(pf);
+       if (err)
+               dev_info(&pf->pdev->dev, "init_pf_fcoe failed: %d\n", err);
+
+#endif /* I40E_FCOE */
 #ifdef CONFIG_PCI_IOV
        if (pf->hw.func_caps.num_vfs) {
                pf->num_vf_qps = I40E_DEFAULT_QUEUES_PER_VF;
@@ -6670,6 +7051,8 @@ static int i40e_sw_init(struct i40e_pf *pf)
        pf->irq_pile->num_entries = pf->hw.func_caps.num_msix_vectors;
        pf->irq_pile->search_hint = 0;
 
+       pf->tx_timeout_recovery_level = 1;
+
        mutex_init(&pf->switch_mutex);
 
 sw_init_done:
@@ -6702,9 +7085,11 @@ bool i40e_set_ntuple(struct i40e_pf *pf, netdev_features_t features)
                        i40e_fdir_filter_exit(pf);
                }
                pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
-               /* if ATR was disabled it can be re-enabled. */
-               if (!(pf->flags & I40E_FLAG_FD_ATR_ENABLED))
-                       pf->flags |= I40E_FLAG_FD_ATR_ENABLED;
+               pf->auto_disable_flags &= ~I40E_FLAG_FD_SB_ENABLED;
+               /* if ATR was auto disabled it can be re-enabled. */
+               if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                   (pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED))
+                       pf->auto_disable_flags &= ~I40E_FLAG_FD_ATR_ENABLED;
        }
        return need_reset;
 }
@@ -6833,6 +7218,22 @@ static void i40e_del_vxlan_port(struct net_device *netdev,
 }
 
 #endif
+static int i40e_get_phys_port_id(struct net_device *netdev,
+                                struct netdev_phys_port_id *ppid)
+{
+       struct i40e_netdev_priv *np = netdev_priv(netdev);
+       struct i40e_pf *pf = np->vsi->back;
+       struct i40e_hw *hw = &pf->hw;
+
+       if (!(pf->flags & I40E_FLAG_PORT_ID_VALID))
+               return -EOPNOTSUPP;
+
+       ppid->id_len = min_t(int, sizeof(hw->mac.port_addr), sizeof(ppid->id));
+       memcpy(ppid->id, hw->mac.port_addr, ppid->id_len);
+
+       return 0;
+}
+
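ndo_get_phys_port_id gives user space a stable identifier for the physical port behind a netdev; here it is the port MAC, valid only when I40E_FLAG_PORT_ID_VALID was set at probe. User space reads it through the IFLA_PHYS_PORT_ID netlink attribute (recent iproute2 shows it as "portid" with ip -d link). A sketch of the clamp-and-copy step, with a local struct in place of the kernel's:

/* Hedged model of the id_len clamp in i40e_get_phys_port_id(). The
 * 32-byte limit mirrors the kernel's maximum; the 6-byte source
 * models a port MAC.
 */
#include <stdio.h>
#include <string.h>

#define MAX_PHYS_ITEM_ID_LEN 32

struct phys_port_id {
        unsigned char id[MAX_PHYS_ITEM_ID_LEN];
        size_t id_len;
};

int main(void)
{
        unsigned char port_addr[6] = { 0x68, 0x05, 0xca, 0x00, 0x01, 0x02 };
        struct phys_port_id ppid;

        ppid.id_len = sizeof(port_addr) < sizeof(ppid.id) ?
                      sizeof(port_addr) : sizeof(ppid.id);
        memcpy(ppid.id, port_addr, ppid.id_len);

        for (size_t i = 0; i < ppid.id_len; i++)
                printf("%02x%s", ppid.id[i],
                       i + 1 < ppid.id_len ? ":" : "\n");
        return 0;
}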
 #ifdef HAVE_FDB_OPS
 #ifdef USE_CONST_DEV_UC_CHAR
 static int i40e_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
@@ -6910,13 +7311,14 @@ static int i40e_ndo_fdb_del(struct ndmsg *ndm,
 static int i40e_ndo_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
                             struct net_device *dev,
+                            struct net_device *filter_dev,
                             int idx)
 {
        struct i40e_netdev_priv *np = netdev_priv(dev);
        struct i40e_pf *pf = np->vsi->back;
 
        if (pf->flags & I40E_FLAG_SRIOV_ENABLED)
-               idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, filter_dev, idx);
 
        return idx;
 }
@@ -6940,6 +7342,10 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_poll_controller    = i40e_netpoll,
 #endif
        .ndo_setup_tc           = i40e_setup_tc,
+#ifdef I40E_FCOE
+       .ndo_fcoe_enable        = i40e_fcoe_enable,
+       .ndo_fcoe_disable       = i40e_fcoe_disable,
+#endif
        .ndo_set_features       = i40e_set_features,
        .ndo_set_vf_mac         = i40e_ndo_set_vf_mac,
        .ndo_set_vf_vlan        = i40e_ndo_set_vf_port_vlan,
@@ -6951,6 +7357,7 @@ static const struct net_device_ops i40e_netdev_ops = {
        .ndo_add_vxlan_port     = i40e_add_vxlan_port,
        .ndo_del_vxlan_port     = i40e_del_vxlan_port,
 #endif
+       .ndo_get_phys_port_id   = i40e_get_phys_port_id,
 #ifdef HAVE_FDB_OPS
        .ndo_fdb_add            = i40e_ndo_fdb_add,
 #ifndef USE_DEFAULT_FDB_DEL_DUMP
@@ -7047,6 +7454,9 @@ static int i40e_config_netdev(struct i40e_vsi *vsi)
        netdev->netdev_ops = &i40e_netdev_ops;
        netdev->watchdog_timeo = 5 * HZ;
        i40e_set_ethtool_ops(netdev);
+#ifdef I40E_FCOE
+       i40e_fcoe_config_netdev(netdev, vsi);
+#endif
 
        return 0;
 }
@@ -7166,7 +7576,6 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                 * should be set to zero by default.
                 */
                ctxt.info.switch_id = 0;
-               ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_LOCAL_LB);
                ctxt.info.switch_id |= cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
 
                /* Setup the VSI tx/rx queue map for TC0 only for now */
@@ -7200,6 +7609,16 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
 
+#ifdef I40E_FCOE
+       case I40E_VSI_FCOE:
+               ret = i40e_fcoe_vsi_init(vsi, &ctxt);
+               if (ret) {
+                       dev_info(&pf->pdev->dev, "failed to initialize FCoE VSI\n");
+                       return ret;
+               }
+               break;
+
+#endif /* I40E_FCOE */
        default:
                return -ENODEV;
        }
@@ -7223,6 +7642,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
        list_for_each_entry_safe(f, ftmp, &vsi->mac_filter_list, list) {
                f->changed = true;
                f_count++;
+
+               if (f->is_laa && vsi->type == I40E_VSI_MAIN) {
+                       i40e_aq_mac_address_write(&vsi->back->hw,
+                                                 I40E_AQC_WRITE_TYPE_LAA_WOL,
+                                                 f->macaddr, NULL);
+               }
        }
        if (f_count) {
                vsi->flags |= I40E_VSI_FLAG_FILTER_CHANGED;
@@ -7552,6 +7977,7 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
        /* setup the netdev if needed */
        case I40E_VSI_MAIN:
        case I40E_VSI_VMDQ2:
+       case I40E_VSI_FCOE:
                ret = i40e_config_netdev(vsi);
                if (ret)
                        goto err_netdev;
@@ -8090,7 +8516,6 @@ int i40e_fetch_switch_configuration(struct i40e_pf *pf, bool printconfig)
  **/
 static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
 {
-       u32 rxfc = 0, txfc = 0, rxfc_reg;
        int ret;
 
        /* find out what's out there already */
@@ -8150,68 +8575,13 @@ static int i40e_setup_pf_switch(struct i40e_pf *pf, bool reinit)
                i40e_config_rss(pf);
 
        /* fill in link information and enable LSE reporting */
-       i40e_aq_get_link_info(&pf->hw, true, NULL, NULL);
+       i40e_update_link_info(&pf->hw, true);
        i40e_link_event(pf);
 
        /* Initialize user-specific link properties */
        pf->fc_autoneg_status = ((pf->hw.phy.link_info.an_info &
                                  I40E_AQ_AN_COMPLETED) ? true : false);
-       /* requested_mode is set in probe or by ethtool */
-       if (!pf->fc_autoneg_status)
-               goto no_autoneg;
-
-       if ((pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX) &&
-           (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX))
-               pf->hw.fc.current_mode = I40E_FC_FULL;
-       else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
-               pf->hw.fc.current_mode = I40E_FC_TX_PAUSE;
-       else if (pf->hw.phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
-               pf->hw.fc.current_mode = I40E_FC_RX_PAUSE;
-       else
-               pf->hw.fc.current_mode = I40E_FC_NONE;
 
-       /* sync the flow control settings with the auto-neg values */
-       switch (pf->hw.fc.current_mode) {
-       case I40E_FC_FULL:
-               txfc = 1;
-               rxfc = 1;
-               break;
-       case I40E_FC_TX_PAUSE:
-               txfc = 1;
-               rxfc = 0;
-               break;
-       case I40E_FC_RX_PAUSE:
-               txfc = 0;
-               rxfc = 1;
-               break;
-       case I40E_FC_NONE:
-       case I40E_FC_DEFAULT:
-               txfc = 0;
-               rxfc = 0;
-               break;
-       case I40E_FC_PFC:
-               /* TBD */
-               break;
-       /* no default case, we have to handle all possibilities here */
-       }
-
-       wr32(&pf->hw, I40E_PRTDCB_FCCFG, txfc << I40E_PRTDCB_FCCFG_TFCE_SHIFT);
-
-       rxfc_reg = rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
-                  ~I40E_PRTDCB_MFLCN_RFCE_MASK;
-       rxfc_reg |= (rxfc << I40E_PRTDCB_MFLCN_RFCE_SHIFT);
-
-       wr32(&pf->hw, I40E_PRTDCB_MFLCN, rxfc_reg);
-
-       goto fc_complete;
-
-no_autoneg:
-       /* disable L2 flow control, user can turn it on if they wish */
-       wr32(&pf->hw, I40E_PRTDCB_FCCFG, 0);
-       wr32(&pf->hw, I40E_PRTDCB_MFLCN, rd32(&pf->hw, I40E_PRTDCB_MFLCN) &
-                                        ~I40E_PRTDCB_MFLCN_RFCE_MASK);
-
-fc_complete:
        i40e_ptp_init(pf);
 
        return ret;
@@ -8226,6 +8596,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        int queues_left;
 
        pf->num_lan_qps = 0;
+#ifdef I40E_FCOE
+       pf->num_fcoe_qps = 0;
+#endif
 
        /* Find the max queues to be put into basic use.  We'll always be
         * using TC0, whether or not DCB is running, and TC0 will get the
@@ -8241,6 +8614,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
 
                /* make sure all the fancies are disabled */
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+#ifdef I40E_FCOE
+                              I40E_FLAG_FCOE_ENABLED   |
+#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_CAPABLE    |
@@ -8255,6 +8631,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
 
                pf->flags &= ~(I40E_FLAG_RSS_ENABLED    |
+#ifdef I40E_FCOE
+                              I40E_FLAG_FCOE_ENABLED   |
+#endif
                               I40E_FLAG_FD_SB_ENABLED  |
                               I40E_FLAG_FD_ATR_ENABLED |
                               I40E_FLAG_DCB_ENABLED    |
@@ -8270,6 +8649,22 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                queues_left -= pf->num_lan_qps;
        }
 
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED) {
+               if (I40E_DEFAULT_FCOE <= queues_left) {
+                       pf->num_fcoe_qps = I40E_DEFAULT_FCOE;
+               } else if (I40E_MINIMUM_FCOE <= queues_left) {
+                       pf->num_fcoe_qps = I40E_MINIMUM_FCOE;
+               } else {
+                       pf->num_fcoe_qps = 0;
+                       pf->flags &= ~I40E_FLAG_FCOE_ENABLED;
+                       dev_info(&pf->pdev->dev, "not enough queues for FCoE. FCoE feature will be disabled\n");
+               }
+
+               queues_left -= pf->num_fcoe_qps;
+       }
+
+#endif
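FCoE queue sizing falls through three tiers: the default count when enough queues remain, the minimum count as a fallback, and disabling the feature outright when even that will not fit. A compact model of that fallback chain (DEFAULT_FCOE and MINIMUM_FCOE below are illustrative stand-ins for the driver's constants):

/* Hedged model of the tiered FCoE queue allocation above. */
#include <stdio.h>

#define DEFAULT_FCOE 8
#define MINIMUM_FCOE 1

static int alloc_fcoe_qps(int queues_left)
{
        if (DEFAULT_FCOE <= queues_left)
                return DEFAULT_FCOE;
        if (MINIMUM_FCOE <= queues_left)
                return MINIMUM_FCOE;
        return 0;       /* caller clears I40E_FLAG_FCOE_ENABLED */
}

int main(void)
{
        printf("%d %d %d\n", alloc_fcoe_qps(16), alloc_fcoe_qps(4),
               alloc_fcoe_qps(0));      /* 8 1 0 */
        return 0;
}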
        if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
                if (queues_left > 1) {
                        queues_left -= 1; /* save 1 queue for FD */
@@ -8294,6 +8689,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
        }
 
        pf->queues_left = queues_left;
+#ifdef I40E_FCOE
+       dev_info(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
+#endif
 }
 
 /**
@@ -8360,6 +8758,10 @@ static void i40e_print_features(struct i40e_pf *pf)
                buf += sprintf(buf, "DCB ");
        if (pf->flags & I40E_FLAG_PTP)
                buf += sprintf(buf, "PTP ");
+#ifdef I40E_FCOE
+       if (pf->flags & I40E_FLAG_FCOE_ENABLED)
+               buf += sprintf(buf, "FCOE ");
+#endif
 
        BUG_ON(buf > (string + INFO_STRING_LEN));
        dev_info(&pf->pdev->dev, "%s\n", string);
@@ -8460,6 +8862,7 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
 
        /* Reset here to make sure all is clean and to define PF 'n' */
+       i40e_clear_hw(hw);
        err = i40e_pf_reset(hw);
        if (err) {
                dev_info(&pdev->dev, "Initial pf_reset failed: %d\n", err);
@@ -8489,12 +8892,20 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        dev_info(&pdev->dev, "%s\n", i40e_fw_version_str(hw));
        if (err) {
                dev_info(&pdev->dev,
-                        "init_adminq failed: %d expecting API %02x.%02x\n",
-                        err,
-                        I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
+                        "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
                goto err_pf_reset;
        }
 
+       if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
+           hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
+               dev_info(&pdev->dev,
+                        "The driver for the device detected a newer version of the NVM image than expected. Please install the most recent version of the network driver.\n");
+       else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
+                hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1))
+               dev_info(&pdev->dev,
+                        "The driver for the device detected an older version of the NVM image than expected. Please update the NVM image.\n");
+
        i40e_verify_eeprom(pf);
 
        /* Rev 0 hardware was never productized */
@@ -8535,6 +8946,21 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        }
        dev_info(&pdev->dev, "MAC address: %pM\n", hw->mac.addr);
        ether_addr_copy(hw->mac.perm_addr, hw->mac.addr);
+       i40e_get_port_mac_addr(hw, hw->mac.port_addr);
+       if (is_valid_ether_addr(hw->mac.port_addr))
+               pf->flags |= I40E_FLAG_PORT_ID_VALID;
+#ifdef I40E_FCOE
+       err = i40e_get_san_mac_addr(hw, hw->mac.san_addr);
+       if (err)
+               dev_info(&pdev->dev,
+                        "(non-fatal) SAN MAC retrieval failed: %d\n", err);
+       if (!is_valid_ether_addr(hw->mac.san_addr)) {
+               dev_warn(&pdev->dev, "invalid SAN MAC address %pM, falling back to LAN MAC\n",
+                        hw->mac.san_addr);
+               ether_addr_copy(hw->mac.san_addr, hw->mac.addr);
+       }
+       dev_info(&pf->pdev->dev, "SAN MAC: %pM\n", hw->mac.san_addr);
+#endif /* I40E_FCOE */
 
        pci_set_drvdata(pdev, pf);
        pci_save_state(pdev);
@@ -8651,6 +9077,11 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        mod_timer(&pf->service_timer,
                  round_jiffies(jiffies + pf->service_timer_period));
 
+#ifdef I40E_FCOE
+       /* create FCoE interface */
+       i40e_fcoe_vsi_setup(pf);
+
+#endif
        /* Get the negotiated link width and speed from PCI config space */
        pcie_capability_read_word(pf->pdev, PCI_EXP_LNKSTA, &link_status);
 
@@ -8722,7 +9153,6 @@ static void i40e_remove(struct pci_dev *pdev)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
        i40e_status ret_code;
-       u32 reg;
        int i;
 
        i40e_dbg_pf_exit(pf);
@@ -8800,11 +9230,6 @@ static void i40e_remove(struct pci_dev *pdev)
        kfree(pf->irq_pile);
        kfree(pf->vsi);
 
-       /* force a PF reset to clean anything leftover */
-       reg = rd32(&pf->hw, I40E_PFGEN_CTRL);
-       wr32(&pf->hw, I40E_PFGEN_CTRL, (reg | I40E_PFGEN_CTRL_PFSWR_MASK));
-       i40e_flush(&pf->hw);
-
        iounmap(pf->hw.hw_addr);
        kfree(pf);
        pci_release_selected_regions(pdev,
index 8129918..97bda3d 100644 (file)
@@ -240,6 +240,46 @@ i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
        return ret_code;
 }
 
+/**
+ * i40e_write_nvm_aq - Writes Shadow RAM.
+ * @hw: pointer to the HW structure.
+ * @module_pointer: module pointer location in words from the NVM beginning
+ * @offset: offset in words from module start
+ * @words: number of words to write
+ * @data: buffer with words to write to the Shadow RAM
+ * @last_command: tells the AdminQ that this is the last command
+ *
+ * Writes a buffer of 16-bit words to the Shadow RAM using the admin command.
+ **/
+i40e_status i40e_write_nvm_aq(struct i40e_hw *hw, u8 module_pointer,
+                             u32 offset, u16 words, void *data,
+                             bool last_command)
+{
+       i40e_status ret_code = I40E_ERR_NVM;
+
+       /* Here we are checking the SR limit only for the flat memory model.
+        * We cannot do it for the module-based model, as we did not acquire
+        * the NVM resource yet (we cannot get the module pointer value).
+        * Firmware will check the module-based model.
+        */
+       if ((offset + words) > hw->nvm.sr_size)
+               hw_dbg(hw, "NVM write error: offset beyond Shadow RAM limit.\n");
+       else if (words > I40E_SR_SECTOR_SIZE_IN_WORDS)
+               /* We can write only up to 4KB (one sector) in one AQ write */
+               hw_dbg(hw, "NVM write fail error: cannot write more than 4KB in a single write.\n");
+       else if (((offset + (words - 1)) / I40E_SR_SECTOR_SIZE_IN_WORDS)
+                != (offset / I40E_SR_SECTOR_SIZE_IN_WORDS))
+               /* A single write cannot spread over two sectors */
+               hw_dbg(hw, "NVM write error: cannot spread over two sectors in a single write.\n");
+       else
+               ret_code = i40e_aq_update_nvm(hw, module_pointer,
+                                             2 * offset,  /*bytes*/
+                                             2 * words,   /*bytes*/
+                                             data, last_command, NULL);
+
+       return ret_code;
+}
+
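The guards ahead of the AQ call enforce the Shadow RAM write geometry: stay inside the SR, write at most one 4KB sector, and never straddle a sector boundary, which the code checks by comparing the sector index of the first and last word. The boundary arithmetic in isolation (2048 words per sector, i.e. 4KB of 16-bit words):

/* Hedged model of the sector-crossing check in i40e_write_nvm_aq().
 * SECTOR_WORDS = 4096 bytes / 2 bytes per word = 2048, matching
 * I40E_SR_SECTOR_SIZE_IN_WORDS.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_WORDS 2048

static bool crosses_sector(uint32_t offset, uint16_t words)
{
        return (offset + (words - 1)) / SECTOR_WORDS !=
               offset / SECTOR_WORDS;
}

int main(void)
{
        printf("%d\n", crosses_sector(0, 2048));  /* 0: exactly one sector */
        printf("%d\n", crosses_sector(2040, 16)); /* 1: spans the boundary */
        return 0;
}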
 /**
  * i40e_calc_nvm_checksum - Calculates and returns the checksum
  * @hw: pointer to hardware structure
@@ -309,6 +349,27 @@ i40e_calc_nvm_checksum_exit:
        return ret_code;
 }
 
+/**
+ * i40e_update_nvm_checksum - Updates the NVM checksum
+ * @hw: pointer to hardware structure
+ *
+ * NVM ownership must be acquired before calling this function and released
+ * by the caller on reception of the ARQ completion event.
+ * This function will commit the SR to NVM.
+ **/
+i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw)
+{
+       i40e_status ret_code = 0;
+       u16 checksum;
+
+       ret_code = i40e_calc_nvm_checksum(hw, &checksum);
+       if (!ret_code)
+               ret_code = i40e_write_nvm_aq(hw, 0x00, I40E_SR_SW_CHECKSUM_WORD,
+                                            1, &checksum, true);
+
+       return ret_code;
+}
+
 /**
  * i40e_validate_nvm_checksum - Validate EEPROM checksum
  * @hw: pointer to hardware structure
@@ -324,13 +385,9 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
        u16 checksum_sr = 0;
        u16 checksum_local = 0;
 
-       ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
-       if (ret_code)
-               goto i40e_validate_nvm_checksum_exit;
-
        ret_code = i40e_calc_nvm_checksum(hw, &checksum_local);
        if (ret_code)
-               goto i40e_validate_nvm_checksum_free;
+               goto i40e_validate_nvm_checksum_exit;
 
        /* Do not use i40e_read_nvm_word() because we do not want to take
         * the synchronization semaphores twice here.
@@ -347,9 +404,456 @@ i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
        if (checksum)
                *checksum = checksum_local;
 
-i40e_validate_nvm_checksum_free:
-       i40e_release_nvm(hw);
-
 i40e_validate_nvm_checksum_exit:
        return ret_code;
 }
+
+static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+                                         struct i40e_nvm_access *cmd,
+                                         u8 *bytes, int *errno);
+static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+                                            struct i40e_nvm_access *cmd,
+                                            u8 *bytes, int *errno);
+static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+                                            struct i40e_nvm_access *cmd,
+                                            u8 *bytes, int *errno);
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+                                               struct i40e_nvm_access *cmd,
+                                               int *errno);
+static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+                                        struct i40e_nvm_access *cmd,
+                                        int *errno);
+static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+                                        struct i40e_nvm_access *cmd,
+                                        u8 *bytes, int *errno);
+static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+                                       struct i40e_nvm_access *cmd,
+                                       u8 *bytes, int *errno);
+static inline u8 i40e_nvmupd_get_module(u32 val)
+{
+       return (u8)(val & I40E_NVM_MOD_PNT_MASK);
+}
+static inline u8 i40e_nvmupd_get_transaction(u32 val)
+{
+       return (u8)((val & I40E_NVM_TRANS_MASK) >> I40E_NVM_TRANS_SHIFT);
+}
+
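The two helpers above decode a packed config word: the module pointer sits in the low byte and the transaction type is shifted above it. A quick decode under that layout (the masks restate the driver's I40E_NVM_* defines from memory; treat them as assumptions):

/* Hedged decode of a packed NVM config word, as in
 * i40e_nvmupd_get_module()/i40e_nvmupd_get_transaction().
 */
#include <stdint.h>
#include <stdio.h>

#define MOD_PNT_MASK 0xff
#define TRANS_SHIFT  8
#define TRANS_MASK   (0xf << TRANS_SHIFT)

int main(void)
{
        uint32_t config = (0x1 << TRANS_SHIFT) | 0x42;

        printf("module:      0x%02x\n", (uint8_t)(config & MOD_PNT_MASK));
        printf("transaction: 0x%x\n",
               (uint8_t)((config & TRANS_MASK) >> TRANS_SHIFT));
        return 0;
}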
+/**
+ * i40e_nvmupd_command - Process an NVM update command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * Dispatches command depending on what update state is current
+ **/
+i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+                               struct i40e_nvm_access *cmd,
+                               u8 *bytes, int *errno)
+{
+       i40e_status status;
+
+       /* assume success */
+       *errno = 0;
+
+       switch (hw->nvmupd_state) {
+       case I40E_NVMUPD_STATE_INIT:
+               status = i40e_nvmupd_state_init(hw, cmd, bytes, errno);
+               break;
+
+       case I40E_NVMUPD_STATE_READING:
+               status = i40e_nvmupd_state_reading(hw, cmd, bytes, errno);
+               break;
+
+       case I40E_NVMUPD_STATE_WRITING:
+               status = i40e_nvmupd_state_writing(hw, cmd, bytes, errno);
+               break;
+
+       default:
+               /* invalid state, should never happen */
+               status = I40E_NOT_SUPPORTED;
+               *errno = -ESRCH;
+               break;
+       }
+       return status;
+}
+
+/**
+ * i40e_nvmupd_state_init - Handle NVM update state Init
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * Process legitimate commands of the Init state and conditionally set next
+ * state. Reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_init(struct i40e_hw *hw,
+                                         struct i40e_nvm_access *cmd,
+                                         u8 *bytes, int *errno)
+{
+       i40e_status status = 0;
+       enum i40e_nvmupd_cmd upd_cmd;
+
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+
+       switch (upd_cmd) {
+       case I40E_NVMUPD_READ_SA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (status) {
+                       *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+                       i40e_release_nvm(hw);
+               }
+               break;
+
+       case I40E_NVMUPD_READ_SNT:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+               if (status) {
+                       *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_READING;
+               }
+               break;
+
+       case I40E_NVMUPD_WRITE_ERA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_erase(hw, cmd, errno);
+                       if (status)
+                               i40e_release_nvm(hw);
+                       else
+                               hw->aq.nvm_release_on_done = true;
+               }
+               break;
+
+       case I40E_NVMUPD_WRITE_SA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+                       if (status)
+                               i40e_release_nvm(hw);
+                       else
+                               hw->aq.nvm_release_on_done = true;
+               }
+               break;
+
+       case I40E_NVMUPD_WRITE_SNT:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+               } else {
+                       status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+               }
+               break;
+
+       case I40E_NVMUPD_CSUM_SA:
+               status = i40e_acquire_nvm(hw, I40E_RESOURCE_WRITE);
+               if (status) {
+                       *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+               } else {
+                       status = i40e_update_nvm_checksum(hw);
+                       if (status) {
+                               *errno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+                                  -EIO;
+                               i40e_release_nvm(hw);
+                       } else {
+                               hw->aq.nvm_release_on_done = true;
+                       }
+               }
+               break;
+
+       default:
+               status = I40E_ERR_NVM;
+               *errno = -ESRCH;
+               break;
+       }
+       return status;
+}
+
+/**
+ * i40e_nvmupd_state_reading - Handle NVM update state Reading
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * NVM ownership is already held.  Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_reading(struct i40e_hw *hw,
+                                            struct i40e_nvm_access *cmd,
+                                            u8 *bytes, int *errno)
+{
+       i40e_status status;
+       enum i40e_nvmupd_cmd upd_cmd;
+
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+
+       switch (upd_cmd) {
+       case I40E_NVMUPD_READ_SA:
+       case I40E_NVMUPD_READ_CON:
+               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+               break;
+
+       case I40E_NVMUPD_READ_LCB:
+               status = i40e_nvmupd_nvm_read(hw, cmd, bytes, errno);
+               i40e_release_nvm(hw);
+               hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               break;
+
+       default:
+               status = I40E_NOT_SUPPORTED;
+               *errno = -ESRCH;
+               break;
+       }
+       return status;
+}
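
Together with the Init state above, this gives segmented reads driven purely
by the transaction tag: READ_SNT takes the semaphore and enters the Reading
state, READ_CON continues under the held semaphore, and READ_LCB reads the
last buffer and releases. A sketch of the sequence a user-space tool might
drive (hypothetical module value, offsets elided):

    /* Hypothetical segmented read through i40e_nvmupd_command():
     *
     *   cmd.command = I40E_NVM_READ;
     *
     *   cmd.config = (I40E_NVM_SNT << I40E_NVM_TRANS_SHIFT) | module;
     *     -> acquires NVM, reads the first chunk, state = READING
     *
     *   cmd.config = (I40E_NVM_CON << I40E_NVM_TRANS_SHIFT) | module;
     *     -> reads the next chunk, semaphore still held
     *
     *   cmd.config = (I40E_NVM_LCB << I40E_NVM_TRANS_SHIFT) | module;
     *     -> reads the last chunk, releases NVM, state back to INIT
     */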
+
+/**
+ * i40e_nvmupd_state_writing - Handle NVM update state Writing
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * NVM ownership is already held.  Process legitimate commands and set any
+ * change in state; reject all other commands.
+ **/
+static i40e_status i40e_nvmupd_state_writing(struct i40e_hw *hw,
+                                            struct i40e_nvm_access *cmd,
+                                            u8 *bytes, int *errno)
+{
+       i40e_status status;
+       enum i40e_nvmupd_cmd upd_cmd;
+
+       upd_cmd = i40e_nvmupd_validate_command(hw, cmd, errno);
+
+       switch (upd_cmd) {
+       case I40E_NVMUPD_WRITE_CON:
+               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+               break;
+
+       case I40E_NVMUPD_WRITE_LCB:
+               status = i40e_nvmupd_nvm_write(hw, cmd, bytes, errno);
+               if (!status) {
+                       hw->aq.nvm_release_on_done = true;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               }
+               break;
+
+       case I40E_NVMUPD_CSUM_CON:
+               status = i40e_update_nvm_checksum(hw);
+               if (status)
+                       *errno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+                                  -EIO;
+               break;
+
+       case I40E_NVMUPD_CSUM_LCB:
+               status = i40e_update_nvm_checksum(hw);
+               if (status) {
+                       *errno = hw->aq.asq_last_status ?
+                                  i40e_aq_rc_to_posix(hw->aq.asq_last_status) :
+                                  -EIO;
+               } else {
+                       hw->aq.nvm_release_on_done = true;
+                       hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+               }
+               break;
+
+       default:
+               status = I40E_NOT_SUPPORTED;
+               *errno = -ESRCH;
+               break;
+       }
+       return status;
+}
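
The Writing state mirrors Reading, with one extra wrinkle: a terminating
WRITE_LCB or CSUM_LCB does not release the semaphore directly but arms
nvm_release_on_done, so the release happens only once the ARQ reports the
write complete. Sketch (hypothetical sequence):

    /* Hypothetical segmented write:
     *
     *   WRITE_SNT -> acquire NVM, write the first chunk, state = WRITING
     *   WRITE_CON -> write the middle chunk(s), semaphore still held
     *   CSUM_LCB  -> commit the checksum, arm nvm_release_on_done,
     *                state back to INIT; NVM released on ARQ completion
     */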
+
+/**
+ * i40e_nvmupd_validate_command - Validate given command
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @errno: pointer to return error code
+ *
+ * Return one of the valid command types or I40E_NVMUPD_INVALID
+ **/
+static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
+                                                struct i40e_nvm_access *cmd,
+                                                int *errno)
+{
+       enum i40e_nvmupd_cmd upd_cmd;
+       u8 transaction, module;
+
+       /* anything that doesn't match a recognized case is an error */
+       upd_cmd = I40E_NVMUPD_INVALID;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+
+       /* limits on data size */
+       if ((cmd->data_size < 1) ||
+           (cmd->data_size > I40E_NVMUPD_MAX_DATA)) {
+               hw_dbg(hw, "i40e_nvmupd_validate_command data_size %d\n",
+                      cmd->data_size);
+               *errno = -EFAULT;
+               return I40E_NVMUPD_INVALID;
+       }
+
+       switch (cmd->command) {
+       case I40E_NVM_READ:
+               switch (transaction) {
+               case I40E_NVM_CON:
+                       upd_cmd = I40E_NVMUPD_READ_CON;
+                       break;
+               case I40E_NVM_SNT:
+                       upd_cmd = I40E_NVMUPD_READ_SNT;
+                       break;
+               case I40E_NVM_LCB:
+                       upd_cmd = I40E_NVMUPD_READ_LCB;
+                       break;
+               case I40E_NVM_SA:
+                       upd_cmd = I40E_NVMUPD_READ_SA;
+                       break;
+               }
+               break;
+
+       case I40E_NVM_WRITE:
+               switch (transaction) {
+               case I40E_NVM_CON:
+                       upd_cmd = I40E_NVMUPD_WRITE_CON;
+                       break;
+               case I40E_NVM_SNT:
+                       upd_cmd = I40E_NVMUPD_WRITE_SNT;
+                       break;
+               case I40E_NVM_LCB:
+                       upd_cmd = I40E_NVMUPD_WRITE_LCB;
+                       break;
+               case I40E_NVM_SA:
+                       upd_cmd = I40E_NVMUPD_WRITE_SA;
+                       break;
+               case I40E_NVM_ERA:
+                       upd_cmd = I40E_NVMUPD_WRITE_ERA;
+                       break;
+               case I40E_NVM_CSUM:
+                       upd_cmd = I40E_NVMUPD_CSUM_CON;
+                       break;
+               case (I40E_NVM_CSUM|I40E_NVM_SA):
+                       upd_cmd = I40E_NVMUPD_CSUM_SA;
+                       break;
+               case (I40E_NVM_CSUM|I40E_NVM_LCB):
+                       upd_cmd = I40E_NVMUPD_CSUM_LCB;
+                       break;
+               }
+               break;
+       }
+
+       if (upd_cmd == I40E_NVMUPD_INVALID) {
+               *errno = -EFAULT;
+               hw_dbg(hw,
+                      "i40e_nvmupd_validate_command returns %d  errno: %d\n",
+                      upd_cmd, *errno);
+       }
+       return upd_cmd;
+}
+
+/**
+ * i40e_nvmupd_nvm_read - Read NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * cmd structure contains identifiers and data buffer
+ **/
+static i40e_status i40e_nvmupd_nvm_read(struct i40e_hw *hw,
+                                       struct i40e_nvm_access *cmd,
+                                       u8 *bytes, int *errno)
+{
+       i40e_status status;
+       u8 module, transaction;
+       bool last;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+       last = (transaction == I40E_NVM_LCB) || (transaction == I40E_NVM_SA);
+       hw_dbg(hw, "i40e_nvmupd_nvm_read mod 0x%x  off 0x%x  len 0x%x\n",
+              module, cmd->offset, cmd->data_size);
+
+       status = i40e_aq_read_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+                                 bytes, last, NULL);
+       hw_dbg(hw, "i40e_nvmupd_nvm_read status %d\n", status);
+       if (status)
+               *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+       return status;
+}
+
+/**
+ * i40e_nvmupd_nvm_erase - Erase an NVM module
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @errno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static i40e_status i40e_nvmupd_nvm_erase(struct i40e_hw *hw,
+                                        struct i40e_nvm_access *cmd,
+                                        int *errno)
+{
+       i40e_status status = 0;
+       u8 module, transaction;
+       bool last;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+       last = (transaction & I40E_NVM_LCB);
+       hw_dbg(hw, "i40e_nvmupd_nvm_erase mod 0x%x  off 0x%x  len 0x%x\n",
+              module, cmd->offset, cmd->data_size);
+       status = i40e_aq_erase_nvm(hw, module, cmd->offset, (u16)cmd->data_size,
+                                  last, NULL);
+       hw_dbg(hw, "i40e_nvmupd_nvm_erase status %d\n", status);
+       if (status)
+               *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+       return status;
+}
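
Erase is reachable only from the Init state (the WRITE_ERA case earlier
acquires the semaphore, issues the AdminQ erase, and defers the release to the
completion event). A hypothetical request, with module and length chosen
purely for illustration:

    /* Hypothetical erase request (no data buffer is consumed):
     *
     *   cmd.command   = I40E_NVM_WRITE;
     *   cmd.config    = (I40E_NVM_ERA << I40E_NVM_TRANS_SHIFT) | module;
     *   cmd.offset    = 0;
     *   cmd.data_size = length_to_erase;
     *   i40e_nvmupd_command(hw, &cmd, NULL, &err);
     */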
+
+/**
+ * i40e_nvmupd_nvm_write - Write NVM
+ * @hw: pointer to hardware structure
+ * @cmd: pointer to nvm update command buffer
+ * @bytes: pointer to the data buffer
+ * @errno: pointer to return error code
+ *
+ * module, offset, data_size and data are in cmd structure
+ **/
+static i40e_status i40e_nvmupd_nvm_write(struct i40e_hw *hw,
+                                        struct i40e_nvm_access *cmd,
+                                        u8 *bytes, int *errno)
+{
+       i40e_status status = 0;
+       u8 module, transaction;
+       bool last;
+
+       transaction = i40e_nvmupd_get_transaction(cmd->config);
+       module = i40e_nvmupd_get_module(cmd->config);
+       last = (transaction & I40E_NVM_LCB);
+       hw_dbg(hw, "i40e_nvmupd_nvm_write mod 0x%x off 0x%x len 0x%x\n",
+              module, cmd->offset, cmd->data_size);
+       status = i40e_aq_update_nvm(hw, module, cmd->offset,
+                                   (u16)cmd->data_size, bytes, last, NULL);
+       hw_dbg(hw, "i40e_nvmupd_nvm_write status %d\n", status);
+       if (status)
+               *errno = i40e_aq_rc_to_posix(hw->aq.asq_last_status);
+
+       return status;
+}
index ecd0f0b..045b5c4 100644
@@ -78,4 +78,7 @@ do {                                                            \
 } while (0)
 
 typedef enum i40e_status_code i40e_status;
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+#define I40E_FCOE
+#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */
 #endif /* _I40E_OSDEP_H_ */
index a430699..949a9a0 100644
@@ -70,17 +70,31 @@ i40e_status i40e_aq_get_firmware_version(struct i40e_hw *hw,
                                u16 *fw_major_version, u16 *fw_minor_version,
                                u16 *api_major_version, u16 *api_minor_version,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_debug_write_register(struct i40e_hw *hw,
+                                       u32 reg_addr, u64 reg_val,
+                                       struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
                                struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
+                       bool qualified_modules, bool report_init,
+                       struct i40e_aq_get_phy_abilities_resp *abilities,
+                       struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_phy_config(struct i40e_hw *hw,
+                               struct i40e_aq_set_phy_config *config,
+                               struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
+                                 bool atomic_reset);
 i40e_status i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_set_link_restart_an(struct i40e_hw *hw,
-                               struct i40e_asq_cmd_details *cmd_details);
+                                       bool enable_link,
+                                       struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_get_link_info(struct i40e_hw *hw,
                                bool enable_lse, struct i40e_link_status *link,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_update_link_info(struct i40e_hw *hw, bool enable_lse);
 i40e_status i40e_aq_set_local_advt_reg(struct i40e_hw *hw,
                                u64 advt_reg,
                                struct i40e_asq_cmd_details *cmd_details);
@@ -139,6 +153,9 @@ i40e_status i40e_aq_read_nvm(struct i40e_hw *hw, u8 module_pointer,
                                u32 offset, u16 length, void *data,
                                bool last_command,
                                struct i40e_asq_cmd_details *cmd_details);
+i40e_status i40e_aq_erase_nvm(struct i40e_hw *hw, u8 module_pointer,
+                             u32 offset, u16 length, bool last_command,
+                             struct i40e_asq_cmd_details *cmd_details);
 i40e_status i40e_aq_discover_capabilities(struct i40e_hw *hw,
                                void *buff, u16 buff_size, u16 *data_size,
                                enum i40e_admin_queue_opc list_type_opc,
@@ -216,12 +233,16 @@ i40e_status i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
 /* i40e_common */
 i40e_status i40e_init_shared_code(struct i40e_hw *hw);
 i40e_status i40e_pf_reset(struct i40e_hw *hw);
+void i40e_clear_hw(struct i40e_hw *hw);
 void i40e_clear_pxe_mode(struct i40e_hw *hw);
 bool i40e_get_link_status(struct i40e_hw *hw);
-i40e_status i40e_get_mac_addr(struct i40e_hw *hw,
-                                               u8 *mac_addr);
+i40e_status i40e_get_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+i40e_status i40e_get_port_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
 i40e_status i40e_validate_mac_addr(u8 *mac_addr);
 void i40e_pre_tx_queue_cfg(struct i40e_hw *hw, u32 queue, bool enable);
+#ifdef I40E_FCOE
+i40e_status i40e_get_san_mac_addr(struct i40e_hw *hw, u8 *mac_addr);
+#endif
 /* prototype for functions used for NVM access */
 i40e_status i40e_init_nvm(struct i40e_hw *hw);
 i40e_status i40e_acquire_nvm(struct i40e_hw *hw,
@@ -233,8 +254,12 @@ i40e_status i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
                                         u16 *data);
 i40e_status i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
                                           u16 *words, u16 *data);
+i40e_status i40e_update_nvm_checksum(struct i40e_hw *hw);
 i40e_status i40e_validate_nvm_checksum(struct i40e_hw *hw,
                                                 u16 *checksum);
+i40e_status i40e_nvmupd_command(struct i40e_hw *hw,
+                               struct i40e_nvm_access *cmd,
+                               u8 *bytes, int *errno);
 void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
 
 extern struct i40e_rx_ptype_decoded i40e_ptype_lookup[];
index 101f439..bb7fe98 100644
@@ -25,7 +25,6 @@
  ******************************************************************************/
 
 #include "i40e.h"
-#include <linux/export.h>
 #include <linux/ptp_classify.h>
 
 /* The XL710 timesync is very much like Intel's 82599 design when it comes to
@@ -216,7 +215,7 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
 }
 
 /**
- * i40e_ptp_enable - Enable/disable ancillary features of the PHC subsystem
+ * i40e_ptp_feature_enable - Enable/disable ancillary features of the PHC subsystem
  * @ptp: The PTP clock structure
  * @rq: The requested feature to change
  * @on: Enable/disable flag
@@ -224,8 +223,8 @@ static int i40e_ptp_settime(struct ptp_clock_info *ptp,
  * The XL710 does not support any of the ancillary features of the PHC
  * subsystem, so this function may just return.
  **/
-static int i40e_ptp_enable(struct ptp_clock_info *ptp,
-                          struct ptp_clock_request *rq, int on)
+static int i40e_ptp_feature_enable(struct ptp_clock_info *ptp,
+                                  struct ptp_clock_request *rq, int on)
 {
        return -EOPNOTSUPP;
 }
@@ -315,6 +314,7 @@ void i40e_ptp_tx_hwtstamp(struct i40e_pf *pf)
        skb_tstamp_tx(pf->ptp_tx_skb, &shhwtstamps);
        dev_kfree_skb_any(pf->ptp_tx_skb);
        pf->ptp_tx_skb = NULL;
+       clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
 }
 
 /**
@@ -423,28 +423,23 @@ int i40e_ptp_get_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
 }
 
 /**
- * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * i40e_ptp_set_timestamp_mode - setup hardware for requested timestamp mode
  * @pf: Board private structure
- * @ifreq: ioctl data
+ * @config: hwtstamp settings requested or saved
  *
- * Respond to the user filter requests and make the appropriate hardware
- * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
- * logic, so keep track in software of whether to indicate these timestamps
- * or not.
+ * Control hardware registers to enter the specific mode requested by the
+ * user. Also used during reset path to ensure that timestamp settings are
+ * maintained.
  *
- * It is permissible to "upgrade" the user request to a broader filter, as long
- * as the user receives the timestamps they care about and the user is notified
- * the filter has been broadened.
+ * Note: modifies config in place, and may update the requested mode to be
+ * more broad if the specific filter is not directly supported.
  **/
-int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+static int i40e_ptp_set_timestamp_mode(struct i40e_pf *pf,
+                                      struct hwtstamp_config *config)
 {
        struct i40e_hw *hw = &pf->hw;
-       struct hwtstamp_config *config = &pf->tstamp_config;
        u32 pf_id, tsyntype, regval;
 
-       if (copy_from_user(config, ifr->ifr_data, sizeof(*config)))
-               return -EFAULT;
-
        /* Reserved for future extensions. */
        if (config->flags)
                return -EINVAL;
@@ -452,8 +447,12 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
        /* Confirm that 1588 is supported on this PF. */
        pf_id = (rd32(hw, I40E_PRTTSYN_CTL0) & I40E_PRTTSYN_CTL0_PF_ID_MASK) >>
                I40E_PRTTSYN_CTL0_PF_ID_SHIFT;
-       if (hw->pf_id != pf_id)
-               return -EINVAL;
+       if (hw->pf_id != pf_id) {
+               dev_err(&pf->pdev->dev,
+                       "PF %d attempted to control timestamp mode on port %d, which is owned by PF %d\n",
+                       hw->pf_id, hw->port, pf_id);
+               return -EPERM;
+       }
 
        switch (config->tx_type) {
        case HWTSTAMP_TX_OFF:
@@ -535,23 +534,59 @@ int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
                wr32(hw, I40E_PRTTSYN_CTL1, regval);
        }
 
-       return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ?
+       return 0;
+}
+
+/**
+ * i40e_ptp_set_ts_config - ioctl interface to control the HW timestamping
+ * @pf: Board private structure
+ * @ifreq: ioctl data
+ *
+ * Respond to the user filter requests and make the appropriate hardware
+ * changes here. The XL710 cannot support splitting of the Tx/Rx timestamping
+ * logic, so keep track in software of whether to indicate these timestamps
+ * or not.
+ *
+ * It is permissible to "upgrade" the user request to a broader filter, as long
+ * as the user receives the timestamps they care about and the user is notified
+ * the filter has been broadened.
+ **/
+int i40e_ptp_set_ts_config(struct i40e_pf *pf, struct ifreq *ifr)
+{
+       struct hwtstamp_config config;
+       int err;
+
+       if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+               return -EFAULT;
+
+       err = i40e_ptp_set_timestamp_mode(pf, &config);
+       if (err)
+               return err;
+
+       /* save these settings for future reference */
+       pf->tstamp_config = config;
+
+       return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
                -EFAULT : 0;
 }
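
For reference, the user-space half of this exchange is the standard
SIOCSHWTSTAMP flow. A minimal sketch (interface name and Rx filter chosen for
illustration, error handling trimmed):

    #include <linux/net_tstamp.h>
    #include <linux/sockios.h>
    #include <net/if.h>
    #include <string.h>
    #include <sys/ioctl.h>

    /* Request Tx timestamping and PTPv2 Rx filtering on "eth0" over any
     * datagram socket; the driver may broaden the Rx filter and reports
     * what it actually applied back through the same structure.
     */
    int example_enable_hwtstamp(int sock)
    {
            struct hwtstamp_config cfg;
            struct ifreq ifr;

            memset(&cfg, 0, sizeof(cfg));
            cfg.tx_type = HWTSTAMP_TX_ON;
            cfg.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&cfg;

            if (ioctl(sock, SIOCSHWTSTAMP, &ifr) < 0)
                    return -1;

            /* cfg.rx_filter now holds the (possibly broadened) filter */
            return 0;
    }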
 
 /**
- * i40e_ptp_init - Initialize the 1588 support and register the PHC
+ * i40e_ptp_create_clock - Create PTP clock device for userspace
  * @pf: Board private structure
  *
- * This function registers the device clock as a PHC. If it is successful, it
- * starts the clock in the hardware.
+ * This function creates a new PTP clock device. It only creates one if we
+ * don't already have one, so it is safe to call repeatedly. It returns an
+ * error if it cannot create a device, and success if one already exists.
+ * It should be used by i40e_ptp_init to create the clock initially, and it
+ * prevents global resets from creating duplicate clock devices.
  **/
-void i40e_ptp_init(struct i40e_pf *pf)
+static long i40e_ptp_create_clock(struct i40e_pf *pf)
 {
-       struct i40e_hw *hw = &pf->hw;
-       struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+       /* no need to create a clock device if we already have one */
+       if (!IS_ERR_OR_NULL(pf->ptp_clock))
+               return 0;
 
-       strncpy(pf->ptp_caps.name, "i40e", sizeof(pf->ptp_caps.name));
+       strncpy(pf->ptp_caps.name, i40e_driver_name, sizeof(pf->ptp_caps.name));
        pf->ptp_caps.owner = THIS_MODULE;
        pf->ptp_caps.max_adj = 999999999;
        pf->ptp_caps.n_ext_ts = 0;
@@ -560,11 +595,46 @@ void i40e_ptp_init(struct i40e_pf *pf)
        pf->ptp_caps.adjtime = i40e_ptp_adjtime;
        pf->ptp_caps.gettime = i40e_ptp_gettime;
        pf->ptp_caps.settime = i40e_ptp_settime;
-       pf->ptp_caps.enable = i40e_ptp_enable;
+       pf->ptp_caps.enable = i40e_ptp_feature_enable;
 
        /* Attempt to register the clock before enabling the hardware. */
        pf->ptp_clock = ptp_clock_register(&pf->ptp_caps, &pf->pdev->dev);
        if (IS_ERR(pf->ptp_clock)) {
+               return PTR_ERR(pf->ptp_clock);
+       }
+
+       /* clear the hwtstamp settings here during clock create, instead of
+        * during regular init, so that we can maintain settings across a
+        * reset or suspend.
+        */
+       pf->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
+       pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
+
+       return 0;
+}
+
+/**
+ * i40e_ptp_init - Initialize the 1588 support after device probe or reset
+ * @pf: Board private structure
+ *
+ * This function sets the device up for 1588 support. The first time it is run, it
+ * will create a PHC clock device. It does not create a clock device if one
+ * already exists. It also reconfigures the device after a reset.
+ **/
+void i40e_ptp_init(struct i40e_pf *pf)
+{
+       struct net_device *netdev = pf->vsi[pf->lan_vsi]->netdev;
+       struct i40e_hw *hw = &pf->hw;
+       long err;
+
+       /* we have to initialize the lock first, since we can't control
+        * when the user will enter the PHC device entry points
+        */
+       spin_lock_init(&pf->tmreg_lock);
+
+       /* ensure we have a clock device */
+       err = i40e_ptp_create_clock(pf);
+       if (err) {
                pf->ptp_clock = NULL;
                dev_err(&pf->pdev->dev, "%s: ptp_clock_register failed\n",
                        __func__);
@@ -572,8 +642,6 @@ void i40e_ptp_init(struct i40e_pf *pf)
                struct timespec ts;
                u32 regval;
 
-               spin_lock_init(&pf->tmreg_lock);
-
                dev_info(&pf->pdev->dev, "%s: added PHC on %s\n", __func__,
                         netdev->name);
                pf->flags |= I40E_FLAG_PTP;
@@ -589,8 +657,8 @@ void i40e_ptp_init(struct i40e_pf *pf)
                /* Set the increment value per clock tick. */
                i40e_ptp_set_increment(pf);
 
-               /* reset the tstamp_config */
-               memset(&pf->tstamp_config, 0, sizeof(pf->tstamp_config));
+               /* reset timestamping mode */
+               i40e_ptp_set_timestamp_mode(pf, &pf->tstamp_config);
 
                /* Set the clock value. */
                ts = ktime_to_timespec(ktime_get_real());
@@ -614,6 +682,7 @@ void i40e_ptp_stop(struct i40e_pf *pf)
        if (pf->ptp_tx_skb) {
                dev_kfree_skb_any(pf->ptp_tx_skb);
                pf->ptp_tx_skb = NULL;
+               clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
        }
 
        if (pf->ptp_clock) {
index 947de98..65d3c8b 100644
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
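
Most of the churn below swaps the open-coded (value << shift) masks for the
I40E_MASK() helper; in the i40e type header that helper is just the shifted
mask, along the lines of:

    /* Shape of the helper the new definitions rely on: */
    #define I40E_MASK(mask, shift) (mask << shift)

    /* so each rewritten line is mechanically equivalent, e.g.
     *   I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
     * expands to the old open-coded form
     *   (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
     */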
 
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-
-#define I40E_PF_ARQBAH 0x00080180
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
 #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
 #define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
 #define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
 #define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
 #define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
 #define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
 #define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
 #define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
 #define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
 #define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAH_MAX_INDEX 127
 #define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAL_MAX_INDEX 127
 #define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQH_MAX_INDEX 127
 #define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQLEN_MAX_INDEX 127
 #define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQT_MAX_INDEX 127
 #define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAH_MAX_INDEX 127
 #define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAL_MAX_INDEX 127
 #define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQH_MAX_INDEX 127
 #define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQLEN_MAX_INDEX 127
 #define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQT_MAX_INDEX 127
 #define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
 #define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL 0x0010C300
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
 #define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
 #define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
 #define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT 0x0010C380
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
 #define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
 #define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_PFCM_PE_ERRDATA 0x00138D00
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_PE_ERRINFO 0x00138C80
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
 #define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
 #define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
 #define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
 #define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
 #define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
 #define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
 #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
 #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
 #define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
 #define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
 #define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
 #define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
 #define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
 #define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
 #define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
 #define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
 #define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
 #define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
 #define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
 #define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
 #define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
 #define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TDPUC 0x00044100
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
 #define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
-#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
 #define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
 #define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
 #define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
 #define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
 #define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
 #define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
 #define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
 #define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
 #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
 #define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
 #define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
 #define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
 #define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CCMD_MAX_INDEX 3
 #define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
 #define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
 #define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
 #define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
 #define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
 #define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
 #define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSCA_MAX_INDEX 3
 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
 #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
 #define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
 #define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_PE_ENA 0x000B81A0
-#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
-#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
 #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
 #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
 #define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
 #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
 #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
 #define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
 #define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
 #define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
 #define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
 #define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
 #define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
 #define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
 #define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
 #define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
 #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
 #define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
 #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000
-#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
-#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
 #define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
 #define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
 #define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
 #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
 #define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
 #define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
 #define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFGEN_RSTAT1_MAX_INDEX 127
 #define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RSTAT_MAX_INDEX 383
 #define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RTRIG_MAX_INDEX 383
 #define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
-#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_CEQPART_MAX_INDEX 15
-#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
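[Editor's note: every hunk in this header follows one mechanical pattern: open-coded masks of the form (value << FIELD_SHIFT) are rewritten as I40E_MASK(value, FIELD_SHIFT), and each register offset gains a /* Reset: ... */ comment naming the reset domain (POR, CORER, GLOBR, PFR, VFR, PCIR) that clears it. A minimal standalone sketch of the equivalence follows, assuming the usual i40e cast-then-shift definition of I40E_MASK(); the macro body is not visible in this hunk, so that definition is an assumption.]

#include <assert.h>
#include <stdint.h>

typedef uint32_t u32;

/* assumed helper definition; not shown in this hunk */
#define I40E_MASK(mask, shift) ((u32)(mask) << (u32)(shift))

/* the diff's before/after spellings, for one representative field */
#define DEMO_SHIFT 2
#define DEMO_MASK_OLD (0x7 << DEMO_SHIFT)        /* pre-patch style  */
#define DEMO_MASK_NEW I40E_MASK(0x7, DEMO_SHIFT) /* post-patch style */

int main(void)
{
	/* both spellings yield the same 32-bit mask (0x1C here) */
	assert(DEMO_MASK_OLD == (int)DEMO_MASK_NEW);
	return 0;
}

[One likely motivation for the macro form: casting to u32 before shifting keeps single-bit masks at high positions, such as (0x1 << 31) for fields like I40E_GLGEN_I2CCMD_E_SHIFT above, well-defined instead of overflowing a signed int.]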
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
 #define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
 #define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
 #define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
 #define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
 #define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
 #define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
 #define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
 #define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_PEARPMAX 0x000C2038
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
-#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
-#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_PECQOBJSZ 0x000C2020
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTMAX 0x000C2030
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
-#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_PEMRMAX 0x000C2040
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
-#define I40E_GLHMC_PEMROBJSZ 0x000C203c
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
-#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_PEPBLMAX 0x000C206c
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1MAX 0x000C2054
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
-#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
-#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_PESRQMAX 0x000C2028
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_PETIMERMAX 0x000C2084
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
-#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX 0x000C204c
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX 0x000C2048
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
 #define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_SDPART_MAX_INDEX 15
 #define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
 #define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
-#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
-#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
-#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
-#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
-#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
 #define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
 #define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
 #define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
 #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
 #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
 #define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
 #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
 #define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
 #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_UFUSE 0x00094008
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
 #define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
 #define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
 #define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
 #define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
 #define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
 #define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
 #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_PFINT_CEQCTL_MAX_INDEX 511
 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
 #define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
 #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
 #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
 #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
 #define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
 #define I40E_PFINT_ITR0_MAX_INDEX 2
 #define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_ITRN_MAX_INDEX 2
 #define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
 #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_LNKLSTN_MAX_INDEX 511
 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
 #define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
 #define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_RATEN_MAX_INDEX 511
 #define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_RQCTL_MAX_INDEX 1535
 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_TQCTL_MAX_INDEX 1535
 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
 #define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_MAX_INDEX 127
 #define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
 #define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_ITR0_MAX_INDEX 2
 #define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPINT_AEQCTL_MAX_INDEX 127
 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_VPINT_CEQCTL_MAX_INDEX 511
 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLST0_MAX_INDEX 127
 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLSTN_MAX_INDEX 511
 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_RATE0_MAX_INDEX 127
 #define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
 #define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_RATEN_MAX_INDEX 511
 #define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
 #define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
 #define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
 #define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
 #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-
-#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QRX_ENA_MAX_INDEX 1535
 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
 #define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QRX_TAIL_MAX_INDEX 1535
 #define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_CTL_MAX_INDEX 1535
 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
 #define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
 #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_ENA_MAX_INDEX 1535
 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
 #define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_HEAD_MAX_INDEX 1535
 #define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
 #define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_TAIL_MAX_INDEX 1535
 #define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_MAPENA_MAX_INDEX 127
 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_QTABLE_MAX_INDEX 15
 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QBASE_MAX_INDEX 383
 #define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QTABLE_MAX_INDEX 7
 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
 #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
 #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
 #define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
 #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HLCTLA 0x001E4760
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSECTL1 0x001E3560
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
 #define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
-#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
 #define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
 #define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
 #define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
 #define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
 #define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
 #define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_METF_MAX_INDEX 3
 #define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
 #define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
 #define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
 #define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
 #define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
 #define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAH_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAL_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
 #define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
 #define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
 #define I40E_MSIX_PBA_MAX_INDEX 5
 #define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TADD_MAX_INDEX 128
 #define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TMSG_MAX_INDEX 128
 #define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TUADD_MAX_INDEX 128
 #define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TVCTRL_MAX_INDEX 128
 #define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
 #define I40E_VFMSIX_PBA1_MAX_INDEX 19
 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG1_MAX_INDEX 639
 #define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
 #define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
 #define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
 #define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
 #define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
 #define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
 #define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
 #define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
 #define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
 #define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
 #define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
 #define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
 #define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
 #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
 #define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
 #define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
 #define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
 #define I40E_GLNVM_PROTCSR_MAX_INDEX 59
 #define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
 #define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
 #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
 #define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
 #define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
 #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
 #define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-
-#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
 #define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
 #define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
 #define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
 #define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
 #define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
 #define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
 #define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
 #define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
 #define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
 #define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
 #define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
 #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
 #define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
 #define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
 #define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PCITEST2 0x000BE4BC
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
-
-#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
 #define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
 #define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
 #define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
 #define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
 #define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
 #define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SUBSYSID 0x000BE48C
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
-#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
 #define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
 #define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
 #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
 #define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
 #define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
 #define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
 #define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
 #define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
 #define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
 #define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
 #define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
 #define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
 #define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
 #define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
 #define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PFDEVID 0x000BE080
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
 #define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
 #define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_VFDEVID 0x000BE100
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
 #define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
 #define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_GLPE_CPUSTATUS0 0x0000D040
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_AEQALLOC 0x00131180
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-
-#define I40E_PFPE_MRTEIDXMASK 0x00008600
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_PFPE_UDACTRL 0x00008700
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN 0x00008780
-#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
-#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
-#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_WQEALLOC 0x00138C00
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
 #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
 #define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
 #define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
 #define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
 #define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
 #define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
 #define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
 #define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
 #define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
 #define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
 #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
 #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
 #define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
 #define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
 #define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
 #define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DHW_MAX_INDEX 7
 #define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DLW_MAX_INDEX 7
 #define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DPS_MAX_INDEX 7
 #define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SHT_MAX_INDEX 7
 #define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
 #define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SLT_MAX_INDEX 7
 #define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
 #define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
 #define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
-#define I40E_GLQF_APBVT_MAX_INDEX 2047
-#define I40E_GLQF_APBVT_APBVT_SHIFT 0
-#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
 #define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
 #define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
 #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
 #define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
 #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
 #define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
 #define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
 #define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
 #define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
 #define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_HSYM_MAX_INDEX 63
 #define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_GLQF_PCNT_MAX_INDEX 511
 #define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_SWAP_MAX_INDEX 1
 #define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
 #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
 #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
 #define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
 #define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
 #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
 #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_PFQF_HENA_MAX_INDEX 1
 #define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_PFQF_HKEY_MAX_INDEX 12
 #define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
 #define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
 #define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
 #define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_PFQF_HLUT_MAX_INDEX 127
 #define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
 #define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
 #define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
 #define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
-#define I40E_PFQF_HREGION_MAX_INDEX 7
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
 #define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
 #define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
 #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
 #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HENA1_MAX_INDEX 1
 #define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY1_MAX_INDEX 12
 #define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT1_MAX_INDEX 15
 #define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
 #define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
 #define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
 #define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION1_MAX_INDEX 7
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPQF_CTL_MAX_INDEX 127
 #define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
 #define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_CTL_MAX_INDEX 383
 #define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_TCREGION_MAX_INDEX 3
 #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOECRC_MAX_INDEX 143
 #define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDDPC_MAX_INDEX 143
 #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-/* _i=0...143 */
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXVC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOELAST_MAX_INDEX 143
 #define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPRC_MAX_INDEX 143
 #define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPTC_MAX_INDEX 143
 #define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOERPDC_MAX_INDEX 143
 #define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
 #define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
 #define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
 #define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
 #define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCH_MAX_INDEX 3
 #define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCL_MAX_INDEX 3
 #define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCH_MAX_INDEX 3
 #define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCL_MAX_INDEX 3
 #define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ILLERRC_MAX_INDEX 3
 #define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LDPC_MAX_INDEX 3
 #define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MLFC_MAX_INDEX 3
 #define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCH_MAX_INDEX 3
 #define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCL_MAX_INDEX 3
 #define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCH_MAX_INDEX 3
 #define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCL_MAX_INDEX 3
 #define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MRFC_MAX_INDEX 3
 #define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127H_MAX_INDEX 3
 #define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127L_MAX_INDEX 3
 #define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255H_MAX_INDEX 3
 #define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255L_MAX_INDEX 3
 #define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511H_MAX_INDEX 3
 #define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511L_MAX_INDEX 3
 #define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64H_MAX_INDEX 3
 #define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64L_MAX_INDEX 3
 #define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127H_MAX_INDEX 3
 #define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127L_MAX_INDEX 3
 #define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255H_MAX_INDEX 3
 #define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255L_MAX_INDEX 3
 #define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511H_MAX_INDEX 3
 #define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511L_MAX_INDEX 3
 #define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64H_MAX_INDEX 3
 #define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64L_MAX_INDEX 3
 #define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RDPC_MAX_INDEX 3
 #define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RFC_MAX_INDEX 3
 #define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RJC_MAX_INDEX 3
 #define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RLEC_MAX_INDEX 3
 #define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ROC_MAX_INDEX 3
 #define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUC_MAX_INDEX 3
 #define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUPP_MAX_INDEX 3
 #define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
 #define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
-#define I40E_GLPRT_STDC_MAX_INDEX 3
-#define I40E_GLPRT_STDC_STDC_SHIFT 0
-#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDPC_MAX_INDEX 3
 #define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCL_MAX_INDEX 3
 #define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCH_MAX_INDEX 3
 #define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCL_MAX_INDEX 3
 #define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCH_MAX_INDEX 15
 #define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCL_MAX_INDEX 15
 #define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCH_MAX_INDEX 15
 #define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCL_MAX_INDEX 15
 #define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCH_MAX_INDEX 15
 #define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCL_MAX_INDEX 15
 #define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCH_MAX_INDEX 15
 #define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCL_MAX_INDEX 15
 #define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCH_MAX_INDEX 15
 #define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCL_MAX_INDEX 15
 #define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCH_MAX_INDEX 15
 #define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCL_MAX_INDEX 15
 #define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_RUPP_MAX_INDEX 15
 #define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_TDPC_MAX_INDEX 15
 #define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCH_MAX_INDEX 15
 #define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCL_MAX_INDEX 15
 #define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCH_MAX_INDEX 15
 #define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCL_MAX_INDEX 15
 #define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCH_MAX_INDEX 383
 #define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCL_MAX_INDEX 383
 #define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCH_MAX_INDEX 383
 #define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCL_MAX_INDEX 383
 #define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCH_MAX_INDEX 383
 #define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCL_MAX_INDEX 383
 #define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCH_MAX_INDEX 383
 #define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCL_MAX_INDEX 383
 #define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCH_MAX_INDEX 383
 #define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCL_MAX_INDEX 383
 #define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCH_MAX_INDEX 383
 #define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCL_MAX_INDEX 383
 #define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RDPC_MAX_INDEX 383
 #define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCH_MAX_INDEX 383
 #define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCL_MAX_INDEX 383
 #define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCH_MAX_INDEX 383
 #define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCL_MAX_INDEX 383
 #define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
 #define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
 #define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
 #define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRT_MSCCNT 0x00256BA0
-#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
-#define I40E_PRT_SCSTS 0x00256C20
-#define I40E_PRT_SCSTS_BSCA_SHIFT 0
-#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
-#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
-#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
-#define I40E_PRT_SCSTS_MSCA_SHIFT 2
-#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
-#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
-#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
-#define I40E_PRT_SWT_BSCCNT 0x00256C60
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
 #define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
 #define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
 #define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
 #define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
 #define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_CLKO_MAX_INDEX 1
 #define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
 #define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
 #define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
 #define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
 #define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
 #define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
 #define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480
-#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 8
-#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
 #define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
 #define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
 #define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
 #define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_RX_MAX_INDEX 127
 #define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_TX_MAX_INDEX 127
 #define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
 #define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
 #define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
 #define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
 #define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
 #define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
 #define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
 #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
 #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
 #define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
 #define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
 #define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
 #define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
 #define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
 #define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
 #define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
 #define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
 #define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
 #define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
 #define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
 #define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
 #define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
 #define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
 #define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
 #define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
 #define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
 #define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
 #define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAH_MAX_INDEX 3
 #define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
 #define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
 #define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
 #define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAL_MAX_INDEX 3
 #define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
 #define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
-#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
 #define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
-#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
 #define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
 #define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
-#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
 #define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
 #define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
-#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
 #define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
-#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
 #define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
 #define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
 #define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
 #define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
 #define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
 #define I40E_VFINT_ITR01_MAX_INDEX 2
 #define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_QRX_TAIL1_MAX_INDEX 15
 #define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
 #define I40E_QTX_TAIL1_MAX_INDEX 15
 #define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
 #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD_MAX_INDEX 16
 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG_MAX_INDEX 16
 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD_MAX_INDEX 16
 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_VFQF_HENA_MAX_INDEX 1
 #define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY_MAX_INDEX 12
 #define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
 #define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
 #define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
 #define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
 #define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION_MAX_INDEX 7
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
 #endif
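
The register hunk above is a single mechanical conversion: every open-coded (value << SHIFT) mask becomes I40E_MASK(value, SHIFT), and each register gets a /* Reset: ... */ annotation naming the reset level that clears it. The macro definition itself sits outside this hunk, so the following is a sketch of what it presumably looks like (the u32 cast is the assumption here) and why the conversion is worth the churn:

/* presumed definition, elsewhere in the i40e headers;
 * u32 comes from <linux/types.h> */
#define I40E_MASK(mask, shift) ((u32)(mask) << (shift))

/* The cast matters: the old open-coded form shifts a plain int, so a
 * mask such as (0x1 << 31) shifts into the sign bit of a signed int,
 * which is undefined behavior in C. Routing every mask through the
 * macro also keeps all masks consistently 32-bit. */
u32 before = 0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT;          /* UB: bit 31 of an int */
u32 after  = I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT); /* well-defined */
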
index e49f31d..a51aa37 100644
@@ -39,6 +39,7 @@ static inline __le64 build_ctob(u32 td_cmd, u32 td_offset, unsigned int size,
 }
 
 #define I40E_TXD_CMD (I40E_TX_DESC_CMD_EOP | I40E_TX_DESC_CMD_RS)
+#define I40E_FD_CLEAN_DELAY 10
 /**
  * i40e_program_fdir_filter - Program a Flow Director filter
  * @fdir_data: packet data whose fields supply the filter parameters
@@ -50,7 +51,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                             struct i40e_pf *pf, bool add)
 {
        struct i40e_filter_program_desc *fdir_desc;
-       struct i40e_tx_buffer *tx_buf;
+       struct i40e_tx_buffer *tx_buf, *first;
        struct i40e_tx_desc *tx_desc;
        struct i40e_ring *tx_ring;
        unsigned int fpt, dcc;
@@ -58,6 +59,7 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        struct device *dev;
        dma_addr_t dma;
        u32 td_cmd = 0;
+       u16 delay = 0;
        u16 i;
 
        /* find existing FDIR VSI */
@@ -71,6 +73,17 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_ring = vsi->tx_rings[0];
        dev = tx_ring->dev;
 
+       /* we need two descriptors to add/del a filter and we can wait */
+       do {
+               if (I40E_DESC_UNUSED(tx_ring) > 1)
+                       break;
+               msleep_interruptible(1);
+               delay++;
+       } while (delay < I40E_FD_CLEAN_DELAY);
+
+       if (!(I40E_DESC_UNUSED(tx_ring) > 1))
+               return -EAGAIN;
+
        dma = dma_map_single(dev, raw_packet,
                             I40E_FDIR_MAX_RAW_PACKET_SIZE, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, dma))
@@ -79,8 +92,10 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        /* grab the next descriptor */
        i = tx_ring->next_to_use;
        fdir_desc = I40E_TX_FDIRDESC(tx_ring, i);
+       first = &tx_ring->tx_bi[i];
+       memset(first, 0, sizeof(struct i40e_tx_buffer));
 
-       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+       tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
 
        fpt = (fdir_data->q_index << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
              I40E_TXD_FLTR_QW0_QINDEX_MASK;
@@ -100,8 +115,6 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                        I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT) &
                       I40E_TXD_FLTR_QW0_DEST_VSI_MASK;
 
-       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
-
        dcc = I40E_TX_DESC_DTYPE_FILTER_PROG;
 
        if (add)
@@ -124,6 +137,8 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
                        I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
        }
 
+       fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(fpt);
+       fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dcc);
        fdir_desc->fd_id = cpu_to_le32(fdir_data->fd_id);
 
@@ -132,7 +147,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc = I40E_TX_DESC(tx_ring, i);
        tx_buf = &tx_ring->tx_bi[i];
 
-       tx_ring->next_to_use = (i + 1 < tx_ring->count) ? i + 1 : 0;
+       tx_ring->next_to_use = ((i + 1) < tx_ring->count) ? i + 1 : 0;
+
+       memset(tx_buf, 0, sizeof(struct i40e_tx_buffer));
 
        /* record length, and DMA address */
        dma_unmap_len_set(tx_buf, len, I40E_FDIR_MAX_RAW_PACKET_SIZE);
@@ -141,6 +158,9 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_desc->buffer_addr = cpu_to_le64(dma);
        td_cmd = I40E_TXD_CMD | I40E_TX_DESC_CMD_DUMMY;
 
+       tx_buf->tx_flags = I40E_TX_FLAGS_FD_SB;
+       tx_buf->raw_buf = (void *)raw_packet;
+
        tx_desc->cmd_type_offset_bsz =
                build_ctob(td_cmd, 0, I40E_FDIR_MAX_RAW_PACKET_SIZE, 0);
 
@@ -148,14 +168,12 @@ int i40e_program_fdir_filter(struct i40e_fdir_filter *fdir_data, u8 *raw_packet,
        tx_buf->time_stamp = jiffies;
 
        /* Force memory writes to complete before letting h/w
-        * know there are new descriptors to fetch.  (Only
-        * applicable for weak-ordered memory model archs,
-        * such as IA-64).
+        * know there are new descriptors to fetch.
         */
        wmb();
 
        /* Mark the data descriptor to be watched */
-       tx_buf->next_to_watch = tx_desc;
+       first->next_to_watch = tx_desc;
 
        writel(tx_ring->next_to_use, tx_ring->tail);
        return 0;
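
The hunks above also add a bounded wait at the top of i40e_program_fdir_filter(): each add/delete consumes one filter-programming descriptor plus one dummy data descriptor, so the function now polls until two slots are free and gives up with -EAGAIN rather than blocking forever. Lifted out of the function, the pattern is roughly the following sketch (the helper name is invented for illustration; I40E_DESC_UNUSED() and I40E_FD_CLEAN_DELAY are the driver's own):

/* hypothetical helper showing the bounded-wait pattern used above */
static int i40e_fd_wait_for_space(struct i40e_ring *tx_ring)
{
	u16 delay = 0;

	do {
		if (I40E_DESC_UNUSED(tx_ring) > 1)	/* need 2 free slots */
			return 0;
		msleep_interruptible(1);	/* let Tx cleanup reclaim slots */
	} while (++delay < I40E_FD_CLEAN_DELAY);

	/* one last look after the final sleep */
	return (I40E_DESC_UNUSED(tx_ring) > 1) ? 0 : -EAGAIN;
}

Returning -EAGAIN keeps an ethtool-triggered filter update from hanging if the cleanup path never frees descriptors.
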
@@ -170,24 +188,27 @@ dma_fail:
  * i40e_add_del_fdir_udpv4 - Add/Remove UDPv4 filters
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
                                   struct i40e_fdir_filter *fd_data,
-                                  u8 *raw_packet, bool add)
+                                  bool add)
 {
        struct i40e_pf *pf = vsi->back;
        struct udphdr *udp;
        struct iphdr *ip;
        bool err = false;
+       u8 *raw_packet;
        int ret;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x1c, 0, 0, 0x40, 0, 0x40, 0x11, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
        memcpy(raw_packet, packet, I40E_UDPIP_DUMMY_PACKET_LEN);
 
        ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
@@ -220,19 +241,19 @@ static int i40e_add_del_fdir_udpv4(struct i40e_vsi *vsi,
  * i40e_add_del_fdir_tcpv4 - Add/Remove TCPv4 filters
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                                   struct i40e_fdir_filter *fd_data,
-                                  u8 *raw_packet, bool add)
+                                  bool add)
 {
        struct i40e_pf *pf = vsi->back;
        struct tcphdr *tcp;
        struct iphdr *ip;
        bool err = false;
+       u8 *raw_packet;
        int ret;
        /* Dummy packet */
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
@@ -240,6 +261,9 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x80, 0x11,
                0x0, 0x72, 0, 0, 0, 0};
 
+       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+       if (!raw_packet)
+               return -ENOMEM;
        memcpy(raw_packet, packet, I40E_TCPIP_DUMMY_PACKET_LEN);
 
        ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
@@ -271,19 +295,6 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
                         fd_data->pctype, ret);
        }
 
-       fd_data->pctype = I40E_FILTER_PCTYPE_NONF_IPV4_TCP;
-
-       ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
-       if (ret) {
-               dev_info(&pf->pdev->dev,
-                        "Filter command send failed for PCTYPE %d (ret = %d)\n",
-                        fd_data->pctype, ret);
-               err = true;
-       } else {
-               dev_info(&pf->pdev->dev, "Filter OK for PCTYPE %d (ret = %d)\n",
-                         fd_data->pctype, ret);
-       }
-
        return err ? -EOPNOTSUPP : 0;
 }
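
Both the UDPv4 and TCPv4 helpers above trade the caller-supplied raw_packet for a buffer they allocate themselves. The reason shows up later in the patch: the dummy packet is handed to the hardware as a live Tx buffer and is freed only by the Tx cleanup path, so one buffer shared across helpers (and across the multiple PCTYPEs programmed per flow) would be reused or freed while still in flight. The shared shape of the new code, as a sketch (the helper name is hypothetical):

/* per-call dummy-packet construction, repeated in each helper above */
static u8 *i40e_build_fdir_dummy(const char *tmpl, size_t tmpl_len)
{
	u8 *raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);

	if (!raw_packet)
		return NULL;
	memcpy(raw_packet, tmpl, tmpl_len);	/* canned L2/L3/L4 headers */
	/* callers then patch saddr/daddr and the port fields before
	 * programming the filter */
	return raw_packet;
}
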
 
@@ -292,14 +303,13 @@ static int i40e_add_del_fdir_tcpv4(struct i40e_vsi *vsi,
  * a specific flow spec
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Always returns -EOPNOTSUPP
  **/
 static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
                                    struct i40e_fdir_filter *fd_data,
-                                   u8 *raw_packet, bool add)
+                                   bool add)
 {
        return -EOPNOTSUPP;
 }
@@ -310,33 +320,36 @@ static int i40e_add_del_fdir_sctpv4(struct i40e_vsi *vsi,
  * a specific flow spec
  * @vsi: pointer to the targeted VSI
  * @fd_data: the flow director data required for the FDir descriptor
- * @raw_packet: the pre-allocated packet buffer for FDir
  * @add: true adds a filter, false removes it
  *
  * Returns 0 if the filters were successfully added or removed
  **/
 static int i40e_add_del_fdir_ipv4(struct i40e_vsi *vsi,
                                  struct i40e_fdir_filter *fd_data,
-                                 u8 *raw_packet, bool add)
+                                 bool add)
 {
        struct i40e_pf *pf = vsi->back;
        struct iphdr *ip;
        bool err = false;
+       u8 *raw_packet;
        int ret;
        int i;
        static char packet[] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0x08, 0,
                0x45, 0, 0, 0x14, 0, 0, 0x40, 0, 0x40, 0x10, 0, 0, 0, 0, 0, 0,
                0, 0, 0, 0};
 
-       memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
-       ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
-
-       ip->saddr = fd_data->src_ip[0];
-       ip->daddr = fd_data->dst_ip[0];
-       ip->protocol = 0;
-
        for (i = I40E_FILTER_PCTYPE_NONF_IPV4_OTHER;
             i <= I40E_FILTER_PCTYPE_FRAG_IPV4; i++) {
+               raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
+               if (!raw_packet)
+                       return -ENOMEM;
+               memcpy(raw_packet, packet, I40E_IP_DUMMY_PACKET_LEN);
+               ip = (struct iphdr *)(raw_packet + IP_HEADER_OFFSET);
+
+               ip->saddr = fd_data->src_ip[0];
+               ip->daddr = fd_data->dst_ip[0];
+               ip->protocol = 0;
+
                fd_data->pctype = i;
                ret = i40e_program_fdir_filter(fd_data, raw_packet, pf, add);
 
@@ -366,50 +379,34 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                      struct i40e_fdir_filter *input, bool add)
 {
        struct i40e_pf *pf = vsi->back;
-       u8 *raw_packet;
        int ret;
 
-       /* Populate the Flow Director that we have at the moment
-        * and allocate the raw packet buffer for the calling functions
-        */
-       raw_packet = kzalloc(I40E_FDIR_MAX_RAW_PACKET_SIZE, GFP_KERNEL);
-       if (!raw_packet)
-               return -ENOMEM;
-
        switch (input->flow_type & ~FLOW_EXT) {
        case TCP_V4_FLOW:
-               ret = i40e_add_del_fdir_tcpv4(vsi, input, raw_packet,
-                                             add);
+               ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
                break;
        case UDP_V4_FLOW:
-               ret = i40e_add_del_fdir_udpv4(vsi, input, raw_packet,
-                                             add);
+               ret = i40e_add_del_fdir_udpv4(vsi, input, add);
                break;
        case SCTP_V4_FLOW:
-               ret = i40e_add_del_fdir_sctpv4(vsi, input, raw_packet,
-                                              add);
+               ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
                break;
        case IPV4_FLOW:
-               ret = i40e_add_del_fdir_ipv4(vsi, input, raw_packet,
-                                            add);
+               ret = i40e_add_del_fdir_ipv4(vsi, input, add);
                break;
        case IP_USER_FLOW:
                switch (input->ip4_proto) {
                case IPPROTO_TCP:
-                       ret = i40e_add_del_fdir_tcpv4(vsi, input,
-                                                     raw_packet, add);
+                       ret = i40e_add_del_fdir_tcpv4(vsi, input, add);
                        break;
                case IPPROTO_UDP:
-                       ret = i40e_add_del_fdir_udpv4(vsi, input,
-                                                     raw_packet, add);
+                       ret = i40e_add_del_fdir_udpv4(vsi, input, add);
                        break;
                case IPPROTO_SCTP:
-                       ret = i40e_add_del_fdir_sctpv4(vsi, input,
-                                                      raw_packet, add);
+                       ret = i40e_add_del_fdir_sctpv4(vsi, input, add);
                        break;
                default:
-                       ret = i40e_add_del_fdir_ipv4(vsi, input,
-                                                    raw_packet, add);
+                       ret = i40e_add_del_fdir_ipv4(vsi, input, add);
                        break;
                }
                break;
@@ -419,7 +416,7 @@ int i40e_add_del_fdir(struct i40e_vsi *vsi,
                ret = -EINVAL;
        }
 
-       kfree(raw_packet);
+       /* The buffer allocated here is freed by the i40e_clean_tx_ring() */
        return ret;
 }
 
@@ -450,22 +447,24 @@ static void i40e_fd_handle_status(struct i40e_ring *rx_ring,
                         rx_desc->wb.qword0.hi_dword.fd_id);
 
                /* filter programming failed most likely due to table full */
-               fcnt_prog = i40e_get_current_fd_count(pf);
-               fcnt_avail = i40e_get_fd_cnt_all(pf);
+               fcnt_prog = i40e_get_cur_guaranteed_fd_count(pf);
+               fcnt_avail = pf->fdir_pf_filter_count;
                /* If ATR is running fcnt_prog can quickly change,
                 * if we are very close to full, it makes sense to disable
                 * FD ATR/SB and then re-enable it when there is room.
                 */
                if (fcnt_prog >= (fcnt_avail - I40E_FDIR_BUFFER_FULL_MARGIN)) {
                        /* Turn off ATR first */
-                       if (pf->flags & I40E_FLAG_FD_ATR_ENABLED) {
-                               pf->flags &= ~I40E_FLAG_FD_ATR_ENABLED;
+                       if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
+                           !(pf->auto_disable_flags &
+                             I40E_FLAG_FD_ATR_ENABLED)) {
                                dev_warn(&pdev->dev, "FD filter space full, ATR for further flows will be turned off\n");
                                pf->auto_disable_flags |=
                                                       I40E_FLAG_FD_ATR_ENABLED;
                                pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;
-                       } else if (pf->flags & I40E_FLAG_FD_SB_ENABLED) {
-                               pf->flags &= ~I40E_FLAG_FD_SB_ENABLED;
+                       } else if ((pf->flags & I40E_FLAG_FD_SB_ENABLED) &&
+                                  !(pf->auto_disable_flags &
+                                    I40E_FLAG_FD_SB_ENABLED)) {
                                dev_warn(&pdev->dev, "FD filter space full, new ntuple rules will not be added\n");
                                pf->auto_disable_flags |=
                                                        I40E_FLAG_FD_SB_ENABLED;
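
The rework above changes the table-full reaction in two ways: the occupancy check now compares the guaranteed-filter count against this PF's own budget (fdir_pf_filter_count) instead of the global totals, and the warning plus auto-disable fire only when the feature is not already flagged in auto_disable_flags. The net effect is a warn-once state machine; condensed from the hunk (no new logic), the ATR side is:

if ((pf->flags & I40E_FLAG_FD_ATR_ENABLED) &&
    !(pf->auto_disable_flags & I40E_FLAG_FD_ATR_ENABLED)) {
	dev_warn(&pdev->dev,
		 "FD filter space full, ATR for further flows will be turned off\n");
	pf->auto_disable_flags |= I40E_FLAG_FD_ATR_ENABLED;	/* warn once */
	pf->flags |= I40E_FLAG_FDIR_REQUIRES_REINIT;	/* defer the disable */
}

Note the feature bit in pf->flags is no longer cleared on the spot; the actual disable is deferred to the reinit path.
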
@@ -491,7 +490,11 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                                            struct i40e_tx_buffer *tx_buffer)
 {
        if (tx_buffer->skb) {
-               dev_kfree_skb_any(tx_buffer->skb);
+               if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buffer->raw_buf);
+               else
+                       dev_kfree_skb_any(tx_buffer->skb);
+
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
@@ -893,6 +896,11 @@ static void i40e_clean_programming_status(struct i40e_ring *rx_ring,
 
        if (id == I40E_RX_PROG_STATUS_DESC_FD_FILTER_STATUS)
                i40e_fd_handle_status(rx_ring, rx_desc, id);
+#ifdef I40E_FCOE
+       else if ((id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_PROG_STATUS) ||
+                (id == I40E_RX_PROG_STATUS_DESC_FCOE_CTXT_INVL_STATUS))
+               i40e_fcoe_handle_status(rx_ring, rx_desc, id);
+#endif
 }
 
 /**
@@ -1234,8 +1242,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
-           rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
            rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
@@ -1488,6 +1494,12 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
                vlan_tag = rx_status & (1 << I40E_RX_DESC_STATUS_L2TAG1P_SHIFT)
                         ? le16_to_cpu(rx_desc->wb.qword0.lo_dword.l2tag1)
                         : 0;
+#ifdef I40E_FCOE
+               if (!i40e_fcoe_handle_offload(rx_ring, rx_desc, skb)) {
+                       dev_kfree_skb_any(skb);
+                       goto next_desc;
+               }
+#endif
                i40e_receive_skb(rx_ring, skb, vlan_tag);
 
                rx_ring->netdev->last_rx = jiffies;
@@ -1701,7 +1713,9 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
                I40E_TXD_FLTR_QW1_CNTINDEX_MASK;
 
        fdir_desc->qindex_flex_ptype_vsi = cpu_to_le32(flex_ptype);
+       fdir_desc->rsvd = cpu_to_le32(0);
        fdir_desc->dtype_cmd_cntindex = cpu_to_le32(dtype_cmd);
+       fdir_desc->fd_id = cpu_to_le32(0);
 }
 
 /**
@@ -1716,9 +1730,15 @@ static void i40e_atr(struct i40e_ring *tx_ring, struct sk_buff *skb,
  * Returns an error code to indicate the frame should be dropped upon error,
  * otherwise returns 0 to indicate the flags have been set properly.
  **/
+#ifdef I40E_FCOE
+int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                              struct i40e_ring *tx_ring,
+                              u32 *flags)
+#else
 static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring,
                                      u32 *flags)
+#endif
 {
        __be16 protocol = skb->protocol;
        u32  tx_flags = 0;
@@ -1740,9 +1760,8 @@ static int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
        }
 
        /* Insert 802.1p priority into VLAN header */
-       if ((tx_ring->vsi->back->flags & I40E_FLAG_DCB_ENABLED) &&
-           ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
-            (skb->priority != TC_PRIO_CONTROL))) {
+       if ((tx_flags & (I40E_TX_FLAGS_HW_VLAN | I40E_TX_FLAGS_SW_VLAN)) ||
+           (skb->priority != TC_PRIO_CONTROL)) {
                tx_flags &= ~I40E_TX_FLAGS_VLAN_PRIO_MASK;
                tx_flags |= (skb->priority & 0x7) <<
                                I40E_TX_FLAGS_VLAN_PRIO_SHIFT;
@@ -1850,7 +1869,8 @@ static int i40e_tsyn(struct i40e_ring *tx_ring, struct sk_buff *skb,
         * we are not already transmitting a packet to be timestamped
         */
        pf = i40e_netdev_to_pf(tx_ring->netdev);
-       if (pf->ptp_tx && !pf->ptp_tx_skb) {
+       if (pf->ptp_tx &&
+           !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                pf->ptp_tx_skb = skb_get(skb);
        } else {
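
The two-line change above closes a race in the Tx timestamp path: testing pf->ptp_tx_skb for NULL and assigning it later were separate steps, so two transmit contexts could both decide to timestamp. test_and_set_bit_lock() turns claiming the single timestamp slot into one atomic operation. A sketch of the presumed claim/release pairing (the release side is not in this hunk and is an assumption):

/* claim: exactly one winner, losers transmit untimestamped */
if (pf->ptp_tx &&
    !test_and_set_bit_lock(__I40E_PTP_TX_IN_PROGRESS, &pf->state)) {
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
	pf->ptp_tx_skb = skb_get(skb);
}

/* release: presumably in the PTP Tx-done/timeout handler */
pf->ptp_tx_skb = NULL;
clear_bit_unlock(__I40E_PTP_TX_IN_PROGRESS, &pf->state);
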
@@ -2000,6 +2020,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
        context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+       context_desc->rsvd = cpu_to_le16(0);
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
@@ -2013,9 +2034,15 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  * @td_cmd:   the command field in the descriptor
  * @td_offset: offset for checksum or crc
  **/
+#ifdef I40E_FCOE
+void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                struct i40e_tx_buffer *first, u32 tx_flags,
+                const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#else
 static void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
                        struct i40e_tx_buffer *first, u32 tx_flags,
                        const u8 hdr_len, u32 td_cmd, u32 td_offset)
+#endif
 {
        unsigned int data_len = skb->data_len;
        unsigned int size = skb_headlen(skb);
@@ -2192,7 +2219,11 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  *
  * Returns 0 if stop is not needed
  **/
+#ifdef I40E_FCOE
+int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#else
 static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
+#endif
 {
        if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
                return 0;
@@ -2208,8 +2239,13 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * there are not enough descriptors available in this ring since we need at
  * least one descriptor.
  **/
+#ifdef I40E_FCOE
+int i40e_xmit_descriptor_count(struct sk_buff *skb,
+                              struct i40e_ring *tx_ring)
+#else
 static int i40e_xmit_descriptor_count(struct sk_buff *skb,
                                      struct i40e_ring *tx_ring)
+#endif
 {
        unsigned int f;
        int count = 0;
@@ -2278,13 +2314,13 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
-       skb_tx_timestamp(skb);
-
        tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
 
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
+       skb_tx_timestamp(skb);
+
        /* always enable CRC insertion offload */
        td_cmd |= I40E_TX_DESC_CMD_ICRC;
 
index 0277894..73f4fa4 100644 (file)
@@ -75,7 +75,6 @@ enum i40e_dyn_idx_t {
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
@@ -132,6 +131,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
 #define I40E_TX_FLAGS_TSYN             (u32)(1 << 8)
+#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -140,7 +140,10 @@ enum i40e_dyn_idx_t {
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
        unsigned long time_stamp;
-       struct sk_buff *skb;
+       union {
+               struct sk_buff *skb;
+               void *raw_buf;
+       };
        unsigned int bytecount;
        unsigned short gso_segs;
        DEFINE_DMA_UNMAP_ADDR(dma);
@@ -287,4 +290,13 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring);
 void i40e_free_tx_resources(struct i40e_ring *tx_ring);
 void i40e_free_rx_resources(struct i40e_ring *rx_ring);
 int i40e_napi_poll(struct napi_struct *napi, int budget);
+#ifdef I40E_FCOE
+void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
+                struct i40e_tx_buffer *first, u32 tx_flags,
+                const u8 hdr_len, u32 td_cmd, u32 td_offset);
+int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
+int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
+                              struct i40e_ring *tx_ring, u32 *flags);
+#endif
 #endif /* _I40E_TXRX_H_ */
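
The anonymous union added to i40e_tx_buffer lets the FCoE path park a raw DDP programming buffer in the slot the LAN path uses for its skb. The union carries no tag; which member is live is implied by the code path that queued the descriptor, and the cleanup path must branch on the same context before freeing. A sketch of the shape (not the driver struct):

struct sk_buff;			/* opaque for this sketch */

struct demo_tx_buffer {
	union {				/* one slot, two meanings */
		struct sk_buff *skb;	/* normal LAN transmit */
		void *raw_buf;		/* e.g. FCoE DDP programming buffer */
	};
	unsigned int bytecount;
};
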
index 9d39ff2..ce04d90 100644 (file)
@@ -50,6 +50,9 @@
                                         (d) == I40E_DEV_ID_QSFP_B  || \
                                         (d) == I40E_DEV_ID_QSFP_C)
 
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+
 #define I40E_MAX_VSI_QP                        16
 #define I40E_MAX_VF_VSI                        3
 #define I40E_MAX_CHAINED_RX_BUFFERS    5
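
I40E_MASK() just pairs a field's value mask with its shift; the register header later in this series is rewritten in terms of it. The body does not parenthesize its arguments, so it is only safe with the simple literals the headers pass. A compile-time check of the expansion (DEMO_* is made up; real users look like I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)):

#include <assert.h>

#define DEMO_I40E_MASK(mask, shift) (mask << shift)

#define DEMO_FIELD_SHIFT 28
#define DEMO_FIELD_MASK  DEMO_I40E_MASK(0x1, DEMO_FIELD_SHIFT)

static_assert(DEMO_FIELD_MASK == (0x1 << 28), "mask is value << shift");
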
@@ -137,6 +140,14 @@ enum i40e_fc_mode {
        I40E_FC_DEFAULT
 };
 
+enum i40e_set_fc_aq_failures {
+       I40E_SET_FC_AQ_FAIL_NONE = 0,
+       I40E_SET_FC_AQ_FAIL_GET = 1,
+       I40E_SET_FC_AQ_FAIL_SET = 2,
+       I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+       I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
 enum i40e_vsi_type {
        I40E_VSI_MAIN = 0,
        I40E_VSI_VMDQ1,
@@ -163,6 +174,7 @@ struct i40e_link_status {
        u8 an_info;
        u8 ext_info;
        u8 loopback;
+       bool an_enabled;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
        u16 max_frame_size;
@@ -234,6 +246,7 @@ struct i40e_mac_info {
        u8 addr[ETH_ALEN];
        u8 perm_addr[ETH_ALEN];
        u8 san_addr[ETH_ALEN];
+       u8 port_addr[ETH_ALEN];
        u16 max_fcoeq;
 };
 
@@ -256,6 +269,61 @@ struct i40e_nvm_info {
        u32 eetrack;              /* NVM data version */
 };
 
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+       I40E_NVMUPD_INVALID,
+       I40E_NVMUPD_READ_CON,
+       I40E_NVMUPD_READ_SNT,
+       I40E_NVMUPD_READ_LCB,
+       I40E_NVMUPD_READ_SA,
+       I40E_NVMUPD_WRITE_ERA,
+       I40E_NVMUPD_WRITE_CON,
+       I40E_NVMUPD_WRITE_SNT,
+       I40E_NVMUPD_WRITE_LCB,
+       I40E_NVMUPD_WRITE_SA,
+       I40E_NVMUPD_CSUM_CON,
+       I40E_NVMUPD_CSUM_SA,
+       I40E_NVMUPD_CSUM_LCB,
+};
+
+enum i40e_nvmupd_state {
+       I40E_NVMUPD_STATE_INIT,
+       I40E_NVMUPD_STATE_READING,
+       I40E_NVMUPD_STATE_WRITING
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code.  Where is the right file?
+ */
+#define I40E_NVM_READ  0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT   8
+#define I40E_NVM_TRANS_MASK    (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON           0x0
+#define I40E_NVM_SNT           0x1
+#define I40E_NVM_LCB           0x2
+#define I40E_NVM_SA            (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA           0x4
+#define I40E_NVM_CSUM          0x8
+
+#define I40E_NVM_ADAPT_SHIFT   16
+#define I40E_NVM_ADAPT_MASK    (0xffff << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA   4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+       u32 command;
+       u32 config;
+       u32 offset;     /* in bytes */
+       u32 data_size;  /* in bytes */
+       u8 data[1];
+};
+
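
The config word of struct i40e_nvm_access packs a module pointer in the low byte, a transaction type in the next nibble, and an adapter-defined field in the high half; the masks above define the layout. A userspace-style sketch of the decode (helper names are illustrative):

#include <stdint.h>

#define DEMO_NVM_MOD_PNT_MASK	0xFF
#define DEMO_NVM_TRANS_SHIFT	8
#define DEMO_NVM_TRANS_MASK	(0xf << DEMO_NVM_TRANS_SHIFT)

static uint32_t nvm_module_pointer(uint32_t config)
{
	return config & DEMO_NVM_MOD_PNT_MASK;
}

static uint32_t nvm_transaction(uint32_t config)
{
	/* 0x0 CON, 0x1 SNT, 0x2 LCB, 0x3 SA (SNT|LCB), 0x4 ERA, 0x8 CSUM */
	return (config & DEMO_NVM_TRANS_MASK) >> DEMO_NVM_TRANS_SHIFT;
}
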
 /* PCI bus types */
 enum i40e_bus_type {
        i40e_bus_type_unknown = 0,
@@ -391,6 +459,9 @@ struct i40e_hw {
        /* Admin Queue info */
        struct i40e_adminq_info aq;
 
+       /* state of nvm update process */
+       enum i40e_nvmupd_state nvmupd_state;
+
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
 
@@ -875,7 +946,6 @@ enum i40e_filter_pctype {
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
        /* Note: Values 37-40 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
-       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN            = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
@@ -981,6 +1051,25 @@ struct i40e_eth_stats {
        u64 tx_errors;                  /* tepc */
 };
 
+#ifdef I40E_FCOE
+/* Statistics collected per function for FCoE */
+struct i40e_fcoe_stats {
+       u64 rx_fcoe_packets;            /* fcoeprc */
+       u64 rx_fcoe_dwords;             /* fcoedwrc */
+       u64 rx_fcoe_dropped;            /* fcoerpdc */
+       u64 tx_fcoe_packets;            /* fcoeptc */
+       u64 tx_fcoe_dwords;             /* fcoedwtc */
+       u64 fcoe_bad_fccrc;             /* fcoecrc */
+       u64 fcoe_last_error;            /* fcoelast */
+       u64 fcoe_ddp_count;             /* fcoeddpc */
+};
+
+/* offset to per function FCoE statistics block */
+#define I40E_FCOE_VF_STAT_OFFSET       0
+#define I40E_FCOE_PF_STAT_OFFSET       128
+#define I40E_FCOE_STAT_MAX             (I40E_FCOE_PF_STAT_OFFSET + I40E_MAX_PF)
+
+#endif
 /* Statistics collected by the MAC */
 struct i40e_hw_port_stats {
        /* eth stats collected by the port */
@@ -1061,6 +1150,125 @@ struct i40e_hw_port_stats {
 
 #define I40E_SRRD_SRCTL_ATTEMPTS       100000
 
+#ifdef I40E_FCOE
+/* FCoE Tx context descriptor - Use the i40e_tx_context_desc struct */
+
+enum i40E_fcoe_tx_ctx_desc_cmd_bits {
+       I40E_FCOE_TX_CTX_DESC_OPCODE_SINGLE_SEND        = 0x00, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS2      = 0x01, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_TSO_FC_CLASS3      = 0x05, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS2     = 0x02, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_ETSO_FC_CLASS3     = 0x06, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS2      = 0x03, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_FC_CLASS3      = 0x07, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DDP_CTX_INVL       = 0x08, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_OPCODE_DWO_CTX_INVL       = 0x09, /* 4 BITS */
+       I40E_FCOE_TX_CTX_DESC_RELOFF                    = 0x10,
+       I40E_FCOE_TX_CTX_DESC_CLRSEQ                    = 0x20,
+       I40E_FCOE_TX_CTX_DESC_DIFENA                    = 0x40,
+       I40E_FCOE_TX_CTX_DESC_IL2TAG2                   = 0x80
+};
+
+/* FCoE DDP Context descriptor */
+struct i40e_fcoe_ddp_context_desc {
+       __le64 rsvd;
+       __le64 type_cmd_foff_lsize;
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT      0
+#define I40E_FCOE_DDP_CTX_QW1_DTYPE_MASK       (0xFULL << \
+                                       I40E_FCOE_DDP_CTX_QW1_DTYPE_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT        4
+#define I40E_FCOE_DDP_CTX_QW1_CMD_MASK (0xFULL << \
+                                        I40E_FCOE_DDP_CTX_QW1_CMD_SHIFT)
+
+enum i40e_fcoe_ddp_ctx_desc_cmd_bits {
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_512B       = 0x00, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_4K         = 0x01, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_8K         = 0x02, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_BSIZE_16K        = 0x03, /* 2 BITS */
+       I40E_FCOE_DDP_CTX_DESC_DIFENA           = 0x04, /* 1 BIT  */
+       I40E_FCOE_DDP_CTX_DESC_LASTSEQH         = 0x08, /* 1 BIT  */
+};
+
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT       16
+#define I40E_FCOE_DDP_CTX_QW1_FOFF_MASK        (0x3FFFULL << \
+                                        I40E_FCOE_DDP_CTX_QW1_FOFF_SHIFT)
+
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT      32
+#define I40E_FCOE_DDP_CTX_QW1_LSIZE_MASK       (0x3FFFULL << \
+                                       I40E_FCOE_DDP_CTX_QW1_LSIZE_SHIFT)
+
+/* FCoE DDP/DWO Queue Context descriptor */
+struct i40e_fcoe_queue_context_desc {
+       __le64 dmaindx_fbase;           /* 0:11 DMAINDX, 12:63 FBASE */
+       __le64 flen_tph;                /* 0:12 FLEN, 13:15 TPH */
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT  0
+#define I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_MASK   (0xFFFULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW0_DMAINDX_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT    12
+#define I40E_FCOE_QUEUE_CTX_QW0_FBASE_MASK     (0xFFFFFFFFFFFFFULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW0_FBASE_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT     0
+#define I40E_FCOE_QUEUE_CTX_QW1_FLEN_MASK      (0x1FFFULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW1_FLEN_SHIFT)
+
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT      13
+#define I40E_FCOE_QUEUE_CTX_QW1_TPH_MASK       (0x7ULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW1_TPH_SHIFT)
+
+enum i40e_fcoe_queue_ctx_desc_tph_bits {
+       I40E_FCOE_QUEUE_CTX_DESC_TPHRDESC       = 0x1,
+       I40E_FCOE_QUEUE_CTX_DESC_TPHDATA        = 0x2
+};
+
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT   30
+#define I40E_FCOE_QUEUE_CTX_QW1_RECIPE_MASK    (0x3ULL << \
+                                       I40E_FCOE_QUEUE_CTX_QW1_RECIPE_SHIFT)
+
+/* FCoE DDP/DWO Filter Context descriptor */
+struct i40e_fcoe_filter_context_desc {
+       __le32 param;
+       __le16 seqn;
+
+       /* 48:51(0:3) RSVD, 52:63(4:15) DMAINDX */
+       __le16 rsvd_dmaindx;
+
+       /* 0:7 FLAGS, 8:52 RSVD, 53:63 LANQ */
+       __le64 flags_rsvd_lanq;
+};
+
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT 4
+#define I40E_FCOE_FILTER_CTX_QW0_DMAINDX_MASK  (0xFFF << \
+                                       I40E_FCOE_FILTER_CTX_QW0_DMAINDX_SHIFT)
+
+enum i40e_fcoe_filter_ctx_desc_flags_bits {
+       I40E_FCOE_FILTER_CTX_DESC_CTYP_DDP      = 0x00,
+       I40E_FCOE_FILTER_CTX_DESC_CTYP_DWO      = 0x01,
+       I40E_FCOE_FILTER_CTX_DESC_ENODE_INIT    = 0x00,
+       I40E_FCOE_FILTER_CTX_DESC_ENODE_RSP     = 0x02,
+       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS2     = 0x00,
+       I40E_FCOE_FILTER_CTX_DESC_FC_CLASS3     = 0x04
+};
+
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT   0
+#define I40E_FCOE_FILTER_CTX_QW1_FLAGS_MASK    (0xFFULL << \
+                                       I40E_FCOE_FILTER_CTX_QW1_FLAGS_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT     8
+#define I40E_FCOE_FILTER_CTX_QW1_PCTYPE_MASK      (0x3FULL << \
+                       I40E_FCOE_FILTER_CTX_QW1_PCTYPE_SHIFT)
+
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT     53
+#define I40E_FCOE_FILTER_CTX_QW1_LANQINDX_MASK      (0x7FFULL << \
+                       I40E_FCOE_FILTER_CTX_QW1_LANQINDX_SHIFT)
+
+#endif /* I40E_FCOE */
 enum i40e_switch_element_types {
        I40E_SWITCH_ELEMENT_TYPE_MAC    = 1,
        I40E_SWITCH_ELEMENT_TYPE_PF     = 2,
@@ -1162,4 +1370,7 @@ enum i40e_reset_type {
        I40E_RESET_GLOBR        = 2,
        I40E_RESET_EMPR         = 3,
 };
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 #endif /* _I40E_TYPE_H_ */
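
The FCoE DDP context above is all shift/mask pairs over 64-bit words; composing QW1 is masking each field to its width and OR-ing it into place. A standalone sketch using the widths and shifts from the hunk (function name is made up):

#include <stdint.h>

static uint64_t demo_build_ddp_qw1(uint64_t dtype, uint64_t cmd,
				   uint64_t foff, uint64_t lsize)
{
	/* Mask each field to its width, then shift to its offset:
	 * dtype 4 bits @0, cmd 4 bits @4, foff 14 bits @16, lsize 14 bits @32 */
	return ((dtype & 0xFULL) << 0) |
	       ((cmd & 0xFULL) << 4) |
	       ((foff & 0x3FFFULL) << 16) |
	       ((lsize & 0x3FFFULL) << 32);
}
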
index f5b9d20..8967255 100644 (file)
@@ -347,10 +347,6 @@ static int i40e_config_vsi_rx_queue(struct i40e_vf *vf, u16 vsi_idx,
        rx_ctx.dsize = 1;
 
        /* default values */
-       rx_ctx.tphrdesc_ena = 1;
-       rx_ctx.tphwdesc_ena = 1;
-       rx_ctx.tphdata_ena = 1;
-       rx_ctx.tphhead_ena = 1;
        rx_ctx.lrxqthresh = 2;
        rx_ctx.crcstrip = 1;
        rx_ctx.prefena = 1;
@@ -673,7 +669,7 @@ void i40e_reset_vf(struct i40e_vf *vf, bool flr)
         */
        for (i = 0; i < 100; i++) {
                /* vf reset requires driver to first reset the
-                * vf & than poll the status register to make sure
+                * vf and then poll the status register to make sure
                 * that the requested op was completed
                 * successfully
                 */
@@ -1009,7 +1005,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
 {
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
-       int true_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
+       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        i40e_status aq_ret;
 
        /* single place to detect unsuccessful return values */
@@ -1029,7 +1025,7 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
                vf->num_valid_msgs++;
        }
 
-       aq_ret = i40e_aq_send_msg_to_vf(hw, true_vf_id, v_opcode, v_retval,
+       aq_ret = i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
                                        msg, msglen, NULL);
        if (aq_ret) {
                dev_err(&pf->pdev->dev,
@@ -1167,8 +1163,8 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
            (struct i40e_virtchnl_promisc_info *)msg;
        struct i40e_pf *pf = vf->pf;
        struct i40e_hw *hw = &pf->hw;
+       struct i40e_vsi *vsi;
        bool allmulti = false;
-       bool promisc = false;
        i40e_status aq_ret;
 
        if (!test_bit(I40E_VF_STAT_ACTIVE, &vf->vf_states) ||
@@ -1178,17 +1174,10 @@ static int i40e_vc_config_promiscuous_mode_msg(struct i40e_vf *vf,
                aq_ret = I40E_ERR_PARAM;
                goto error_param;
        }
-
-       if (info->flags & I40E_FLAG_VF_UNICAST_PROMISC)
-               promisc = true;
-       aq_ret = i40e_aq_set_vsi_unicast_promiscuous(hw, info->vsi_id,
-                                                    promisc, NULL);
-       if (aq_ret)
-               goto error_param;
-
+       vsi = pf->vsi[info->vsi_id];
        if (info->flags & I40E_FLAG_VF_MULTICAST_PROMISC)
                allmulti = true;
-       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, info->vsi_id,
+       aq_ret = i40e_aq_set_vsi_multicast_promiscuous(hw, vsi->seid,
                                                       allmulti, NULL);
 
 error_param:
@@ -1939,15 +1928,17 @@ static void i40e_vc_vf_broadcast(struct i40e_pf *pf,
 {
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
+       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        int i;
 
        for (i = 0; i < pf->num_alloc_vfs; i++) {
                /* Ignore return value on purpose - a given VF may fail, but
                 * we need to keep going and send to all of them
                 */
-               i40e_aq_send_msg_to_vf(hw, vf->vf_id, v_opcode, v_retval,
+               i40e_aq_send_msg_to_vf(hw, abs_vf_id, v_opcode, v_retval,
                                       msg, msglen, NULL);
                vf++;
+               abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        }
 }
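
The recurring fix in this file is one line: firmware addresses VFs by absolute id, so every i40e_aq_send_msg_to_vf() call must add the function's vf_base_id to the PF-relative index instead of passing vf->vf_id raw. In isolation (types illustrative):

/* PF-relative VF index -> absolute VF id as the firmware numbers them;
 * vf_base_id comes from hw->func_caps. */
static inline int demo_abs_vf_id(int vf_id, int vf_base_id)
{
	return vf_id + vf_base_id;
}
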
 
@@ -1963,6 +1954,7 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf = pf->vf;
        struct i40e_link_status *ls = &pf->hw.phy.link_info;
+       int abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        int i;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
@@ -1977,10 +1969,11 @@ void i40e_vc_notify_link_state(struct i40e_pf *pf)
                                ls->link_info & I40E_AQ_LINK_UP;
                        pfe.event_data.link_event.link_speed = ls->link_speed;
                }
-               i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+               i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
                                       0, (u8 *)&pfe, sizeof(pfe),
                                       NULL);
                vf++;
+               abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
        }
 }
 
@@ -2009,10 +2002,11 @@ void i40e_vc_notify_reset(struct i40e_pf *pf)
 void i40e_vc_notify_vf_reset(struct i40e_vf *vf)
 {
        struct i40e_virtchnl_pf_event pfe;
+       int abs_vf_id = vf->vf_id + vf->pf->hw.func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_RESET_IMPENDING;
        pfe.severity = I40E_PF_EVENT_SEVERITY_CERTAIN_DOOM;
-       i40e_aq_send_msg_to_vf(&vf->pf->hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+       i40e_aq_send_msg_to_vf(&vf->pf->hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
                               I40E_SUCCESS, (u8 *)&pfe,
                               sizeof(struct i40e_virtchnl_pf_event), NULL);
 }
@@ -2077,6 +2071,8 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
        }
        ether_addr_copy(vf->default_lan_addr.addr, mac);
        vf->pf_set_mac = true;
+       /* Force the VF driver stop so it has to reload with new MAC address */
+       i40e_vc_disable_vf(pf, vf);
        dev_info(&pf->pdev->dev, "Reload the VF driver to make this change effective.\n");
        ret = 0;
 
@@ -2347,6 +2343,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
        struct i40e_virtchnl_pf_event pfe;
        struct i40e_hw *hw = &pf->hw;
        struct i40e_vf *vf;
+       int abs_vf_id;
        int ret = 0;
 
        /* validate the request */
@@ -2357,6 +2354,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
        }
 
        vf = &pf->vf[vf_id];
+       abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
        pfe.event = I40E_VIRTCHNL_EVENT_LINK_CHANGE;
        pfe.severity = I40E_PF_EVENT_SEVERITY_INFO;
@@ -2386,7 +2384,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
                goto error_out;
        }
        /* Notify the VF of its new link state */
-       i40e_aq_send_msg_to_vf(hw, vf->vf_id, I40E_VIRTCHNL_OP_EVENT,
+       i40e_aq_send_msg_to_vf(hw, abs_vf_id, I40E_VIRTCHNL_OP_EVENT,
                               0, (u8 *)&pfe, sizeof(pfe), NULL);
 
 error_out:
index eb67cce..0030060 100644 (file)
@@ -53,16 +53,24 @@ static void i40e_adminq_init_regs(struct i40e_hw *hw)
                hw->aq.asq.tail = I40E_VF_ATQT1;
                hw->aq.asq.head = I40E_VF_ATQH1;
                hw->aq.asq.len  = I40E_VF_ATQLEN1;
+               hw->aq.asq.bal  = I40E_VF_ATQBAL1;
+               hw->aq.asq.bah  = I40E_VF_ATQBAH1;
                hw->aq.arq.tail = I40E_VF_ARQT1;
                hw->aq.arq.head = I40E_VF_ARQH1;
                hw->aq.arq.len  = I40E_VF_ARQLEN1;
+               hw->aq.arq.bal  = I40E_VF_ARQBAL1;
+               hw->aq.arq.bah  = I40E_VF_ARQBAH1;
        } else {
                hw->aq.asq.tail = I40E_PF_ATQT;
                hw->aq.asq.head = I40E_PF_ATQH;
                hw->aq.asq.len  = I40E_PF_ATQLEN;
+               hw->aq.asq.bal  = I40E_PF_ATQBAL;
+               hw->aq.asq.bah  = I40E_PF_ATQBAH;
                hw->aq.arq.tail = I40E_PF_ARQT;
                hw->aq.arq.head = I40E_PF_ARQH;
                hw->aq.arq.len  = I40E_PF_ARQLEN;
+               hw->aq.arq.bal  = I40E_PF_ARQBAL;
+               hw->aq.arq.bah  = I40E_PF_ARQBAH;
        }
 }
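
Recording the head/tail/len/bal/bah offsets in the ring struct at init time is what lets the config, shutdown and send paths below drop their PF-versus-VF branches: the choice is made once here and everything after indexes through the struct. The shape of the idea, with made-up register offsets:

#include <stdint.h>

struct demo_ring_regs {
	uint32_t head, tail, len, bal, bah;
};

/* Pick the offsets once; later code uses ring->head etc. unconditionally. */
static void demo_init_regs(struct demo_ring_regs *r, int is_vf)
{
	if (is_vf) {
		r->head = 0x100; r->tail = 0x104;	/* made-up values */
		r->len  = 0x108; r->bal  = 0x10c; r->bah = 0x110;
	} else {
		r->head = 0x200; r->tail = 0x204;
		r->len  = 0x208; r->bal  = 0x20c; r->bah = 0x210;
	}
}
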
 
@@ -294,27 +302,18 @@ static i40e_status i40e_config_asq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the transmit queue */
-               wr32(hw, I40E_VF_ATQBAH1,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQBAL1,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_VF_ATQLEN1, (hw->aq.num_asq_entries |
-                                         I40E_VF_ATQLEN1_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ATQBAL1);
-       } else {
-               /* configure the transmit queue */
-               wr32(hw, I40E_PF_ATQBAH,
-                   upper_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQBAL,
-                   lower_32_bits(hw->aq.asq.desc_buf.pa));
-               wr32(hw, I40E_PF_ATQLEN, (hw->aq.num_asq_entries |
-                                         I40E_PF_ATQLEN_ATQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ATQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.asq.head, 0);
+       wr32(hw, hw->aq.asq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.asq.len, (hw->aq.num_asq_entries |
+                                 I40E_PF_ATQLEN_ATQENABLE_MASK));
+       wr32(hw, hw->aq.asq.bal, lower_32_bits(hw->aq.asq.desc_buf.pa));
+       wr32(hw, hw->aq.asq.bah, upper_32_bits(hw->aq.asq.desc_buf.pa));
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.asq.bal);
        if (reg != lower_32_bits(hw->aq.asq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
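
The rewritten config routines end with a readback check: program the base-address register, read it back, and treat a mismatch as a missing or wedged device. A minimal sketch with a faked register file standing in for MMIO:

#include <stdint.h>

static uint32_t fake_regs[0x1000];	/* stand-in for MMIO space */

static void demo_wr32(uint32_t reg, uint32_t val) { fake_regs[reg] = val; }
static uint32_t demo_rd32(uint32_t reg) { return fake_regs[reg]; }

/* Program a queue base address and verify the device latched it;
 * reg must index within fake_regs in this sketch. */
static int demo_program_base(uint32_t bal_reg, uint64_t dma_addr)
{
	demo_wr32(bal_reg, (uint32_t)dma_addr);		/* lower 32 bits */
	if (demo_rd32(bal_reg) != (uint32_t)dma_addr)
		return -1;				/* config not applied */
	return 0;
}
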
 
@@ -332,30 +331,21 @@ static i40e_status i40e_config_arq_regs(struct i40e_hw *hw)
        i40e_status ret_code = 0;
        u32 reg = 0;
 
-       if (hw->mac.type == I40E_MAC_VF) {
-               /* configure the receive queue */
-               wr32(hw, I40E_VF_ARQBAH1,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQBAL1,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_VF_ARQLEN1, (hw->aq.num_arq_entries |
-                                         I40E_VF_ARQLEN1_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_VF_ARQBAL1);
-       } else {
-               /* configure the receive queue */
-               wr32(hw, I40E_PF_ARQBAH,
-                   upper_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQBAL,
-                   lower_32_bits(hw->aq.arq.desc_buf.pa));
-               wr32(hw, I40E_PF_ARQLEN, (hw->aq.num_arq_entries |
-                                         I40E_PF_ARQLEN_ARQENABLE_MASK));
-               reg = rd32(hw, I40E_PF_ARQBAL);
-       }
+       /* Clear Head and Tail */
+       wr32(hw, hw->aq.arq.head, 0);
+       wr32(hw, hw->aq.arq.tail, 0);
+
+       /* set starting point */
+       wr32(hw, hw->aq.arq.len, (hw->aq.num_arq_entries |
+                                 I40E_PF_ARQLEN_ARQENABLE_MASK));
+       wr32(hw, hw->aq.arq.bal, lower_32_bits(hw->aq.arq.desc_buf.pa));
+       wr32(hw, hw->aq.arq.bah, upper_32_bits(hw->aq.arq.desc_buf.pa));
 
        /* Update tail in the HW to post pre-allocated buffers */
        wr32(hw, hw->aq.arq.tail, hw->aq.num_arq_entries - 1);
 
        /* Check one register to verify that config was applied */
+       reg = rd32(hw, hw->aq.arq.bal);
        if (reg != lower_32_bits(hw->aq.arq.desc_buf.pa))
                ret_code = I40E_ERR_ADMIN_QUEUE_ERROR;
 
@@ -497,6 +487,8 @@ static i40e_status i40e_shutdown_asq(struct i40e_hw *hw)
        wr32(hw, hw->aq.asq.head, 0);
        wr32(hw, hw->aq.asq.tail, 0);
        wr32(hw, hw->aq.asq.len, 0);
+       wr32(hw, hw->aq.asq.bal, 0);
+       wr32(hw, hw->aq.asq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.asq_mutex);
@@ -528,6 +520,8 @@ static i40e_status i40e_shutdown_arq(struct i40e_hw *hw)
        wr32(hw, hw->aq.arq.head, 0);
        wr32(hw, hw->aq.arq.tail, 0);
        wr32(hw, hw->aq.arq.len, 0);
+       wr32(hw, hw->aq.arq.bal, 0);
+       wr32(hw, hw->aq.arq.bah, 0);
 
        /* make sure lock is available */
        mutex_lock(&hw->aq.arq_mutex);
@@ -573,6 +567,9 @@ i40e_status i40evf_init_adminq(struct i40e_hw *hw)
        /* Set up register offsets */
        i40e_adminq_init_regs(hw);
 
+       /* setup ASQ command write back timeout */
+       hw->aq.asq_cmd_timeout = I40E_ASQ_CMD_TIMEOUT;
+
        /* allocate the ASQ */
        ret_code = i40e_init_asq(hw);
        if (ret_code)
@@ -630,6 +627,10 @@ static u16 i40e_clean_asq(struct i40e_hw *hw)
        desc = I40E_ADMINQ_DESC(*asq, ntc);
        details = I40E_ADMINQ_DETAILS(*asq, ntc);
        while (rd32(hw, hw->aq.asq.head) != ntc) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "%s: ntc %d head %d.\n", __func__, ntc,
+                          rd32(hw, hw->aq.asq.head));
+
                if (details->callback) {
                        I40E_ADMINQ_CALLBACK cb_func =
                                        (I40E_ADMINQ_CALLBACK)details->callback;
@@ -690,17 +691,20 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
        struct i40e_aq_desc *desc_on_ring;
        bool cmd_completed = false;
        u16  retval = 0;
+       u32  val = 0;
 
-       if (hw->aq.asq.count == 0) {
+       val = rd32(hw, hw->aq.asq.head);
+       if (val >= hw->aq.num_asq_entries) {
                i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
-                          "AQTX: Admin queue not initialized.\n");
+                          "AQTX: head overrun at %d\n", val);
                status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_exit;
        }
 
-       if (i40e_is_nvm_update_op(desc) && hw->aq.nvm_busy) {
-               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: NVM busy.\n");
-               status = I40E_ERR_NVM;
+       if (hw->aq.asq.count == 0) {
+               i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                          "AQTX: Admin queue not initialized.\n");
+               status = I40E_ERR_QUEUE_EMPTY;
                goto asq_send_command_exit;
        }
 
@@ -783,6 +787,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
        }
 
        /* bump the tail */
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
        i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring, buff);
        (hw->aq.asq.next_to_use)++;
        if (hw->aq.asq.next_to_use == hw->aq.asq.count)
@@ -806,7 +811,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                        /* ugh! delay while spin_lock */
                        udelay(delay_len);
                        total_delay += delay_len;
-               } while (total_delay <  I40E_ASQ_CMD_TIMEOUT);
+               } while (total_delay < hw->aq.asq_cmd_timeout);
        }
 
        /* if ready, copy the desc back to temp */
@@ -820,6 +825,7 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
                                   I40E_DEBUG_AQ_MESSAGE,
                                   "AQTX: Command completed with error 0x%X.\n",
                                   retval);
+
                        /* strip off FW internal code */
                        retval &= 0xff;
                }
@@ -834,6 +840,10 @@ i40e_status i40evf_asq_send_command(struct i40e_hw *hw,
        if (i40e_is_nvm_update_op(desc))
                hw->aq.nvm_busy = true;
 
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+                  "AQTX: desc and buffer writeback:\n");
+       i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff);
+
        /* update the error if time out occurred */
        if ((!cmd_completed) &&
            (!details->async && !details->postpone)) {
@@ -905,10 +915,6 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
        /* now clean the next descriptor */
        desc = I40E_ADMINQ_DESC(hw->aq.arq, ntc);
        desc_idx = ntc;
-       i40evf_debug_aq(hw,
-                     I40E_DEBUG_AQ_COMMAND,
-                     (void *)desc,
-                     hw->aq.arq.r.arq_bi[desc_idx].va);
 
        flags = le16_to_cpu(desc->flags);
        if (flags & I40E_AQ_FLAG_ERR) {
@@ -919,18 +925,21 @@ i40e_status i40evf_clean_arq_element(struct i40e_hw *hw,
                           I40E_DEBUG_AQ_MESSAGE,
                           "AQRX: Event received with error 0x%X.\n",
                           hw->aq.arq_last_status);
-       } else {
-               e->desc = *desc;
-               datalen = le16_to_cpu(desc->datalen);
-               e->msg_size = min(datalen, e->msg_size);
-               if (e->msg_buf != NULL && (e->msg_size != 0))
-                       memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
-                              e->msg_size);
        }
 
+       e->desc = *desc;
+       datalen = le16_to_cpu(desc->datalen);
+       e->msg_size = min(datalen, e->msg_size);
+       if (e->msg_buf != NULL && (e->msg_size != 0))
+               memcpy(e->msg_buf, hw->aq.arq.r.arq_bi[desc_idx].va,
+                      e->msg_size);
+
        if (i40e_is_nvm_update_op(&e->desc))
                hw->aq.nvm_busy = false;
 
+       i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+       i40evf_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf);
+
        /* Restore the original datalen and buffer address in the desc,
         * FW updates datalen to indicate the event message
         * size
index e3472c6..91a5c5b 100644 (file)
@@ -56,6 +56,8 @@ struct i40e_adminq_ring {
        u32 head;
        u32 tail;
        u32 len;
+       u32 bah;
+       u32 bal;
 };
 
 /* ASQ transaction details */
@@ -82,6 +84,7 @@ struct i40e_arq_event_info {
 struct i40e_adminq_info {
        struct i40e_adminq_ring arq;    /* receive queue */
        struct i40e_adminq_ring asq;    /* send queue */
+       u32 asq_cmd_timeout;            /* send queue cmd write back timeout */
        u16 num_arq_entries;            /* receive queue depth */
        u16 num_asq_entries;            /* send queue depth */
        u16 arq_buf_size;               /* receive queue buffer size */
@@ -91,6 +94,7 @@ struct i40e_adminq_info {
        u16 api_maj_ver;                /* api major version */
        u16 api_min_ver;                /* api minor version */
        bool nvm_busy;
+       bool nvm_release_on_done;
 
        struct mutex asq_mutex; /* Send queue lock */
        struct mutex arq_mutex; /* Receive queue lock */
@@ -100,6 +104,41 @@ struct i40e_adminq_info {
        enum i40e_admin_queue_err arq_last_status;
 };
 
+/**
+ * i40e_aq_rc_to_posix - convert errors to user-land codes
+ * aq_rc: AdminQ error code to convert
+ **/
+static inline int i40e_aq_rc_to_posix(u16 aq_rc)
+{
+       int aq_to_posix[] = {
+               0,           /* I40E_AQ_RC_OK */
+               -EPERM,      /* I40E_AQ_RC_EPERM */
+               -ENOENT,     /* I40E_AQ_RC_ENOENT */
+               -ESRCH,      /* I40E_AQ_RC_ESRCH */
+               -EINTR,      /* I40E_AQ_RC_EINTR */
+               -EIO,        /* I40E_AQ_RC_EIO */
+               -ENXIO,      /* I40E_AQ_RC_ENXIO */
+               -E2BIG,      /* I40E_AQ_RC_E2BIG */
+               -EAGAIN,     /* I40E_AQ_RC_EAGAIN */
+               -ENOMEM,     /* I40E_AQ_RC_ENOMEM */
+               -EACCES,     /* I40E_AQ_RC_EACCES */
+               -EFAULT,     /* I40E_AQ_RC_EFAULT */
+               -EBUSY,      /* I40E_AQ_RC_EBUSY */
+               -EEXIST,     /* I40E_AQ_RC_EEXIST */
+               -EINVAL,     /* I40E_AQ_RC_EINVAL */
+               -ENOTTY,     /* I40E_AQ_RC_ENOTTY */
+               -ENOSPC,     /* I40E_AQ_RC_ENOSPC */
+               -ENOSYS,     /* I40E_AQ_RC_ENOSYS */
+               -ERANGE,     /* I40E_AQ_RC_ERANGE */
+               -EPIPE,      /* I40E_AQ_RC_EFLUSHED */
+               -ESPIPE,     /* I40E_AQ_RC_BAD_ADDR */
+               -EROFS,      /* I40E_AQ_RC_EMODE */
+               -EFBIG,      /* I40E_AQ_RC_EFBIG */
+       };
+
+       return aq_to_posix[aq_rc];
+}
+
 /* general information */
 #define I40E_AQ_LARGE_BUF      512
 #define I40E_ASQ_CMD_TIMEOUT   100000  /* usecs */
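
One caution on the i40e_aq_rc_to_posix() helper above: it is a bare table lookup with no range check, so a firmware code past I40E_AQ_RC_EFBIG would index off the end of the array. A hedged wrapper, as a caller might guard it:

#include <errno.h>

/* Illustrative guard; I40E_AQ_RC_EFBIG is the last code the table covers. */
static inline int demo_aq_rc_to_posix_checked(u16 aq_rc)
{
	if (aq_rc > I40E_AQ_RC_EFBIG)
		return -ERANGE;		/* unknown firmware status */
	return i40e_aq_rc_to_posix(aq_rc);
}
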
index a43155a..4ea90bf 100644 (file)
@@ -551,6 +551,7 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
                                struct i40e_asq_cmd_details *cmd_details)
 {
        struct i40e_aq_desc desc;
+       struct i40e_asq_cmd_details details;
        i40e_status status;
 
        i40evf_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_send_msg_to_pf);
@@ -565,7 +566,6 @@ i40e_status i40e_aq_send_msg_to_pf(struct i40e_hw *hw,
                desc.datalen = cpu_to_le16(msglen);
        }
        if (!cmd_details) {
-               struct i40e_asq_cmd_details details;
                memset(&details, 0, sizeof(details));
                details.async = true;
                cmd_details = &details;
index a2ad9a4..931c880 100644 (file)
@@ -127,7 +127,7 @@ struct i40e_hmc_info {
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |                  \
                (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);            \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, val1);                        \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
@@ -146,7 +146,7 @@ struct i40e_hmc_info {
                I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |               \
                ((((type) == I40E_SD_TYPE_PAGED) ? 0 : 1) <<            \
                I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);                   \
-       val3 = (sd_index) | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);       \
+       val3 = (sd_index) | (1u << I40E_PFHMC_SDCMD_PMSDWR_SHIFT);      \
        wr32((hw), I40E_PFHMC_SDDATAHIGH, 0);                           \
        wr32((hw), I40E_PFHMC_SDDATALOW, val2);                         \
        wr32((hw), I40E_PFHMC_SDCMD, val3);                             \
index d6f7622..a5d7987 100644 (file)
@@ -32,16 +32,22 @@ struct i40e_hw;
 
 /* HMC element context information */
 
-/* Rx queue context data */
+/* Rx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
 struct i40e_hmc_obj_rxq {
        u16 head;
-       u8  cpuid;
+       u16 cpuid; /* bigger than needed, see above for reason */
        u64 base;
        u16 qlen;
 #define I40E_RXQ_CTX_DBUFF_SHIFT 7
-       u8  dbuff;
+       u16 dbuff; /* bigger than needed, see above for reason */
 #define I40E_RXQ_CTX_HBUFF_SHIFT 6
-       u8  hbuff;
+       u16 hbuff; /* bigger than needed, see above for reason */
        u8  dtype;
        u8  dsize;
        u8  crcstrip;
@@ -50,16 +56,22 @@ struct i40e_hmc_obj_rxq {
        u8  hsplit_0;
        u8  hsplit_1;
        u8  showiv;
-       u16 rxmax;
+       u32 rxmax; /* bigger than needed, see above for reason */
        u8  tphrdesc_ena;
        u8  tphwdesc_ena;
        u8  tphdata_ena;
        u8  tphhead_ena;
-       u8  lrxqthresh;
+       u16 lrxqthresh; /* bigger than needed, see above for reason */
        u8  prefena;    /* NOTE: normally must be set to 1 at init */
 };
 
-/* Tx queue context data */
+/* Tx queue context data
+ *
+ * The sizes of the variables may be larger than needed due to crossing byte
+ * boundaries. If we do not have the width of the variable set to the correct
+ * size then we could end up shifting bits off the top of the variable when the
+ * variable is at the top of a byte and crosses over into the next byte.
+ */
 struct i40e_hmc_obj_txq {
        u16 head;
        u8  new_context;
@@ -69,7 +81,7 @@ struct i40e_hmc_obj_txq {
        u8  fd_ena;
        u8  alt_vlan_ena;
        u16 thead_wb;
-       u16 cpuid;
+       u8  cpuid;
        u8  head_wb_ena;
        u16 qlen;
        u8  tphrdesc_ena;
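
The width bumps in these context structs (u8 to u16, u16 to u32) are exactly the comment's shifting problem: when a field value is shifted to its bit offset inside the context image, a type sized to the field alone drops the high bits. A standalone demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* A 6-bit field that lands at bit offset 5: shifted into place it
	 * needs 11 bits, more than its natural u8 can hold. */
	uint8_t  narrow = 0x3F;
	uint16_t wide   = 0x3F;

	uint8_t  n = (uint8_t)(narrow << 5);	/* top bits shifted away */
	uint16_t w = (uint16_t)(wide << 5);	/* all bits kept */

	printf("narrow: 0x%02x  wide: 0x%04x\n", n, w);
	/* prints: narrow: 0xe0  wide: 0x07e0 */
	return 0;
}
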
index 3698396..c1f6a59 100644 (file)
 #ifndef _I40E_REGISTER_H_
 #define _I40E_REGISTER_H_
 
-#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */
-#define I40E_GL_GP_FUSE_MAX_INDEX 28
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
-#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK (0xFFFFFFFF << I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
-#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK (0x1F << I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
-#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK (0x7 << I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
-#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK (0xFF << I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
-#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
-#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK (0xFFFFFFFF << I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
-#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE 0x0009C600
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
-#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
-#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK (0x1 << I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
-
-#define I40E_PF_ARQBAH 0x00080180
+#define I40E_GL_ARQBAH 0x000801C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAH_ARQBAH_SHIFT 0
+#define I40E_GL_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAH_ARQBAH_SHIFT)
+#define I40E_GL_ARQBAL 0x000800C0 /* Reset: EMPR */
+#define I40E_GL_ARQBAL_ARQBAL_SHIFT 0
+#define I40E_GL_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ARQBAL_ARQBAL_SHIFT)
+#define I40E_GL_ARQH 0x000803C0 /* Reset: EMPR */
+#define I40E_GL_ARQH_ARQH_SHIFT 0
+#define I40E_GL_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_GL_ARQH_ARQH_SHIFT)
+#define I40E_GL_ARQT 0x000804C0 /* Reset: EMPR */
+#define I40E_GL_ARQT_ARQT_SHIFT 0
+#define I40E_GL_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_GL_ARQT_ARQT_SHIFT)
+#define I40E_GL_ATQBAH 0x00080140 /* Reset: EMPR */
+#define I40E_GL_ATQBAH_ATQBAH_SHIFT 0
+#define I40E_GL_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAH_ATQBAH_SHIFT)
+#define I40E_GL_ATQBAL 0x00080040 /* Reset: EMPR */
+#define I40E_GL_ATQBAL_ATQBAL_SHIFT 0
+#define I40E_GL_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_ATQBAL_ATQBAL_SHIFT)
+#define I40E_GL_ATQH 0x00080340 /* Reset: EMPR */
+#define I40E_GL_ATQH_ATQH_SHIFT 0
+#define I40E_GL_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_GL_ATQH_ATQH_SHIFT)
+#define I40E_GL_ATQLEN 0x00080240 /* Reset: EMPR */
+#define I40E_GL_ATQLEN_ATQLEN_SHIFT 0
+#define I40E_GL_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_GL_ATQLEN_ATQLEN_SHIFT)
+#define I40E_GL_ATQLEN_ATQVFE_SHIFT 28
+#define I40E_GL_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQVFE_SHIFT)
+#define I40E_GL_ATQLEN_ATQOVFL_SHIFT 29
+#define I40E_GL_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_GL_ATQLEN_ATQCRIT_SHIFT 30
+#define I40E_GL_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_GL_ATQLEN_ATQENABLE_SHIFT 31
+#define I40E_GL_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_GL_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_GL_ATQT 0x00080440 /* Reset: EMPR */
+#define I40E_GL_ATQT_ATQT_SHIFT 0
+#define I40E_GL_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_GL_ATQT_ATQT_SHIFT)
+#define I40E_PF_ARQBAH 0x00080180 /* Reset: EMPR */
 #define I40E_PF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_PF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_PF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_PF_ARQBAL 0x00080080
+#define I40E_PF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_PF_ARQBAL 0x00080080 /* Reset: EMPR */
 #define I40E_PF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_PF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_PF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_PF_ARQH 0x00080380
+#define I40E_PF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_PF_ARQH 0x00080380 /* Reset: EMPR */
 #define I40E_PF_ARQH_ARQH_SHIFT 0
-#define I40E_PF_ARQH_ARQH_MASK (0x3FF << I40E_PF_ARQH_ARQH_SHIFT)
-#define I40E_PF_ARQLEN 0x00080280
+#define I40E_PF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_PF_ARQH_ARQH_SHIFT)
+#define I40E_PF_ARQLEN 0x00080280 /* Reset: EMPR */
 #define I40E_PF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_PF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_PF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_PF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_PF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_PF_ARQLEN_ARQVFE_MASK (0x1 << I40E_PF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_PF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_PF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_PF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_PF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_PF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_PF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_PF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_PF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_PF_ARQT 0x00080480
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
 #define I40E_PF_ARQT_ARQT_SHIFT 0
-#define I40E_PF_ARQT_ARQT_MASK (0x3FF << I40E_PF_ARQT_ARQT_SHIFT)
-#define I40E_PF_ATQBAH 0x00080100
+#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
+#define I40E_PF_ATQBAH 0x00080100 /* Reset: EMPR */
 #define I40E_PF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_PF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_PF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_PF_ATQBAL 0x00080000
+#define I40E_PF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_PF_ATQBAL 0x00080000 /* Reset: EMPR */
 #define I40E_PF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_PF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_PF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_PF_ATQH 0x00080300
+#define I40E_PF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_PF_ATQH 0x00080300 /* Reset: EMPR */
 #define I40E_PF_ATQH_ATQH_SHIFT 0
-#define I40E_PF_ATQH_ATQH_MASK (0x3FF << I40E_PF_ATQH_ATQH_SHIFT)
-#define I40E_PF_ATQLEN 0x00080200
+#define I40E_PF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_PF_ATQH_ATQH_SHIFT)
+#define I40E_PF_ATQLEN 0x00080200 /* Reset: EMPR */
 #define I40E_PF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_PF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_PF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_PF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_PF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_PF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_PF_ATQLEN_ATQVFE_MASK (0x1 << I40E_PF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_PF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_PF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_PF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_PF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_PF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_PF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_PF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_PF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_PF_ATQT 0x00080400
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
 #define I40E_PF_ATQT_ATQT_SHIFT 0
-#define I40E_PF_ATQT_ATQT_MASK (0x3FF << I40E_PF_ATQT_ATQT_SHIFT)
-#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
+#define I40E_VF_ARQBAH(_VF) (0x00081400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAH_MAX_INDEX 127
 #define I40E_VF_ARQBAH_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAH_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL(_VF) (0x00080C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQBAL_MAX_INDEX 127
 #define I40E_VF_ARQBAL_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL_ARQBAL_SHIFT)
-#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQBAL_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL_ARQBAL_SHIFT)
+#define I40E_VF_ARQH(_VF) (0x00082400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQH_MAX_INDEX 127
 #define I40E_VF_ARQH_ARQH_SHIFT 0
-#define I40E_VF_ARQH_ARQH_MASK (0x3FF << I40E_VF_ARQH_ARQH_SHIFT)
-#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQH_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH_ARQH_SHIFT)
+#define I40E_VF_ARQLEN(_VF) (0x00081C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQLEN_MAX_INDEX 127
 #define I40E_VF_ARQLEN_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ARQT_MAX_INDEX 127
 #define I40E_VF_ARQT_ARQT_SHIFT 0
-#define I40E_VF_ARQT_ARQT_MASK (0x3FF << I40E_VF_ARQT_ARQT_SHIFT)
-#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT_ARQT_SHIFT)
+#define I40E_VF_ATQBAH(_VF) (0x00081000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAH_MAX_INDEX 127
 #define I40E_VF_ATQBAH_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAH_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL(_VF) (0x00080800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQBAL_MAX_INDEX 127
 #define I40E_VF_ATQBAL_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL_ATQBAL_SHIFT)
-#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQBAL_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL_ATQBAL_SHIFT)
+#define I40E_VF_ATQH(_VF) (0x00082000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQH_MAX_INDEX 127
 #define I40E_VF_ATQH_ATQH_SHIFT 0
-#define I40E_VF_ATQH_ATQH_MASK (0x3FF << I40E_VF_ATQH_ATQH_SHIFT)
-#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQH_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH_ATQH_SHIFT)
+#define I40E_VF_ATQLEN(_VF) (0x00081800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQLEN_MAX_INDEX 127
 #define I40E_VF_ATQLEN_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
 #define I40E_VF_ATQT_MAX_INDEX 127
 #define I40E_VF_ATQT_ATQT_SHIFT 0
-#define I40E_VF_ATQT_ATQT_MASK (0x3FF << I40E_VF_ATQT_ATQT_SHIFT)
-#define I40E_PRT_L2TAGSEN 0x001C0B20
+#define I40E_VF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT_ATQT_SHIFT)
+#define I40E_PRT_L2TAGSEN 0x001C0B20 /* Reset: CORER */
 #define I40E_PRT_L2TAGSEN_ENABLE_SHIFT 0
-#define I40E_PRT_L2TAGSEN_ENABLE_MASK (0xFF << I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
-#define I40E_PFCM_LAN_ERRDATA 0x0010C080
+#define I40E_PRT_L2TAGSEN_ENABLE_MASK I40E_MASK(0xFF, I40E_PRT_L2TAGSEN_ENABLE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA 0x0010C080 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_LAN_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_LAN_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK (0xFFF << I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_LAN_ERRINFO 0x0010C000
+#define I40E_PFCM_LAN_ERRDATA_Q_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LAN_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO 0x0010C000 /* Reset: PFR */
 #define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_LAN_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_LAN_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_PFCM_LANCTXCTL(_pf) (0x0010C300 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_LAN_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_PFCM_LANCTXCTL 0x0010C300 /* Reset: CORER */
 #define I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT 0
-#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK (0xFFF << I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_NUM_MASK I40E_MASK(0xFFF, I40E_PFCM_LANCTXCTL_QUEUE_NUM_SHIFT)
 #define I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT 12
-#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK (0x7 << I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_SUB_LINE_MASK I40E_MASK(0x7, I40E_PFCM_LANCTXCTL_SUB_LINE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT 15
-#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK (0x3 << I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
+#define I40E_PFCM_LANCTXCTL_QUEUE_TYPE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_QUEUE_TYPE_SHIFT)
 #define I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT 17
-#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK (0x3 << I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
-#define I40E_PFCM_LANCTXDATA(_i, _pf) (0x0010C100 + ((_i) * 4) + ((_pf) * 16))/* _i=0...3 _pf=0..15 */
+#define I40E_PFCM_LANCTXCTL_OP_CODE_MASK I40E_MASK(0x3, I40E_PFCM_LANCTXCTL_OP_CODE_SHIFT)
+#define I40E_PFCM_LANCTXDATA(_i) (0x0010C100 + ((_i) * 128)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PFCM_LANCTXDATA_MAX_INDEX 3
 #define I40E_PFCM_LANCTXDATA_DATA_SHIFT 0
-#define I40E_PFCM_LANCTXDATA_DATA_MASK (0xFFFFFFFF << I40E_PFCM_LANCTXDATA_DATA_SHIFT)
-#define I40E_PFCM_LANCTXSTAT(_pf) (0x0010C380 + ((_pf) * 4))/* _pf=0..15 */
+#define I40E_PFCM_LANCTXDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFCM_LANCTXDATA_DATA_SHIFT)
+#define I40E_PFCM_LANCTXSTAT 0x0010C380 /* Reset: CORER */
 #define I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT 0
-#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
+#define I40E_PFCM_LANCTXSTAT_CTX_DONE_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_DONE_SHIFT)
 #define I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT 1
-#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK (0x1 << I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
-#define I40E_PFCM_PE_ERRDATA 0x00138D00
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_PFCM_PE_ERRINFO 0x00138C80
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PFCM_LANCTXSTAT_CTX_MISS_MASK I40E_MASK(0x1, I40E_PFCM_LANCTXSTAT_CTX_MISS_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1(_VF) (0x00138800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA1_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA1_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA1_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFCM_PE_ERRDATA1_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA1_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1(_VF) (0x00138400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO1_MAX_INDEX 127
 #define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO1_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO1_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
-#define I40E_GLDCB_GENC 0x00083044
+#define I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO1_RLS_ERROR_CNT_SHIFT)
+#define I40E_GLDCB_GENC 0x00083044 /* Reset: CORER */
 #define I40E_GLDCB_GENC_PCIRTT_SHIFT 0
-#define I40E_GLDCB_GENC_PCIRTT_MASK (0xFFFF << I40E_GLDCB_GENC_PCIRTT_SHIFT)
-#define I40E_GLDCB_RUPTI 0x00122618
+#define I40E_GLDCB_GENC_PCIRTT_MASK I40E_MASK(0xFFFF, I40E_GLDCB_GENC_PCIRTT_SHIFT)
+#define I40E_GLDCB_RUPTI 0x00122618 /* Reset: CORER */
 #define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT 0
-#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK (0xFFFFFFFF << I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
-#define I40E_PRTDCB_FCCFG 0x001E4640
+#define I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLDCB_RUPTI_PFCTIMEOUT_UP_SHIFT)
+#define I40E_PRTDCB_FCCFG 0x001E4640 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCCFG_TFCE_SHIFT 3
-#define I40E_PRTDCB_FCCFG_TFCE_MASK (0x3 << I40E_PRTDCB_FCCFG_TFCE_SHIFT)
-#define I40E_PRTDCB_FCRTV 0x001E4600
+#define I40E_PRTDCB_FCCFG_TFCE_MASK I40E_MASK(0x3, I40E_PRTDCB_FCCFG_TFCE_SHIFT)
+#define I40E_PRTDCB_FCRTV 0x001E4600 /* Reset: GLOBR */
 #define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT 0
-#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK (0xFFFF << I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
-#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTDCB_FCRTV_FC_REFRESH_TH_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCRTV_FC_REFRESH_TH_SHIFT)
+#define I40E_PRTDCB_FCTTVN(_i) (0x001E4580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_FCTTVN_MAX_INDEX 3
 #define I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT 0
-#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
+#define I40E_PRTDCB_FCTTVN_TTV_2N_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_SHIFT)
 #define I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT 16
-#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK (0xFFFF << I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
-#define I40E_PRTDCB_GENC 0x00083000
+#define I40E_PRTDCB_FCTTVN_TTV_2N_P1_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_FCTTVN_TTV_2N_P1_SHIFT)
+#define I40E_PRTDCB_GENC 0x00083000 /* Reset: CORER */
 #define I40E_PRTDCB_GENC_RESERVED_1_SHIFT 0
-#define I40E_PRTDCB_GENC_RESERVED_1_MASK (0x3 << I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
+#define I40E_PRTDCB_GENC_RESERVED_1_MASK I40E_MASK(0x3, I40E_PRTDCB_GENC_RESERVED_1_SHIFT)
 #define I40E_PRTDCB_GENC_NUMTC_SHIFT 2
-#define I40E_PRTDCB_GENC_NUMTC_MASK (0xF << I40E_PRTDCB_GENC_NUMTC_SHIFT)
+#define I40E_PRTDCB_GENC_NUMTC_MASK I40E_MASK(0xF, I40E_PRTDCB_GENC_NUMTC_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_SHIFT 6
-#define I40E_PRTDCB_GENC_FCOEUP_MASK (0x7 << I40E_PRTDCB_GENC_FCOEUP_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_MASK I40E_MASK(0x7, I40E_PRTDCB_GENC_FCOEUP_SHIFT)
 #define I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT 9
-#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK (0x1 << I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
+#define I40E_PRTDCB_GENC_FCOEUP_VALID_MASK I40E_MASK(0x1, I40E_PRTDCB_GENC_FCOEUP_VALID_SHIFT)
 #define I40E_PRTDCB_GENC_PFCLDA_SHIFT 16
-#define I40E_PRTDCB_GENC_PFCLDA_MASK (0xFFFF << I40E_PRTDCB_GENC_PFCLDA_SHIFT)
-#define I40E_PRTDCB_GENS 0x00083020
+#define I40E_PRTDCB_GENC_PFCLDA_MASK I40E_MASK(0xFFFF, I40E_PRTDCB_GENC_PFCLDA_SHIFT)
+#define I40E_PRTDCB_GENS 0x00083020 /* Reset: CORER */
 #define I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT 0
-#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK (0x7 << I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
-#define I40E_PRTDCB_MFLCN 0x001E2400
+#define I40E_PRTDCB_GENS_DCBX_STATUS_MASK I40E_MASK(0x7, I40E_PRTDCB_GENS_DCBX_STATUS_SHIFT)
+#define I40E_PRTDCB_MFLCN 0x001E2400 /* Reset: GLOBR */
 #define I40E_PRTDCB_MFLCN_PMCF_SHIFT 0
-#define I40E_PRTDCB_MFLCN_PMCF_MASK (0x1 << I40E_PRTDCB_MFLCN_PMCF_SHIFT)
+#define I40E_PRTDCB_MFLCN_PMCF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_PMCF_SHIFT)
 #define I40E_PRTDCB_MFLCN_DPF_SHIFT 1
-#define I40E_PRTDCB_MFLCN_DPF_MASK (0x1 << I40E_PRTDCB_MFLCN_DPF_SHIFT)
+#define I40E_PRTDCB_MFLCN_DPF_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_DPF_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCM_SHIFT 2
-#define I40E_PRTDCB_MFLCN_RPFCM_MASK (0x1 << I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
+#define I40E_PRTDCB_MFLCN_RPFCM_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RPFCM_SHIFT)
 #define I40E_PRTDCB_MFLCN_RFCE_SHIFT 3
-#define I40E_PRTDCB_MFLCN_RFCE_MASK (0x1 << I40E_PRTDCB_MFLCN_RFCE_SHIFT)
+#define I40E_PRTDCB_MFLCN_RFCE_MASK I40E_MASK(0x1, I40E_PRTDCB_MFLCN_RFCE_SHIFT)
 #define I40E_PRTDCB_MFLCN_RPFCE_SHIFT 4
-#define I40E_PRTDCB_MFLCN_RPFCE_MASK (0xFF << I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
-#define I40E_PRTDCB_RETSC 0x001223E0
+#define I40E_PRTDCB_MFLCN_RPFCE_MASK I40E_MASK(0xFF, I40E_PRTDCB_MFLCN_RPFCE_SHIFT)
+#define I40E_PRTDCB_RETSC 0x001223E0 /* Reset: CORER */
 #define I40E_PRTDCB_RETSC_ETS_MODE_SHIFT 0
-#define I40E_PRTDCB_RETSC_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT 1
-#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK (0x1 << I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
+#define I40E_PRTDCB_RETSC_NON_ETS_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSC_NON_ETS_MODE_SHIFT)
 #define I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT 2
-#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK (0xF << I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
+#define I40E_PRTDCB_RETSC_ETS_MAX_EXP_MASK I40E_MASK(0xF, I40E_PRTDCB_RETSC_ETS_MAX_EXP_SHIFT)
 #define I40E_PRTDCB_RETSC_LLTC_SHIFT 8
-#define I40E_PRTDCB_RETSC_LLTC_MASK (0xFF << I40E_PRTDCB_RETSC_LLTC_SHIFT)
-#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_RETSC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RETSC_LLTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC(_i) (0x00122180 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_RETSTCC_MAX_INDEX 7
 #define I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT 0
-#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK (0x7F << I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_BWSHARE_MASK I40E_MASK(0x7F, I40E_PRTDCB_RETSTCC_BWSHARE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
-#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK (0x1 << I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
+#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
 #define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK (0x1 << I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
-#define I40E_PRTDCB_RPPMC 0x001223A0
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
 #define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
-#define I40E_PRTDCB_RPPMC_LANRPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT 8
-#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK (0xFF << I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
+#define I40E_PRTDCB_RPPMC_RDMARPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RDMARPPM_SHIFT)
 #define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT 16
-#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK (0xFF << I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
-#define I40E_PRTDCB_RUP 0x001C0B00
+#define I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_RX_FIFO_SIZE_SHIFT)
+#define I40E_PRTDCB_RUP 0x001C0B00 /* Reset: CORER */
 #define I40E_PRTDCB_RUP_NOVLANUP_SHIFT 0
-#define I40E_PRTDCB_RUP_NOVLANUP_MASK (0x7 << I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
-#define I40E_PRTDCB_RUP2TC 0x001C09A0
+#define I40E_PRTDCB_RUP_NOVLANUP_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP_NOVLANUP_SHIFT)
+#define I40E_PRTDCB_RUP2TC 0x001C09A0 /* Reset: CORER */
 #define I40E_PRTDCB_RUP2TC_UP0TC_SHIFT 0
-#define I40E_PRTDCB_RUP2TC_UP0TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP0TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP0TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP1TC_SHIFT 3
-#define I40E_PRTDCB_RUP2TC_UP1TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP1TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP1TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP2TC_SHIFT 6
-#define I40E_PRTDCB_RUP2TC_UP2TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP2TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP2TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP3TC_SHIFT 9
-#define I40E_PRTDCB_RUP2TC_UP3TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP3TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP3TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP4TC_SHIFT 12
-#define I40E_PRTDCB_RUP2TC_UP4TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP4TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP4TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP5TC_SHIFT 15
-#define I40E_PRTDCB_RUP2TC_UP5TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP5TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP5TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP6TC_SHIFT 18
-#define I40E_PRTDCB_RUP2TC_UP6TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
+#define I40E_PRTDCB_RUP2TC_UP6TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP6TC_SHIFT)
 #define I40E_PRTDCB_RUP2TC_UP7TC_SHIFT 21
-#define I40E_PRTDCB_RUP2TC_UP7TC_MASK (0x7 << I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
-#define I40E_PRTDCB_TC2PFC 0x001C0980
+#define I40E_PRTDCB_RUP2TC_UP7TC_MASK I40E_MASK(0x7, I40E_PRTDCB_RUP2TC_UP7TC_SHIFT)
+#define I40E_PRTDCB_TC2PFC 0x001C0980 /* Reset: CORER */
 #define I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT 0
-#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK (0xFF << I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
-#define I40E_PRTDCB_TCPMC 0x000A21A0
+#define I40E_PRTDCB_TC2PFC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TC2PFC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TCMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC 0x000A21A0 /* Reset: CORER */
 #define I40E_PRTDCB_TCPMC_CPM_SHIFT 0
-#define I40E_PRTDCB_TCPMC_CPM_MASK (0x1FFF << I40E_PRTDCB_TCPMC_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_CPM_SHIFT)
 #define I40E_PRTDCB_TCPMC_LLTC_SHIFT 13
-#define I40E_PRTDCB_TCPMC_LLTC_MASK (0xFF << I40E_PRTDCB_TCPMC_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_LLTC_SHIFT)
 #define I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TCPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TCWSTC(_i) (0x000A2040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTDCB_TCWSTC_MAX_INDEX 7
 #define I40E_PRTDCB_TCWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TCWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TDPMC 0x000A0180
+#define I40E_PRTDCB_TCWSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCWSTC_MSTC_SHIFT)
+#define I40E_PRTDCB_TDPMC 0x000A0180 /* Reset: CORER */
 #define I40E_PRTDCB_TDPMC_DPM_SHIFT 0
-#define I40E_PRTDCB_TDPMC_DPM_MASK (0xFF << I40E_PRTDCB_TDPMC_DPM_SHIFT)
+#define I40E_PRTDCB_TDPMC_DPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_TDPMC_DPM_SHIFT)
 #define I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT 30
-#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK (0x1 << I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
-#define I40E_PRTDCB_TDPUC 0x00044100
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT 0
-#define I40E_PRTDCB_TDPUC_MAX_TXFRAME_MASK (0xFFFF << I40E_PRTDCB_TDPUC_MAX_TXFRAME_SHIFT)
-#define I40E_PRTDCB_TETSC_TCB 0x000AE060
+#define I40E_PRTDCB_TDPMC_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TDPMC_TCPM_MODE_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB 0x000AE060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TCB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
-#define I40E_PRTDCB_TETSC_TPB 0x00098060
+#define I40E_PRTDCB_TETSC_TCB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TCB_LLTC_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB 0x00098060 /* Reset: CORER */
 #define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT 0
-#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK (0x1 << I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
+#define I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_MASK I40E_MASK(0x1, I40E_PRTDCB_TETSC_TPB_EN_LL_STRICT_PRIORITY_SHIFT)
 #define I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT 8
-#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK (0xFF << I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
-#define I40E_PRTDCB_TFCS 0x001E4560
+#define I40E_PRTDCB_TETSC_TPB_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TETSC_TPB_LLTC_SHIFT)
+#define I40E_PRTDCB_TFCS 0x001E4560 /* Reset: GLOBR */
 #define I40E_PRTDCB_TFCS_TXOFF_SHIFT 0
-#define I40E_PRTDCB_TFCS_TXOFF_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF0_SHIFT 8
-#define I40E_PRTDCB_TFCS_TXOFF0_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF0_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF0_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF1_SHIFT 9
-#define I40E_PRTDCB_TFCS_TXOFF1_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF1_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF1_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF2_SHIFT 10
-#define I40E_PRTDCB_TFCS_TXOFF2_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF2_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF2_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF3_SHIFT 11
-#define I40E_PRTDCB_TFCS_TXOFF3_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF3_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF3_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF4_SHIFT 12
-#define I40E_PRTDCB_TFCS_TXOFF4_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF4_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF4_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF5_SHIFT 13
-#define I40E_PRTDCB_TFCS_TXOFF5_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF5_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF5_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF6_SHIFT 14
-#define I40E_PRTDCB_TFCS_TXOFF6_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
+#define I40E_PRTDCB_TFCS_TXOFF6_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF6_SHIFT)
 #define I40E_PRTDCB_TFCS_TXOFF7_SHIFT 15
-#define I40E_PRTDCB_TFCS_TXOFF7_MASK (0x1 << I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
-#define I40E_PRTDCB_TFWSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */
-#define I40E_PRTDCB_TFWSTC_MAX_INDEX 7
-#define I40E_PRTDCB_TFWSTC_MSTC_SHIFT 0
-#define I40E_PRTDCB_TFWSTC_MSTC_MASK (0xFFFFF << I40E_PRTDCB_TFWSTC_MSTC_SHIFT)
-#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTDCB_TFCS_TXOFF7_MASK I40E_MASK(0x1, I40E_PRTDCB_TFCS_TXOFF7_SHIFT)
+#define I40E_PRTDCB_TPFCTS(_i) (0x001E4660 + ((_i) * 32)) /* _i=0...7 */ /* Reset: GLOBR */
 #define I40E_PRTDCB_TPFCTS_MAX_INDEX 7
 #define I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT 0
-#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK (0x3FFF << I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
-#define I40E_GLFCOE_RCTL 0x00269B94
+#define I40E_PRTDCB_TPFCTS_PFCTIMER_MASK I40E_MASK(0x3FFF, I40E_PRTDCB_TPFCTS_PFCTIMER_SHIFT)
+#define I40E_GLFCOE_RCTL 0x00269B94 /* Reset: CORER */
 #define I40E_GLFCOE_RCTL_FCOEVER_SHIFT 0
-#define I40E_GLFCOE_RCTL_FCOEVER_MASK (0xF << I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
+#define I40E_GLFCOE_RCTL_FCOEVER_MASK I40E_MASK(0xF, I40E_GLFCOE_RCTL_FCOEVER_SHIFT)
 #define I40E_GLFCOE_RCTL_SAVBAD_SHIFT 4
-#define I40E_GLFCOE_RCTL_SAVBAD_MASK (0x1 << I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
+#define I40E_GLFCOE_RCTL_SAVBAD_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_SAVBAD_SHIFT)
 #define I40E_GLFCOE_RCTL_ICRC_SHIFT 5
-#define I40E_GLFCOE_RCTL_ICRC_MASK (0x1 << I40E_GLFCOE_RCTL_ICRC_SHIFT)
+#define I40E_GLFCOE_RCTL_ICRC_MASK I40E_MASK(0x1, I40E_GLFCOE_RCTL_ICRC_SHIFT)
 #define I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT 16
-#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK (0x3FFF << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
-#define I40E_GL_FWSTS 0x00083048
+#define I40E_GLFCOE_RCTL_MAX_SIZE_MASK I40E_MASK(0x3FFF, I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT)
+#define I40E_GL_FWSTS 0x00083048 /* Reset: POR */
 #define I40E_GL_FWSTS_FWS0B_SHIFT 0
-#define I40E_GL_FWSTS_FWS0B_MASK (0xFF << I40E_GL_FWSTS_FWS0B_SHIFT)
+#define I40E_GL_FWSTS_FWS0B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS0B_SHIFT)
 #define I40E_GL_FWSTS_FWRI_SHIFT 9
-#define I40E_GL_FWSTS_FWRI_MASK (0x1 << I40E_GL_FWSTS_FWRI_SHIFT)
+#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
 #define I40E_GL_FWSTS_FWS1B_SHIFT 16
-#define I40E_GL_FWSTS_FWS1B_MASK (0xFF << I40E_GL_FWSTS_FWS1B_SHIFT)
-#define I40E_GLGEN_CLKSTAT 0x000B8184
+#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
 #define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
-#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK (0x1 << I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
+#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
 #define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT 4
-#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK (0x3 << I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_U_CLK_SPEED_MASK I40E_MASK(0x3, I40E_GLGEN_CLKSTAT_U_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT 8
-#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P0_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT 12
-#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P1_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT 16
-#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P2_CLK_SPEED_SHIFT)
 #define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT 20
-#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK (0x7 << I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
-#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */
+#define I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_MASK I40E_MASK(0x7, I40E_GLGEN_CLKSTAT_P3_CLK_SPEED_SHIFT)
+#define I40E_GLGEN_GPIO_CTL(_i) (0x00088100 + ((_i) * 4)) /* _i=0...29 */ /* Reset: POR */
 #define I40E_GLGEN_GPIO_CTL_MAX_INDEX 29
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT 0
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK (0x3 << I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT 3
-#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PRT_NUM_NA_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT 4
-#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK (0x1 << I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_DIR_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_PIN_DIR_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT 5
-#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_TRI_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_TRI_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT 6
-#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_CTL_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_CTL_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT 7
-#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK (0x7 << I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_PIN_FUNC_MASK I40E_MASK(0x7, I40E_GLGEN_GPIO_CTL_PIN_FUNC_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT 10
-#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_INVRT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_INVRT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT 11
-#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK (0x1 << I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_BLINK_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_LED_BLINK_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT 12
-#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK (0xF << I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_LED_MODE_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT 17
-#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK (0x3 << I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_INT_MODE_MASK I40E_MASK(0x3, I40E_GLGEN_GPIO_CTL_INT_MODE_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT 19
-#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK (0x1 << I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
+#define I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_CTL_OUT_DEFAULT_SHIFT)
 #define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT 20
-#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK (0x3F << I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
-#define I40E_GLGEN_GPIO_SET 0x00088184
+#define I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_MASK I40E_MASK(0x3F, I40E_GLGEN_GPIO_CTL_PHY_PIN_NAME_SHIFT)
+#define I40E_GLGEN_GPIO_SET 0x00088184 /* Reset: POR */
 #define I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT 0
-#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK (0x1F << I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
+#define I40E_GLGEN_GPIO_SET_GPIO_INDX_MASK I40E_MASK(0x1F, I40E_GLGEN_GPIO_SET_GPIO_INDX_SHIFT)
 #define I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT 5
-#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK (0x1 << I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
+#define I40E_GLGEN_GPIO_SET_SDP_DATA_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_SDP_DATA_SHIFT)
 #define I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT 6
-#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK (0x1 << I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
-#define I40E_GLGEN_GPIO_STAT 0x0008817C
+#define I40E_GLGEN_GPIO_SET_DRIVE_SDP_MASK I40E_MASK(0x1, I40E_GLGEN_GPIO_SET_DRIVE_SDP_SHIFT)
+#define I40E_GLGEN_GPIO_STAT 0x0008817C /* Reset: POR */
 #define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT 0
-#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
-#define I40E_GLGEN_GPIO_TRANSIT 0x00088180
+#define I40E_GLGEN_GPIO_STAT_GPIO_VALUE_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_STAT_GPIO_VALUE_SHIFT)
+#define I40E_GLGEN_GPIO_TRANSIT 0x00088180 /* Reset: POR */
 #define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT 0
-#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK (0x3FFFFFFF << I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
-#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_GPIO_TRANSIT_GPIO_TRANSITION_SHIFT)
+#define I40E_GLGEN_I2CCMD(_i) (0x000881E0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CCMD_MAX_INDEX 3
 #define I40E_GLGEN_I2CCMD_DATA_SHIFT 0
-#define I40E_GLGEN_I2CCMD_DATA_MASK (0xFFFF << I40E_GLGEN_I2CCMD_DATA_SHIFT)
+#define I40E_GLGEN_I2CCMD_DATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_I2CCMD_DATA_SHIFT)
 #define I40E_GLGEN_I2CCMD_REGADD_SHIFT 16
-#define I40E_GLGEN_I2CCMD_REGADD_MASK (0xFF << I40E_GLGEN_I2CCMD_REGADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_REGADD_MASK I40E_MASK(0xFF, I40E_GLGEN_I2CCMD_REGADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_PHYADD_SHIFT 24
-#define I40E_GLGEN_I2CCMD_PHYADD_MASK (0x7 << I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
+#define I40E_GLGEN_I2CCMD_PHYADD_MASK I40E_MASK(0x7, I40E_GLGEN_I2CCMD_PHYADD_SHIFT)
 #define I40E_GLGEN_I2CCMD_OP_SHIFT 27
-#define I40E_GLGEN_I2CCMD_OP_MASK (0x1 << I40E_GLGEN_I2CCMD_OP_SHIFT)
+#define I40E_GLGEN_I2CCMD_OP_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_OP_SHIFT)
 #define I40E_GLGEN_I2CCMD_RESET_SHIFT 28
-#define I40E_GLGEN_I2CCMD_RESET_MASK (0x1 << I40E_GLGEN_I2CCMD_RESET_SHIFT)
+#define I40E_GLGEN_I2CCMD_RESET_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_RESET_SHIFT)
 #define I40E_GLGEN_I2CCMD_R_SHIFT 29
-#define I40E_GLGEN_I2CCMD_R_MASK (0x1 << I40E_GLGEN_I2CCMD_R_SHIFT)
+#define I40E_GLGEN_I2CCMD_R_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_R_SHIFT)
 #define I40E_GLGEN_I2CCMD_E_SHIFT 31
-#define I40E_GLGEN_I2CCMD_E_MASK (0x1 << I40E_GLGEN_I2CCMD_E_SHIFT)
-#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_I2CCMD_E_MASK I40E_MASK(0x1, I40E_GLGEN_I2CCMD_E_SHIFT)
+#define I40E_GLGEN_I2CPARAMS(_i) (0x000881AC + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_I2CPARAMS_MAX_INDEX 3
 #define I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT 0
-#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK (0x1F << I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_WRITE_TIME_MASK I40E_MASK(0x1F, I40E_GLGEN_I2CPARAMS_WRITE_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT 5
-#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK (0x7 << I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_READ_TIME_MASK I40E_MASK(0x7, I40E_GLGEN_I2CPARAMS_READ_TIME_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT 8
-#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_I2CBB_EN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2CBB_EN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_SHIFT 9
-#define I40E_GLGEN_I2CPARAMS_CLK_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT 10
-#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OUT_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OUT_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT 11
-#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT 12
-#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_DATA_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_DATA_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT 13
-#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_OE_N_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_OE_N_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT 14
-#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_IN_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_IN_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT 15
-#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK (0x1 << I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
+#define I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_CLK_STRETCH_DIS_SHIFT)
 #define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT 31
-#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK (0x1 << I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
-#define I40E_GLGEN_LED_CTL 0x00088178
+#define I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_MASK I40E_MASK(0x1, I40E_GLGEN_I2CPARAMS_I2C_DATA_ORDER_SHIFT)
+#define I40E_GLGEN_LED_CTL 0x00088178 /* Reset: POR */
 #define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT 0
-#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK (0x1 << I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
-#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_LED_CTL_GLOBAL_BLINK_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL(_i) (0x000881D0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_CTRL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT 0
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK (0x1FFFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_MASK I40E_MASK(0x1FFFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD2_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT 17
-#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK (0x1 << I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
+#define I40E_GLGEN_MDIO_CTRL_CONTMDC_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_CTRL_CONTMDC_SHIFT)
 #define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT 18
-#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK (0x3FFF << I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
-#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_MASK I40E_MASK(0x3FFF, I40E_GLGEN_MDIO_CTRL_LEGACY_RSVD1_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL(_i) (0x000881C0 + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MDIO_I2C_SEL_MAX_INDEX 3
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT 0
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_MDIO_I2C_SEL_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT 1
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_PHY_PORT_NUM_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT 5
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY0_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT 10
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY1_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT 15
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY2_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT 20
-#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK (0x1F << I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_MASK I40E_MASK(0x1F, I40E_GLGEN_MDIO_I2C_SEL_PHY3_ADDRESS_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT 25
-#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK (0xF << I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
+#define I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_MASK I40E_MASK(0xF, I40E_GLGEN_MDIO_I2C_SEL_MDIO_IF_MODE_SHIFT)
 #define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT 31
-#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK (0x1 << I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
-#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_MASK I40E_MASK(0x1, I40E_GLGEN_MDIO_I2C_SEL_EN_FAST_MODE_SHIFT)
+#define I40E_GLGEN_MSCA(_i) (0x0008818C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSCA_MAX_INDEX 3
 #define I40E_GLGEN_MSCA_MDIADD_SHIFT 0
-#define I40E_GLGEN_MSCA_MDIADD_MASK (0xFFFF << I40E_GLGEN_MSCA_MDIADD_SHIFT)
+#define I40E_GLGEN_MSCA_MDIADD_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSCA_MDIADD_SHIFT)
 #define I40E_GLGEN_MSCA_DEVADD_SHIFT 16
-#define I40E_GLGEN_MSCA_DEVADD_MASK (0x1F << I40E_GLGEN_MSCA_DEVADD_SHIFT)
+#define I40E_GLGEN_MSCA_DEVADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_DEVADD_SHIFT)
 #define I40E_GLGEN_MSCA_PHYADD_SHIFT 21
-#define I40E_GLGEN_MSCA_PHYADD_MASK (0x1F << I40E_GLGEN_MSCA_PHYADD_SHIFT)
+#define I40E_GLGEN_MSCA_PHYADD_MASK I40E_MASK(0x1F, I40E_GLGEN_MSCA_PHYADD_SHIFT)
 #define I40E_GLGEN_MSCA_OPCODE_SHIFT 26
-#define I40E_GLGEN_MSCA_OPCODE_MASK (0x3 << I40E_GLGEN_MSCA_OPCODE_SHIFT)
+#define I40E_GLGEN_MSCA_OPCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_OPCODE_SHIFT)
 #define I40E_GLGEN_MSCA_STCODE_SHIFT 28
-#define I40E_GLGEN_MSCA_STCODE_MASK (0x3 << I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_GLGEN_MSCA_STCODE_MASK I40E_MASK(0x3, I40E_GLGEN_MSCA_STCODE_SHIFT)
 #define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
-#define I40E_GLGEN_MSCA_MDICMD_MASK (0x1 << I40E_GLGEN_MSCA_MDICMD_SHIFT)
+#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
 #define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK (0x1 << I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
-#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_GLGEN_MSRWD_MAX_INDEX 3
 #define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
-#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
+#define I40E_GLGEN_MSRWD_MDIWRDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT)
 #define I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT 16
-#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK (0xFFFF << I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
-#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4
+#define I40E_GLGEN_MSRWD_MDIRDDATA_MASK I40E_MASK(0xFFFF, I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT)
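
The MSCA/MSRWD pair above forms the MDIO interface: software composes a
command word from the address and opcode fields, sets MDICMD to start
the transaction, then moves data through MSRWD. A sketch of a PHY
register read along those lines; the completion test, the 'opcode'
value and the surrounding names are assumptions, not quotes from the
driver:

    u32 cmd;
    u16 data;

    /* Compose the command word; 'opcode' is a placeholder for the
     * device's read opcode. */
    cmd = I40E_MASK(reg_addr, I40E_GLGEN_MSCA_MDIADD_SHIFT) |
          I40E_MASK(dev_addr, I40E_GLGEN_MSCA_DEVADD_SHIFT) |
          I40E_MASK(phy_addr, I40E_GLGEN_MSCA_PHYADD_SHIFT) |
          I40E_MASK(opcode, I40E_GLGEN_MSCA_OPCODE_SHIFT) |
          I40E_GLGEN_MSCA_MDICMD_MASK;
    wr32(hw, I40E_GLGEN_MSCA(port), cmd);

    /* Assume MDICMD clears on completion; a real loop would bound
     * its iterations and time out. */
    while (rd32(hw, I40E_GLGEN_MSCA(port)) & I40E_GLGEN_MSCA_MDICMD_MASK)
        cpu_relax();

    data = (rd32(hw, I40E_GLGEN_MSRWD(port)) &
            I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
           I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
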
+#define I40E_GLGEN_PCIFCNCNT 0x001C0AB4 /* Reset: PCIR */
 #define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT 0
-#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK (0x1F << I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
+#define I40E_GLGEN_PCIFCNCNT_PCIPFCNT_MASK I40E_MASK(0x1F, I40E_GLGEN_PCIFCNCNT_PCIPFCNT_SHIFT)
 #define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT 16
-#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK (0xFF << I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
-#define I40E_GLGEN_PE_ENA 0x000B81A0
-#define I40E_GLGEN_PE_ENA_PE_ENA_SHIFT 0
-#define I40E_GLGEN_PE_ENA_PE_ENA_MASK (0x1 << I40E_GLGEN_PE_ENA_PE_ENA_SHIFT)
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT 1
-#define I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_MASK (0x3 << I40E_GLGEN_PE_ENA_PE_CLK_SRC_SEL_SHIFT)
-#define I40E_GLGEN_RSTAT 0x000B8188
+#define I40E_GLGEN_PCIFCNCNT_PCIVFCNT_MASK I40E_MASK(0xFF, I40E_GLGEN_PCIFCNCNT_PCIVFCNT_SHIFT)
+#define I40E_GLGEN_RSTAT 0x000B8188 /* Reset: POR */
 #define I40E_GLGEN_RSTAT_DEVSTATE_SHIFT 0
-#define I40E_GLGEN_RSTAT_DEVSTATE_MASK (0x3 << I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
+#define I40E_GLGEN_RSTAT_DEVSTATE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_DEVSTATE_SHIFT)
 #define I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT 2
-#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK (0x3 << I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
+#define I40E_GLGEN_RSTAT_RESET_TYPE_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT)
 #define I40E_GLGEN_RSTAT_CORERCNT_SHIFT 4
-#define I40E_GLGEN_RSTAT_CORERCNT_MASK (0x3 << I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_CORERCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_CORERCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT 6
-#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_GLOBRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_GLOBRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_EMPRCNT_SHIFT 8
-#define I40E_GLGEN_RSTAT_EMPRCNT_MASK (0x3 << I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
+#define I40E_GLGEN_RSTAT_EMPRCNT_MASK I40E_MASK(0x3, I40E_GLGEN_RSTAT_EMPRCNT_SHIFT)
 #define I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT 10
-#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK (0x3F << I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
-#define I40E_GLGEN_RSTCTL 0x000B8180
+#define I40E_GLGEN_RSTAT_TIME_TO_RST_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTAT_TIME_TO_RST_SHIFT)
+#define I40E_GLGEN_RSTCTL 0x000B8180 /* Reset: POR */
 #define I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT 0
-#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK (0x3F << I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
+#define I40E_GLGEN_RSTCTL_GRSTDEL_MASK I40E_MASK(0x3F, I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT)
 #define I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT 8
-#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
-#define I40E_GLGEN_RSTENA_EMP 0x000B818C
+#define I40E_GLGEN_RSTCTL_ECC_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTCTL_ECC_RST_ENA_SHIFT)
+#define I40E_GLGEN_RSTENA_EMP 0x000B818C /* Reset: POR */
 #define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT 0
-#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK (0x1 << I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
-#define I40E_GLGEN_RTRIG 0x000B8190
+#define I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_MASK I40E_MASK(0x1, I40E_GLGEN_RSTENA_EMP_EMP_RST_ENA_SHIFT)
+#define I40E_GLGEN_RTRIG 0x000B8190 /* Reset: CORER */
 #define I40E_GLGEN_RTRIG_CORER_SHIFT 0
-#define I40E_GLGEN_RTRIG_CORER_MASK (0x1 << I40E_GLGEN_RTRIG_CORER_SHIFT)
+#define I40E_GLGEN_RTRIG_CORER_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_CORER_SHIFT)
 #define I40E_GLGEN_RTRIG_GLOBR_SHIFT 1
-#define I40E_GLGEN_RTRIG_GLOBR_MASK (0x1 << I40E_GLGEN_RTRIG_GLOBR_SHIFT)
+#define I40E_GLGEN_RTRIG_GLOBR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_GLOBR_SHIFT)
 #define I40E_GLGEN_RTRIG_EMPFWR_SHIFT 2
-#define I40E_GLGEN_RTRIG_EMPFWR_MASK (0x1 << I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
-#define I40E_GLGEN_STAT 0x000B612C
+#define I40E_GLGEN_RTRIG_EMPFWR_MASK I40E_MASK(0x1, I40E_GLGEN_RTRIG_EMPFWR_SHIFT)
+#define I40E_GLGEN_STAT 0x000B612C /* Reset: POR */
 #define I40E_GLGEN_STAT_HWRSVD0_SHIFT 0
-#define I40E_GLGEN_STAT_HWRSVD0_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD0_SHIFT)
+#define I40E_GLGEN_STAT_HWRSVD0_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD0_SHIFT)
 #define I40E_GLGEN_STAT_DCBEN_SHIFT 2
-#define I40E_GLGEN_STAT_DCBEN_MASK (0x1 << I40E_GLGEN_STAT_DCBEN_SHIFT)
+#define I40E_GLGEN_STAT_DCBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_DCBEN_SHIFT)
 #define I40E_GLGEN_STAT_VTEN_SHIFT 3
-#define I40E_GLGEN_STAT_VTEN_MASK (0x1 << I40E_GLGEN_STAT_VTEN_SHIFT)
+#define I40E_GLGEN_STAT_VTEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_VTEN_SHIFT)
 #define I40E_GLGEN_STAT_FCOEN_SHIFT 4
-#define I40E_GLGEN_STAT_FCOEN_MASK (0x1 << I40E_GLGEN_STAT_FCOEN_SHIFT)
+#define I40E_GLGEN_STAT_FCOEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_FCOEN_SHIFT)
 #define I40E_GLGEN_STAT_EVBEN_SHIFT 5
-#define I40E_GLGEN_STAT_EVBEN_MASK (0x1 << I40E_GLGEN_STAT_EVBEN_SHIFT)
+#define I40E_GLGEN_STAT_EVBEN_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_EVBEN_SHIFT)
 #define I40E_GLGEN_STAT_HWRSVD1_SHIFT 6
-#define I40E_GLGEN_STAT_HWRSVD1_MASK (0x3 << I40E_GLGEN_STAT_HWRSVD1_SHIFT)
-#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLGEN_STAT_HWRSVD1_MASK I40E_MASK(0x3, I40E_GLGEN_STAT_HWRSVD1_SHIFT)
+#define I40E_GLGEN_VFLRSTAT(_i) (0x00092600 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLGEN_VFLRSTAT_MAX_INDEX 3
 #define I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT 0
-#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK (0xFFFFFFFF << I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
-#define I40E_GLVFGEN_TIMER 0x000881BC
+#define I40E_GLGEN_VFLRSTAT_VFLRE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_VFLRSTAT_VFLRE_SHIFT)
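
GLGEN_VFLRSTAT above is a 4 x 32 bitmap covering the 128 VFs, one VFLR
event bit per VF. A sketch of testing a single VF's bit; 'vf_id' is
illustrative:

    /* Bits are packed 32 per register, so split the VF index into a
     * register index and a bit position. */
    u32 reg = rd32(hw, I40E_GLGEN_VFLRSTAT(vf_id / 32));
    bool vflr = reg & BIT(vf_id % 32);
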
+#define I40E_GLVFGEN_TIMER 0x000881BC /* Reset: CORER */
 #define I40E_GLVFGEN_TIMER_GTIME_SHIFT 0
-#define I40E_GLVFGEN_TIMER_GTIME_MASK (0xFFFFFFFF << I40E_GLVFGEN_TIMER_GTIME_SHIFT)
-#define I40E_PFGEN_CTRL 0x00092400
+#define I40E_GLVFGEN_TIMER_GTIME_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVFGEN_TIMER_GTIME_SHIFT)
+#define I40E_PFGEN_CTRL 0x00092400 /* Reset: PFR */
 #define I40E_PFGEN_CTRL_PFSWR_SHIFT 0
-#define I40E_PFGEN_CTRL_PFSWR_MASK (0x1 << I40E_PFGEN_CTRL_PFSWR_SHIFT)
-#define I40E_PFGEN_DRUN 0x00092500
+#define I40E_PFGEN_CTRL_PFSWR_MASK I40E_MASK(0x1, I40E_PFGEN_CTRL_PFSWR_SHIFT)
+#define I40E_PFGEN_DRUN 0x00092500 /* Reset: CORER */
 #define I40E_PFGEN_DRUN_DRVUNLD_SHIFT 0
-#define I40E_PFGEN_DRUN_DRVUNLD_MASK (0x1 << I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
-#define I40E_PFGEN_PORTNUM 0x001C0480
+#define I40E_PFGEN_DRUN_DRVUNLD_MASK I40E_MASK(0x1, I40E_PFGEN_DRUN_DRVUNLD_SHIFT)
+#define I40E_PFGEN_PORTNUM 0x001C0480 /* Reset: CORER */
 #define I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
-#define I40E_PFGEN_STATE 0x00088000
-#define I40E_PFGEN_STATE_PFPEEN_SHIFT 0
-#define I40E_PFGEN_STATE_PFPEEN_MASK (0x1 << I40E_PFGEN_STATE_PFPEEN_SHIFT)
+#define I40E_PFGEN_PORTNUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTNUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_STATE 0x00088000 /* Reset: CORER */
+#define I40E_PFGEN_STATE_RESERVED_0_SHIFT 0
+#define I40E_PFGEN_STATE_RESERVED_0_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_RESERVED_0_SHIFT)
 #define I40E_PFGEN_STATE_PFFCEN_SHIFT 1
-#define I40E_PFGEN_STATE_PFFCEN_MASK (0x1 << I40E_PFGEN_STATE_PFFCEN_SHIFT)
+#define I40E_PFGEN_STATE_PFFCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFFCEN_SHIFT)
 #define I40E_PFGEN_STATE_PFLINKEN_SHIFT 2
-#define I40E_PFGEN_STATE_PFLINKEN_MASK (0x1 << I40E_PFGEN_STATE_PFLINKEN_SHIFT)
+#define I40E_PFGEN_STATE_PFLINKEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFLINKEN_SHIFT)
 #define I40E_PFGEN_STATE_PFSCEN_SHIFT 3
-#define I40E_PFGEN_STATE_PFSCEN_MASK (0x1 << I40E_PFGEN_STATE_PFSCEN_SHIFT)
-#define I40E_PRTGEN_CNF 0x000B8120
+#define I40E_PFGEN_STATE_PFSCEN_MASK I40E_MASK(0x1, I40E_PFGEN_STATE_PFSCEN_SHIFT)
+#define I40E_PRTGEN_CNF 0x000B8120 /* Reset: POR */
 #define I40E_PRTGEN_CNF_PORT_DIS_SHIFT 0
-#define I40E_PRTGEN_CNF_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT 1
-#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF_ALLOW_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_ALLOW_PORT_DIS_SHIFT)
 #define I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT 2
-#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK (0x1 << I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
-#define I40E_PRTGEN_CNF2 0x000B8160
+#define I40E_PRTGEN_CNF_EMP_PORT_DIS_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF_EMP_PORT_DIS_SHIFT)
+#define I40E_PRTGEN_CNF2 0x000B8160 /* Reset: POR */
 #define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT 0
-#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK (0x1 << I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
-#define I40E_PRTGEN_STATUS 0x000B8100
+#define I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_MASK I40E_MASK(0x1, I40E_PRTGEN_CNF2_ACTIVATE_PORT_LINK_SHIFT)
+#define I40E_PRTGEN_STATUS 0x000B8100 /* Reset: POR */
 #define I40E_PRTGEN_STATUS_PORT_VALID_SHIFT 0
-#define I40E_PRTGEN_STATUS_PORT_VALID_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
+#define I40E_PRTGEN_STATUS_PORT_VALID_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_VALID_SHIFT)
 #define I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT 1
-#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK (0x1 << I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
-#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PRTGEN_STATUS_PORT_ACTIVE_MASK I40E_MASK(0x1, I40E_PRTGEN_STATUS_PORT_ACTIVE_SHIFT)
+#define I40E_VFGEN_RSTAT1(_VF) (0x00074400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFGEN_RSTAT1_MAX_INDEX 127
 #define I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
-#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFGEN_RSTAT1_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT1_VFR_STATE_SHIFT)
+#define I40E_VPGEN_VFRSTAT(_VF) (0x00091C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRSTAT_MAX_INDEX 127
 #define I40E_VPGEN_VFRSTAT_VFRD_SHIFT 0
-#define I40E_VPGEN_VFRSTAT_VFRD_MASK (0x1 << I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
-#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPGEN_VFRSTAT_VFRD_MASK I40E_MASK(0x1, I40E_VPGEN_VFRSTAT_VFRD_SHIFT)
+#define I40E_VPGEN_VFRTRIG(_VF) (0x00091800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPGEN_VFRTRIG_MAX_INDEX 127
 #define I40E_VPGEN_VFRTRIG_VFSWR_SHIFT 0
-#define I40E_VPGEN_VFRTRIG_VFSWR_MASK (0x1 << I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
-#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPGEN_VFRTRIG_VFSWR_MASK I40E_MASK(0x1, I40E_VPGEN_VFRTRIG_VFSWR_SHIFT)
+#define I40E_VSIGEN_RSTAT(_VSI) (0x00090800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RSTAT_MAX_INDEX 383
 #define I40E_VSIGEN_RSTAT_VMRD_SHIFT 0
-#define I40E_VSIGEN_RSTAT_VMRD_MASK (0x1 << I40E_VSIGEN_RSTAT_VMRD_SHIFT)
-#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VSIGEN_RSTAT_VMRD_MASK I40E_MASK(0x1, I40E_VSIGEN_RSTAT_VMRD_SHIFT)
+#define I40E_VSIGEN_RTRIG(_VSI) (0x00090000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_VSIGEN_RTRIG_MAX_INDEX 383
 #define I40E_VSIGEN_RTRIG_VMSWR_SHIFT 0
-#define I40E_VSIGEN_RTRIG_VMSWR_MASK (0x1 << I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
-#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4))
-#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_CEQPART_MAX_INDEX 15
-#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VSIGEN_RTRIG_VMSWR_MASK I40E_MASK(0x1, I40E_VSIGEN_RTRIG_VMSWR_SHIFT)
+#define I40E_GLHMC_FCOEDDPBASE(_i) (0x000C6600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT 0
-#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
-#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEDDPBASE_FPMFCOEDDPBASE_SHIFT)
+#define I40E_GLHMC_FCOEDDPCNT(_i) (0x000C6700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT 0
-#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK (0xFFFFF << I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
-#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010
+#define I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_MASK I40E_MASK(0xFFFFF, I40E_GLHMC_FCOEDDPCNT_FPMFCOEDDPCNT_SHIFT)
+#define I40E_GLHMC_FCOEDDPOBJSZ 0x000C2010 /* Reset: CORER */
 #define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK (0xF << I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEDDPOBJSZ_PMFCOEDDPOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEFBASE(_i) (0x000C6800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFBASE_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT 0
-#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK (0xFFFFFF << I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
-#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FCOEFBASE_FPMFCOEFBASE_SHIFT)
+#define I40E_GLHMC_FCOEFCNT(_i) (0x000C6900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FCOEFCNT_MAX_INDEX 15
 #define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT 0
-#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK (0x7FFFFF << I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
-#define I40E_GLHMC_FCOEFMAX 0x000C20D0
+#define I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_FCOEFCNT_FPMFCOEFCNT_SHIFT)
+#define I40E_GLHMC_FCOEFMAX 0x000C20D0 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT 0
-#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK (0xFFFF << I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
-#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018
+#define I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_FCOEFMAX_PMFCOEFMAX_SHIFT)
+#define I40E_GLHMC_FCOEFOBJSZ 0x000C2018 /* Reset: CORER */
 #define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT 0
-#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK (0xF << I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
-#define I40E_GLHMC_FCOEMAX 0x000C2014
+#define I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FCOEFOBJSZ_PMFCOEFOBJSZ_SHIFT)
+#define I40E_GLHMC_FCOEMAX 0x000C2014 /* Reset: CORER */
 #define I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT 0
-#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK (0x1FFF << I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
-#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FCOEMAX_PMFCOEMAX_MASK I40E_MASK(0x1FFF, I40E_GLHMC_FCOEMAX_PMFCOEMAX_SHIFT)
+#define I40E_GLHMC_FSIAVBASE(_i) (0x000C5600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_FSIAVCNT(_i) (0x000C5700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIAVCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIAVCNT_FPMFSIAVCNT_SHIFT)
 #define I40E_GLHMC_FSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_FSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_FSIAVMAX 0x000C2068
+#define I40E_GLHMC_FSIAVCNT_RSVD_MASK I40E_MASK(0x7, I40E_GLHMC_FSIAVCNT_RSVD_SHIFT)
+#define I40E_GLHMC_FSIAVMAX 0x000C2068 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT 0
-#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK (0x1FFFF << I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
-#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064
+#define I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_FSIAVMAX_PMFSIAVMAX_SHIFT)
+#define I40E_GLHMC_FSIAVOBJSZ 0x000C2064 /* Reset: CORER */
 #define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK (0xF << I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
-#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIAVOBJSZ_PMFSIAVOBJSZ_SHIFT)
+#define I40E_GLHMC_FSIMCBASE(_i) (0x000C6000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCBASE_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT 0
-#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK (0xFFFFFF << I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
-#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_FSIMCBASE_FPMFSIMCBASE_SHIFT)
+#define I40E_GLHMC_FSIMCCNT(_i) (0x000C6100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_FSIMCCNT_MAX_INDEX 15
 #define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK (0x1FFFFFFF << I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
-#define I40E_GLHMC_FSIMCMAX 0x000C2060
+#define I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_FSIMCCNT_FPMFSIMCSZ_SHIFT)
+#define I40E_GLHMC_FSIMCMAX 0x000C2060 /* Reset: CORER */
 #define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT 0
-#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK (0x3FFF << I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
-#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c
+#define I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_MASK I40E_MASK(0x3FFF, I40E_GLHMC_FSIMCMAX_PMFSIMCMAX_SHIFT)
+#define I40E_GLHMC_FSIMCOBJSZ 0x000C205c /* Reset: CORER */
 #define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT 0
-#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK (0xF << I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
-#define I40E_GLHMC_LANQMAX 0x000C2008
+#define I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_FSIMCOBJSZ_PMFSIMCOBJSZ_SHIFT)
+#define I40E_GLHMC_LANQMAX 0x000C2008 /* Reset: CORER */
 #define I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT 0
-#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK (0x7FF << I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
-#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANQMAX_PMLANQMAX_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANQMAX_PMLANQMAX_SHIFT)
+#define I40E_GLHMC_LANRXBASE(_i) (0x000C6400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT 0
-#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
-#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXBASE_FPMLANRXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANRXBASE_FPMLANRXBASE_SHIFT)
+#define I40E_GLHMC_LANRXCNT(_i) (0x000C6500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANRXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT 0
-#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK (0x7FF << I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
-#define I40E_GLHMC_LANRXOBJSZ 0x000C200c
+#define I40E_GLHMC_LANRXCNT_FPMLANRXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANRXCNT_FPMLANRXCNT_SHIFT)
+#define I40E_GLHMC_LANRXOBJSZ 0x000C200c /* Reset: CORER */
 #define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK (0xF << I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
-#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANRXOBJSZ_PMLANRXOBJSZ_SHIFT)
+#define I40E_GLHMC_LANTXBASE(_i) (0x000C6200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXBASE_MAX_INDEX 15
 #define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT 0
-#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK (0xFFFFFF << I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
+#define I40E_GLHMC_LANTXBASE_FPMLANTXBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_LANTXBASE_FPMLANTXBASE_SHIFT)
 #define I40E_GLHMC_LANTXBASE_RSVD_SHIFT 24
-#define I40E_GLHMC_LANTXBASE_RSVD_MASK (0xFF << I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
-#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXBASE_RSVD_MASK I40E_MASK(0xFF, I40E_GLHMC_LANTXBASE_RSVD_SHIFT)
+#define I40E_GLHMC_LANTXCNT(_i) (0x000C6300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_LANTXCNT_MAX_INDEX 15
 #define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT 0
-#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK (0x7FF << I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
-#define I40E_GLHMC_LANTXOBJSZ 0x000C2004
+#define I40E_GLHMC_LANTXCNT_FPMLANTXCNT_MASK I40E_MASK(0x7FF, I40E_GLHMC_LANTXCNT_FPMLANTXCNT_SHIFT)
+#define I40E_GLHMC_LANTXOBJSZ 0x000C2004 /* Reset: CORER */
 #define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT 0
-#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK (0xF << I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
-#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_PEARPMAX 0x000C2038
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
-#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK (0x1FFFF << I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
-#define I40E_GLHMC_PEARPOBJSZ 0x000C2034
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK (0x7 << I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
-#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_PECQOBJSZ 0x000C2020
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK (0xF << I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK (0xF << I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
-#define I40E_GLHMC_PEHTMAX 0x000C2030
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
-#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK (0x1FFFFF << I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
-#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_PEMRMAX 0x000C2040
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
-#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK (0x7FFFFF << I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
-#define I40E_GLHMC_PEMROBJSZ 0x000C203c
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
-#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK (0xF << I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
-#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_PEPBLMAX 0x000C206c
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
-#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_PEQ1FLCNT(_i) (0x000C5500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQ1FLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_PEQ1FLMAX 0x000C2058
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
-#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
-#define I40E_GLHMC_PEQ1MAX 0x000C2054
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
-#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK (0x3FFFFFF << I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
-#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK (0xF << I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
-#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_PEQPOBJSZ 0x000C201c
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK (0xF << I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_PESRQMAX 0x000C2028
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
-#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK (0xFFFF << I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ 0x000C2024
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
-#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK (0xF << I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PESRQOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PESRQOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_PETIMERMAX 0x000C2084
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
-#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK (0x1FFFFFFF << I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
-#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
-#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK (0xF << I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_PEXFFLCNT(_i) (0x000C5100 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLHMC_PEXFFLCNT_MAX_INDEX 15
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_PEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_PEXFFLMAX 0x000C204c
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
-#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK (0x1FFFFFF << I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
-#define I40E_GLHMC_PEXFMAX 0x000C2048
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
-#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK (0x3FFFFFF << I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ 0x000C2044
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
-#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK (0xF << I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT 4
-#define I40E_GLHMC_PEXFOBJSZ_RSVD_MASK (0xFFFFFFF << I40E_GLHMC_PEXFOBJSZ_RSVD_SHIFT)
-#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_LANTXOBJSZ_PMLANTXOBJSZ_SHIFT)
+#define I40E_GLHMC_PFASSIGN(_i) (0x000C0c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_PFASSIGN_MAX_INDEX 15
 #define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT 0
-#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK (0xF << I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
-#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_MASK I40E_MASK(0xF, I40E_GLHMC_PFASSIGN_PMFCNPFASSIGN_SHIFT)
+#define I40E_GLHMC_SDPART(_i) (0x000C0800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLHMC_SDPART_MAX_INDEX 15
 #define I40E_GLHMC_SDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_SDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_SDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_SDPART_PMSDBASE_SHIFT)
 #define I40E_GLHMC_SDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_SDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
-#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4))
-#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
-#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
-#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK (0xFF << I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
-#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK (0x1FF << I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
-#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK (0x3FFF << I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
-#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK (0x3FFF << I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
-#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK (0x7FFF << I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
-#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
-#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK (0xFFFFFF << I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
-#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT 29
-#define I40E_GLHMC_VFFSIAVCNT_RSVD_MASK (0x7 << I40E_GLHMC_VFFSIAVCNT_RSVD_SHIFT)
-#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
-#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
-#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK (0xFFF << I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
-#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
-#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK (0x1FF << I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
-#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
-#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
-#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
-#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
-#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
-#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
-#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
-#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
-#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
-#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
-#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
-#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
-#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
-#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
-#define I40E_GLHMC_VFPEQ1FLCNT(_i) (0x000Cd500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQ1FLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQ1FLCNT_FPMPEQ1FLCNT_SHIFT)
-#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
-#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
-#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
-#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
-#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
-#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
-#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
-#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
-#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
-#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
-#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
-#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
-#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK (0xFFFFFF << I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
-#define I40E_GLHMC_VFPEXFFLCNT(_i) (0x000Cd100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFPEXFFLCNT_MAX_INDEX 31
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT 0
-#define I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_MASK (0x1FFFFFFF << I40E_GLHMC_VFPEXFFLCNT_FPMPEXFFLCNT_SHIFT)
-#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
-#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
-#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK (0xFFF << I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
-#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK (0x1FFF << I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
-#define I40E_PFHMC_ERRORDATA 0x000C0500
+#define I40E_GLHMC_SDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_SDPART_PMSDSIZE_SHIFT)
+#define I40E_PFHMC_ERRORDATA 0x000C0500 /* Reset: PFR */
 #define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT 0
-#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK (0x3FFFFFFF << I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
-#define I40E_PFHMC_ERRORINFO 0x000C0400
+#define I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_MASK I40E_MASK(0x3FFFFFFF, I40E_PFHMC_ERRORDATA_HMC_ERROR_DATA_SHIFT)
+#define I40E_PFHMC_ERRORINFO 0x000C0400 /* Reset: PFR */
 #define I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT 0
-#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK (0x1F << I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_INDEX_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_PMF_INDEX_SHIFT)
 #define I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT 7
-#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK (0x1 << I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
+#define I40E_PFHMC_ERRORINFO_PMF_ISVF_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_PMF_ISVF_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT 8
-#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK (0xF << I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK I40E_MASK(0xF, I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT 16
-#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK (0x1F << I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
+#define I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_MASK I40E_MASK(0x1F, I40E_PFHMC_ERRORINFO_HMC_OBJECT_TYPE_SHIFT)
 #define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT 31
-#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK (0x1 << I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
-#define I40E_PFHMC_PDINV 0x000C0300
+#define I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK I40E_MASK(0x1, I40E_PFHMC_ERRORINFO_ERROR_DETECTED_SHIFT)
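/*
 * A minimal read sketch, hypothetical and not part of the patch: decoding a
 * field is unchanged by the I40E_MASK() conversion. This assumes the
 * driver's rd32() accessor and struct i40e_hw; the function name is made up.
 */
static inline u32 i40e_example_hmc_error_type(struct i40e_hw *hw)
{
	u32 info = rd32(hw, I40E_PFHMC_ERRORINFO);

	/* mask out the field, then shift it down to bit 0 */
	return (info & I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_MASK) >>
	       I40E_PFHMC_ERRORINFO_HMC_ERROR_TYPE_SHIFT;
}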
+#define I40E_PFHMC_PDINV 0x000C0300 /* Reset: PFR */
 #define I40E_PFHMC_PDINV_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_PDINV_PMSDIDX_MASK (0xFFF << I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_PDINV_PMSDIDX_SHIFT)
 #define I40E_PFHMC_PDINV_PMPDIDX_SHIFT 16
-#define I40E_PFHMC_PDINV_PMPDIDX_MASK (0x1FF << I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
-#define I40E_PFHMC_SDCMD 0x000C0000
+#define I40E_PFHMC_PDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_PFHMC_PDINV_PMPDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD 0x000C0000 /* Reset: PFR */
 #define I40E_PFHMC_SDCMD_PMSDIDX_SHIFT 0
-#define I40E_PFHMC_SDCMD_PMSDIDX_MASK (0xFFF << I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_PFHMC_SDCMD_PMSDIDX_SHIFT)
 #define I40E_PFHMC_SDCMD_PMSDWR_SHIFT 31
-#define I40E_PFHMC_SDCMD_PMSDWR_MASK (0x1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
-#define I40E_PFHMC_SDDATAHIGH 0x000C0200
+#define I40E_PFHMC_SDCMD_PMSDWR_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDWR_SHIFT)
+#define I40E_PFHMC_SDDATAHIGH 0x000C0200 /* Reset: PFR */
 #define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT 0
-#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK (0xFFFFFFFF << I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
-#define I40E_PFHMC_SDDATALOW 0x000C0100
+#define I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFHMC_SDDATAHIGH_PMSDDATAHIGH_SHIFT)
+#define I40E_PFHMC_SDDATALOW 0x000C0100 /* Reset: PFR */
 #define I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT 0
-#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDVALID_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT 1
-#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK (0x1 << I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDTYPE_MASK I40E_MASK(0x1, I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT 2
-#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK (0x3FF << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
+#define I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_MASK I40E_MASK(0x3FF, I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT)
 #define I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT 12
-#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK (0xFFFFF << I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
-#define I40E_GL_UFUSE 0x00094008
+#define I40E_PFHMC_SDDATALOW_PMSDDATALOW_MASK I40E_MASK(0xFFFFF, I40E_PFHMC_SDDATALOW_PMSDDATALOW_SHIFT)
+#define I40E_GL_GP_FUSE(_i) (0x0009400C + ((_i) * 4)) /* _i=0...28 */ /* Reset: POR */
+#define I40E_GL_GP_FUSE_MAX_INDEX 28
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT 0
+#define I40E_GL_GP_FUSE_GL_GP_FUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_GP_FUSE_GL_GP_FUSE_SHIFT)
+#define I40E_GL_UFUSE 0x00094008 /* Reset: POR */
 #define I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT 1
-#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK (0x1 << I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
+#define I40E_GL_UFUSE_FOUR_PORT_ENABLE_MASK I40E_MASK(0x1, I40E_GL_UFUSE_FOUR_PORT_ENABLE_SHIFT)
 #define I40E_GL_UFUSE_NIC_ID_SHIFT 2
-#define I40E_GL_UFUSE_NIC_ID_MASK (0x1 << I40E_GL_UFUSE_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_NIC_ID_SHIFT)
 #define I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT 10
-#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
+#define I40E_GL_UFUSE_ULT_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_ULT_LOCKOUT_SHIFT)
 #define I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT 11
-#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK (0x1 << I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
-#define I40E_EMPINT_GPIO_ENA 0x00088188
+#define I40E_GL_UFUSE_CLS_LOCKOUT_MASK I40E_MASK(0x1, I40E_GL_UFUSE_CLS_LOCKOUT_SHIFT)
+#define I40E_EMPINT_GPIO_ENA 0x00088188 /* Reset: POR */
 #define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_EMPINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100
+#define I40E_EMPINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_EMPINT_GPIO_ENA_GPIO29_ENA_SHIFT)
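/*
 * A minimal read-modify-write sketch, again hypothetical: setting one GPIO
 * cause bit in EMPINT_GPIO_ENA with a converted mask, via the driver's
 * rd32()/wr32() accessors.
 */
static inline void i40e_example_enable_gpio0(struct i40e_hw *hw)
{
	u32 ena = rd32(hw, I40E_EMPINT_GPIO_ENA);

	ena |= I40E_EMPINT_GPIO_ENA_GPIO0_ENA_MASK;
	wr32(hw, I40E_EMPINT_GPIO_ENA, ena);
}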
+#define I40E_PFGEN_PORTMDIO_NUM 0x0003F100 /* Reset: CORER */
 #define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT 0
-#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK (0x3 << I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
+#define I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_MASK I40E_MASK(0x3, I40E_PFGEN_PORTMDIO_NUM_PORT_NUM_SHIFT)
 #define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT 4
-#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK (0x1 << I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
-#define I40E_PFINT_AEQCTL 0x00038700
+#define I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_MASK I40E_MASK(0x1, I40E_PFGEN_PORTMDIO_NUM_VFLINK_STAT_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL 0x00038700 /* Reset: CORER */
 #define I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_CEQCTL(_INTPF) (0x00036800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_PFINT_CEQCTL_MAX_INDEX 511
 #define I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_PFINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_PFINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_PFINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_PFINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_PFINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_PFINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_PFINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_PFINT_DYN_CTL0 0x00038480
+#define I40E_PFINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_PFINT_DYN_CTL0 0x00038480 /* Reset: PFR */
 #define I40E_PFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_PFINT_DYN_CTLN(_INTPF) (0x00034800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_PFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_PFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_PFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_PFINT_GPIO_ENA 0x00088080
+#define I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_PFINT_GPIO_ENA 0x00088080 /* Reset: CORER */
 #define I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT 0
-#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO0_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO0_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT 1
-#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO1_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO1_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT 2
-#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO2_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO2_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT 3
-#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO3_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO3_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT 4
-#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO4_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO4_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT 5
-#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO5_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO5_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT 6
-#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO6_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO6_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT 7
-#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO7_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO7_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT 8
-#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO8_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO8_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT 9
-#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO9_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO9_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT 10
-#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO10_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO10_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT 11
-#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO11_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO11_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT 12
-#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO12_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO12_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT 13
-#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO13_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO13_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT 14
-#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO14_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO14_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT 15
-#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO15_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO15_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT 16
-#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO16_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO16_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT 17
-#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO17_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO17_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT 18
-#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO18_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO18_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT 19
-#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO19_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO19_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT 20
-#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO20_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO20_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT 21
-#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO21_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO21_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT 22
-#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO22_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO22_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT 23
-#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO23_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO23_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT 24
-#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO24_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO24_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT 25
-#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO25_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO25_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT 26
-#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO26_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO26_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT 27
-#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO27_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO27_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT 28
-#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
+#define I40E_PFINT_GPIO_ENA_GPIO28_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO28_ENA_SHIFT)
 #define I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT 29
-#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK (0x1 << I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
-#define I40E_PFINT_ICR0 0x00038780
+#define I40E_PFINT_GPIO_ENA_GPIO29_ENA_MASK I40E_MASK(0x1, I40E_PFINT_GPIO_ENA_GPIO29_ENA_SHIFT)
+#define I40E_PFINT_ICR0 0x00038780 /* Reset: CORER */
 #define I40E_PFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_PFINT_ICR0_INTEVENT_MASK (0x1 << I40E_PFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_PFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_PFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_PFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_PFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_PFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_4_SHIFT 5
-#define I40E_PFINT_ICR0_QUEUE_4_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_4_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_4_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_4_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_5_SHIFT 6
-#define I40E_PFINT_ICR0_QUEUE_5_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_5_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_5_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_5_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_6_SHIFT 7
-#define I40E_PFINT_ICR0_QUEUE_6_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_6_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_6_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_6_SHIFT)
 #define I40E_PFINT_ICR0_QUEUE_7_SHIFT 8
-#define I40E_PFINT_ICR0_QUEUE_7_MASK (0x1 << I40E_PFINT_ICR0_QUEUE_7_SHIFT)
+#define I40E_PFINT_ICR0_QUEUE_7_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_QUEUE_7_SHIFT)
 #define I40E_PFINT_ICR0_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_GRST_MASK (0x1 << I40E_PFINT_ICR0_GRST_SHIFT)
+#define I40E_PFINT_ICR0_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GRST_SHIFT)
 #define I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_GPIO_MASK (0x1 << I40E_PFINT_ICR0_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_VFLR_MASK (0x1 << I40E_PFINT_ICR0_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_SWINT_SHIFT 31
-#define I40E_PFINT_ICR0_SWINT_MASK (0x1 << I40E_PFINT_ICR0_SWINT_SHIFT)
-#define I40E_PFINT_ICR0_ENA 0x00038800
+#define I40E_PFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_SWINT_SHIFT)
+#define I40E_PFINT_ICR0_ENA 0x00038800 /* Reset: CORER */
 #define I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT 16
-#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ECC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ECC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT 19
-#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK (0x1 << I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
+#define I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_MAL_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GRST_SHIFT 20
-#define I40E_PFINT_ICR0_ENA_GRST_MASK (0x1 << I40E_PFINT_ICR0_ENA_GRST_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GRST_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GRST_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT 21
-#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK (0x1 << I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_SHIFT)
 #define I40E_PFINT_ICR0_ENA_GPIO_SHIFT 22
-#define I40E_PFINT_ICR0_ENA_GPIO_MASK (0x1 << I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
+#define I40E_PFINT_ICR0_ENA_GPIO_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_GPIO_SHIFT)
 #define I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT 23
-#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK (0x1 << I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_TIMESYNC_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_TIMESYNC_SHIFT)
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT 24
+#define I40E_PFINT_ICR0_ENA_STORM_DETECT_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_STORM_DETECT_SHIFT)
 #define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT 26
-#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_HMC_ERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_HMC_ERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT 28
-#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK (0x1 << I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_PE_CRITERR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_VFLR_SHIFT 29
-#define I40E_PFINT_ICR0_ENA_VFLR_MASK (0x1 << I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
+#define I40E_PFINT_ICR0_ENA_VFLR_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_VFLR_SHIFT)
 #define I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_PFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_PFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_PFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */
+#define I40E_PFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_PFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_PFINT_ITR0(_i) (0x00038000 + ((_i) * 128)) /* _i=0...2 */ /* Reset: PFR */
 #define I40E_PFINT_ITR0_MAX_INDEX 2
 #define I40E_PFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_PFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4))
+#define I40E_PFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_PFINT_ITRN(_i, _INTPF) (0x00030000 + ((_i) * 2048 + (_INTPF) * 4)) /* _i=0...2, _INTPF=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_ITRN_MAX_INDEX 2
 #define I40E_PFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_PFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_PFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_PFINT_LNKLST0 0x00038500
+#define I40E_PFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_PFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_PFINT_LNKLST0 0x00038500 /* Reset: PFR */
 #define I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_LNKLSTN(_INTPF) (0x00035000 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_LNKLSTN_MAX_INDEX 511
 #define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_PFINT_RATE0 0x00038580
+#define I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_PFINT_RATE0 0x00038580 /* Reset: PFR */
 #define I40E_PFINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATE0_INTERVAL_MASK (0x3F << I40E_PFINT_RATE0_INTERVAL_SHIFT)
+#define I40E_PFINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATE0_INTERVAL_SHIFT)
 #define I40E_PFINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */
+#define I40E_PFINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_PFINT_RATEN(_INTPF) (0x00035800 + ((_INTPF) * 4)) /* _i=0...511 */ /* Reset: PFR */
 #define I40E_PFINT_RATEN_MAX_INDEX 511
 #define I40E_PFINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_PFINT_RATEN_INTERVAL_MASK (0x3F << I40E_PFINT_RATEN_INTERVAL_SHIFT)
+#define I40E_PFINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_PFINT_RATEN_INTERVAL_SHIFT)
 #define I40E_PFINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_PFINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_PFINT_STAT_CTL0 0x00038400
+#define I40E_PFINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_PFINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_PFINT_STAT_CTL0 0x00038400 /* Reset: PFR */
 #define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL(_Q) (0x0003A000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_RQCTL_MAX_INDEX 1535
 #define I40E_QINT_RQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_RQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_RQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_RQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_RQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_RQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_RQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_RQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_RQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_RQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_RQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_RQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_RQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_RQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_RQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_RQCTL_INTEVENT_MASK (0x1 << I40E_QINT_RQCTL_INTEVENT_SHIFT)
-#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QINT_RQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_RQCTL_INTEVENT_SHIFT)
+#define I40E_QINT_TQCTL(_Q) (0x0003C000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QINT_TQCTL_MAX_INDEX 1535
 #define I40E_QINT_TQCTL_MSIX_INDX_SHIFT 0
-#define I40E_QINT_TQCTL_MSIX_INDX_MASK (0xFF << I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_QINT_TQCTL_MSIX_INDX_SHIFT)
 #define I40E_QINT_TQCTL_ITR_INDX_SHIFT 11
-#define I40E_QINT_TQCTL_ITR_INDX_MASK (0x3 << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+#define I40E_QINT_TQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_ITR_INDX_SHIFT)
 #define I40E_QINT_TQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_QINT_TQCTL_MSIX0_INDX_MASK (0x7 << I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
+#define I40E_QINT_TQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_QINT_TQCTL_MSIX0_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_QINT_TQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_QINT_TQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_QINT_TQCTL_CAUSE_ENA_MASK (0x1 << I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
+#define I40E_QINT_TQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_CAUSE_ENA_SHIFT)
 #define I40E_QINT_TQCTL_INTEVENT_SHIFT 31
-#define I40E_QINT_TQCTL_INTEVENT_MASK (0x1 << I40E_QINT_TQCTL_INTEVENT_SHIFT)
-#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QINT_TQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_QINT_TQCTL_INTEVENT_SHIFT)
+#define I40E_VFINT_DYN_CTL0(_VF) (0x0002A400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL0_MAX_INDEX 127
 #define I40E_VFINT_DYN_CTL0_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL0_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL0_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL0_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL0_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VFINT_DYN_CTL0_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN(_INTVF) (0x00024800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN_MAX_INDEX 511
 #define I40E_VFINT_DYN_CTLN_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_DYN_CTLN_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0(_VF) (0x0002BC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_MAX_INDEX 127
 #define I40E_VFINT_ICR0_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR0_INTEVENT_MASK (0x1 << I40E_VFINT_ICR0_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR0_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR0_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR0_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR0_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR0_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR0_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR0_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR0_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_SWINT_SHIFT 31
-#define I40E_VFINT_ICR0_SWINT_MASK (0x1 << I40E_VFINT_ICR0_SWINT_SHIFT)
-#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ICR0_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_SWINT_SHIFT)
+#define I40E_VFINT_ICR0_ENA(_VF) (0x0002C000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA_MAX_INDEX 127
 #define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
-#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */
+#define I40E_VFINT_ICR0_ENA_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA_RSVD_SHIFT)
+#define I40E_VFINT_ITR0(_i, _VF) (0x00028000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...2, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_ITR0_MAX_INDEX 2
 #define I40E_VFINT_ITR0_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR0_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR0_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4))
+#define I40E_VFINT_ITR0_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR0_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN(_i, _INTVF) (0x00020000 + ((_i) * 2048 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...511 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN_MAX_INDEX 2
 #define I40E_VFINT_ITRN_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_ITRN_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL0(_VF) (0x0002A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL0_MAX_INDEX 127
 #define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
-#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL(_VF) (0x0002B800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VPINT_AEQCTL_MAX_INDEX 127
 #define I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_AEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_AEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_AEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_AEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_AEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_AEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_AEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_AEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_AEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_AEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_CEQCTL(_INTVF) (0x00026800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_VPINT_CEQCTL_MAX_INDEX 511
 #define I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT 0
-#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK (0xFF << I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX_INDX_MASK I40E_MASK(0xFF, I40E_VPINT_CEQCTL_MSIX_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_ITR_INDX_SHIFT 11
-#define I40E_VPINT_CEQCTL_ITR_INDX_MASK (0x3 << I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_ITR_INDX_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_ITR_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT 13
-#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK (0x7 << I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_MSIX0_INDX_MASK I40E_MASK(0x7, I40E_VPINT_CEQCTL_MSIX0_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT 16
-#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK (0x7FF << I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_CEQCTL_NEXTQ_INDX_SHIFT)
 #define I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT 27
-#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK (0x3 << I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
+#define I40E_VPINT_CEQCTL_NEXTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_CEQCTL_NEXTQ_TYPE_SHIFT)
 #define I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT 30
-#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK (0x1 << I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
+#define I40E_VPINT_CEQCTL_CAUSE_ENA_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_CAUSE_ENA_SHIFT)
 #define I40E_VPINT_CEQCTL_INTEVENT_SHIFT 31
-#define I40E_VPINT_CEQCTL_INTEVENT_MASK (0x1 << I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
-#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_CEQCTL_INTEVENT_MASK I40E_MASK(0x1, I40E_VPINT_CEQCTL_INTEVENT_SHIFT)
+#define I40E_VPINT_LNKLST0(_VF) (0x0002A800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLST0_MAX_INDEX 127
 #define I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLST0_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_LNKLST0_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_LNKLSTN(_INTVF) (0x00025000 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_LNKLSTN_MAX_INDEX 511
 #define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT 0
-#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK (0x7FF << I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+#define I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK I40E_MASK(0x7FF, I40E_VPINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
 #define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT 11
-#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK (0x3 << I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
-#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK I40E_MASK(0x3, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+#define I40E_VPINT_RATE0(_VF) (0x0002AC00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPINT_RATE0_MAX_INDEX 127
 #define I40E_VPINT_RATE0_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATE0_INTERVAL_MASK (0x3F << I40E_VPINT_RATE0_INTERVAL_SHIFT)
+#define I40E_VPINT_RATE0_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATE0_INTERVAL_SHIFT)
 #define I40E_VPINT_RATE0_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATE0_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
-#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */
+#define I40E_VPINT_RATE0_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATE0_INTRL_ENA_SHIFT)
+#define I40E_VPINT_RATEN(_INTVF) (0x00025800 + ((_INTVF) * 4)) /* _i=0...511 */ /* Reset: VFR */
 #define I40E_VPINT_RATEN_MAX_INDEX 511
 #define I40E_VPINT_RATEN_INTERVAL_SHIFT 0
-#define I40E_VPINT_RATEN_INTERVAL_MASK (0x3F << I40E_VPINT_RATEN_INTERVAL_SHIFT)
+#define I40E_VPINT_RATEN_INTERVAL_MASK I40E_MASK(0x3F, I40E_VPINT_RATEN_INTERVAL_SHIFT)
 #define I40E_VPINT_RATEN_INTRL_ENA_SHIFT 6
-#define I40E_VPINT_RATEN_INTRL_ENA_MASK (0x1 << I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
-#define I40E_GL_RDPU_CNTRL 0x00051060
+#define I40E_VPINT_RATEN_INTRL_ENA_MASK I40E_MASK(0x1, I40E_VPINT_RATEN_INTRL_ENA_SHIFT)
+#define I40E_GL_RDPU_CNTRL 0x00051060 /* Reset: CORER */
 #define I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT 0
-#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK (0x1 << I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
+#define I40E_GL_RDPU_CNTRL_RX_PAD_EN_MASK I40E_MASK(0x1, I40E_GL_RDPU_CNTRL_RX_PAD_EN_SHIFT)
 #define I40E_GL_RDPU_CNTRL_ECO_SHIFT 1
-#define I40E_GL_RDPU_CNTRL_ECO_MASK (0x7FFFFFFF << I40E_GL_RDPU_CNTRL_ECO_SHIFT)
-#define I40E_GLLAN_RCTL_0 0x0012A500
+#define I40E_GL_RDPU_CNTRL_ECO_MASK I40E_MASK(0x7FFFFFFF, I40E_GL_RDPU_CNTRL_ECO_SHIFT)
+#define I40E_GLLAN_RCTL_0 0x0012A500 /* Reset: CORER */
 #define I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT 0
-#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK (0x1 << I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
-#define I40E_GLLAN_TSOMSK_F 0x000442D8
+#define I40E_GLLAN_RCTL_0_PXE_MODE_MASK I40E_MASK(0x1, I40E_GLLAN_RCTL_0_PXE_MODE_SHIFT)
+#define I40E_GLLAN_TSOMSK_F 0x000442D8 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT 0
-#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK (0xFFF << I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
-#define I40E_GLLAN_TSOMSK_L 0x000442E0
+#define I40E_GLLAN_TSOMSK_F_TCPMSKF_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_F_TCPMSKF_SHIFT)
+#define I40E_GLLAN_TSOMSK_L 0x000442E0 /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT 0
-#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK (0xFFF << I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
-#define I40E_GLLAN_TSOMSK_M 0x000442DC
+#define I40E_GLLAN_TSOMSK_L_TCPMSKL_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_L_TCPMSKL_SHIFT)
+#define I40E_GLLAN_TSOMSK_M 0x000442DC /* Reset: CORER */
 #define I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT 0
-#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK (0xFFF << I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
-#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000E6500 + ((_i) * 4)) /* i=0..11 */
+#define I40E_GLLAN_TSOMSK_M_TCPMSKM_MASK I40E_MASK(0xFFF, I40E_GLLAN_TSOMSK_M_TCPMSKM_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS(_i) (0x000e6500 + ((_i) * 4)) /* _i=0...11 */ /* Reset: CORER */
+#define I40E_GLLAN_TXPRE_QDIS_MAX_INDEX 11
 #define I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT 0
-#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK (0x7FF << I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QINDX_MASK I40E_MASK(0x7FF, I40E_GLLAN_TXPRE_QDIS_QINDX_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT 16
+#define I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_QDIS_STAT_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
-#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
 #define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK (0x1 << I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
-
-#define I40E_PFLAN_QALLOC 0x001C0400
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
 #define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
-#define I40E_PFLAN_QALLOC_FIRSTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
-#define I40E_PFLAN_QALLOC_LASTQ_MASK (0x7FF << I40E_PFLAN_QALLOC_LASTQ_SHIFT)
+#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
 #define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK (0x1 << I40E_PFLAN_QALLOC_VALID_SHIFT)
-#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QRX_ENA_MAX_INDEX 1535
 #define I40E_QRX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QRX_ENA_QENA_REQ_MASK (0x1 << I40E_QRX_ENA_QENA_REQ_SHIFT)
+#define I40E_QRX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_REQ_SHIFT)
 #define I40E_QRX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QRX_ENA_FAST_QDIS_MASK (0x1 << I40E_QRX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QRX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QRX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QRX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QRX_ENA_QENA_STAT_MASK (0x1 << I40E_QRX_ENA_QENA_STAT_SHIFT)
-#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QRX_ENA_QENA_STAT_SHIFT)
+#define I40E_QRX_TAIL(_Q) (0x00128000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QRX_TAIL_MAX_INDEX 1535
 #define I40E_QRX_TAIL_TAIL_SHIFT 0
-#define I40E_QRX_TAIL_TAIL_MASK (0x1FFF << I40E_QRX_TAIL_TAIL_SHIFT)
-#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QRX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL_TAIL_SHIFT)
+#define I40E_QTX_CTL(_Q) (0x00104000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_CTL_MAX_INDEX 1535
 #define I40E_QTX_CTL_PFVF_Q_SHIFT 0
-#define I40E_QTX_CTL_PFVF_Q_MASK (0x3 << I40E_QTX_CTL_PFVF_Q_SHIFT)
+#define I40E_QTX_CTL_PFVF_Q_MASK I40E_MASK(0x3, I40E_QTX_CTL_PFVF_Q_SHIFT)
 #define I40E_QTX_CTL_PF_INDX_SHIFT 2
-#define I40E_QTX_CTL_PF_INDX_MASK (0xF << I40E_QTX_CTL_PF_INDX_SHIFT)
+#define I40E_QTX_CTL_PF_INDX_MASK I40E_MASK(0xF, I40E_QTX_CTL_PF_INDX_SHIFT)
 #define I40E_QTX_CTL_VFVM_INDX_SHIFT 7
-#define I40E_QTX_CTL_VFVM_INDX_MASK (0x1FF << I40E_QTX_CTL_VFVM_INDX_SHIFT)
-#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_CTL_VFVM_INDX_MASK I40E_MASK(0x1FF, I40E_QTX_CTL_VFVM_INDX_SHIFT)
+#define I40E_QTX_ENA(_Q) (0x00100000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_ENA_MAX_INDEX 1535
 #define I40E_QTX_ENA_QENA_REQ_SHIFT 0
-#define I40E_QTX_ENA_QENA_REQ_MASK (0x1 << I40E_QTX_ENA_QENA_REQ_SHIFT)
+#define I40E_QTX_ENA_QENA_REQ_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_REQ_SHIFT)
 #define I40E_QTX_ENA_FAST_QDIS_SHIFT 1
-#define I40E_QTX_ENA_FAST_QDIS_MASK (0x1 << I40E_QTX_ENA_FAST_QDIS_SHIFT)
+#define I40E_QTX_ENA_FAST_QDIS_MASK I40E_MASK(0x1, I40E_QTX_ENA_FAST_QDIS_SHIFT)
 #define I40E_QTX_ENA_QENA_STAT_SHIFT 2
-#define I40E_QTX_ENA_QENA_STAT_MASK (0x1 << I40E_QTX_ENA_QENA_STAT_SHIFT)
-#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_ENA_QENA_STAT_MASK I40E_MASK(0x1, I40E_QTX_ENA_QENA_STAT_SHIFT)
+#define I40E_QTX_HEAD(_Q) (0x000E4000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: CORER */
 #define I40E_QTX_HEAD_MAX_INDEX 1535
 #define I40E_QTX_HEAD_HEAD_SHIFT 0
-#define I40E_QTX_HEAD_HEAD_MASK (0x1FFF << I40E_QTX_HEAD_HEAD_SHIFT)
+#define I40E_QTX_HEAD_HEAD_MASK I40E_MASK(0x1FFF, I40E_QTX_HEAD_HEAD_SHIFT)
 #define I40E_QTX_HEAD_RS_PENDING_SHIFT 16
-#define I40E_QTX_HEAD_RS_PENDING_MASK (0x1 << I40E_QTX_HEAD_RS_PENDING_SHIFT)
-#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */
+#define I40E_QTX_HEAD_RS_PENDING_MASK I40E_MASK(0x1, I40E_QTX_HEAD_RS_PENDING_SHIFT)
+#define I40E_QTX_TAIL(_Q) (0x00108000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
 #define I40E_QTX_TAIL_MAX_INDEX 1535
 #define I40E_QTX_TAIL_TAIL_SHIFT 0
-#define I40E_QTX_TAIL_TAIL_MASK (0x1FFF << I40E_QTX_TAIL_TAIL_SHIFT)
-#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_QTX_TAIL_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL_TAIL_SHIFT)
+#define I40E_VPLAN_MAPENA(_VF) (0x00074000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_MAPENA_MAX_INDEX 127
 #define I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT 0
-#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK (0x1 << I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
-#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VPLAN_MAPENA_TXRX_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_MAPENA_TXRX_ENA_SHIFT)
+#define I40E_VPLAN_QTABLE(_i, _VF) (0x00070000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: VFR */
 #define I40E_VPLAN_QTABLE_MAX_INDEX 15
 #define I40E_VPLAN_QTABLE_QINDEX_SHIFT 0
-#define I40E_VPLAN_QTABLE_QINDEX_MASK (0x7FF << I40E_VPLAN_QTABLE_QINDEX_SHIFT)
-#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPLAN_QTABLE_QINDEX_MASK I40E_MASK(0x7FF, I40E_VPLAN_QTABLE_QINDEX_SHIFT)
+#define I40E_VSILAN_QBASE(_VSI) (0x0020C800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QBASE_MAX_INDEX 383
 #define I40E_VSILAN_QBASE_VSIBASE_SHIFT 0
-#define I40E_VSILAN_QBASE_VSIBASE_MASK (0x7FF << I40E_VSILAN_QBASE_VSIBASE_SHIFT)
+#define I40E_VSILAN_QBASE_VSIBASE_MASK I40E_MASK(0x7FF, I40E_VSILAN_QBASE_VSIBASE_SHIFT)
 #define I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT 11
-#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK (0x1 << I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
-#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VSILAN_QBASE_VSIQTABLE_ENA_SHIFT)
+#define I40E_VSILAN_QTABLE(_i, _VSI) (0x00200000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...7, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSILAN_QTABLE_MAX_INDEX 7
 #define I40E_VSILAN_QTABLE_QINDEX_0_SHIFT 0
-#define I40E_VSILAN_QTABLE_QINDEX_0_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
+#define I40E_VSILAN_QTABLE_QINDEX_0_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_0_SHIFT)
 #define I40E_VSILAN_QTABLE_QINDEX_1_SHIFT 16
-#define I40E_VSILAN_QTABLE_QINDEX_1_MASK (0x7FF << I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
-#define I40E_PRTGL_SAH 0x001E2140
+#define I40E_VSILAN_QTABLE_QINDEX_1_MASK I40E_MASK(0x7FF, I40E_VSILAN_QTABLE_QINDEX_1_SHIFT)
+#define I40E_PRTGL_SAH 0x001E2140 /* Reset: GLOBR */
 #define I40E_PRTGL_SAH_FC_SAH_SHIFT 0
-#define I40E_PRTGL_SAH_FC_SAH_MASK (0xFFFF << I40E_PRTGL_SAH_FC_SAH_SHIFT)
+#define I40E_PRTGL_SAH_FC_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_FC_SAH_SHIFT)
 #define I40E_PRTGL_SAH_MFS_SHIFT 16
-#define I40E_PRTGL_SAH_MFS_MASK (0xFFFF << I40E_PRTGL_SAH_MFS_SHIFT)
-#define I40E_PRTGL_SAL 0x001E2120
+#define I40E_PRTGL_SAH_MFS_MASK I40E_MASK(0xFFFF, I40E_PRTGL_SAH_MFS_SHIFT)
+#define I40E_PRTGL_SAL 0x001E2120 /* Reset: GLOBR */
 #define I40E_PRTGL_SAL_FC_SAL_SHIFT 0
-#define I40E_PRTGL_SAL_FC_SAL_MASK (0xFFFFFFFF << I40E_PRTGL_SAL_FC_SAL_SHIFT)
-#define I40E_PRTMAC_HLCTLA 0x001E4760
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HLCTLA_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HLCTLA_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT 1
-#define I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_MASK (0x1 << I40E_PRTMAC_HLCTLA_RX_FWRD_CTRL_SHIFT)
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT 2
-#define I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_CHOP_OS_PKT_SHIFT)
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HLCTLA_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HLCTLA_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP 0x001E3130
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GCP_HSEC_CTL_RX_CHECK_SA_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP 0x001E3290
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_GPP_HSEC_CTL_RX_CHECK_SA_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP 0x001E3310
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_SA_PPP_HSEC_CTL_RX_CHECK_SA_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP 0x001E3100
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GCP_HSEC_CTL_RX_CHECK_UCAST_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP 0x001E3280
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_GPP_HSEC_CTL_RX_CHECK_UCAST_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP 0x001E3300
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_CHECK_UCAST_PPP_HSEC_CTL_RX_CHECK_UCAST_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0
+#define I40E_PRTGL_SAL_FC_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTGL_SAL_FC_SAL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP 0x001E30E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GCP_HSEC_CTL_RX_ENABLE_GCP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP 0x001E3260 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_GPP_HSEC_CTL_RX_ENABLE_GPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP 0x001E32E0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360
+#define I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_ENABLE_PPP_HSEC_CTL_RX_ENABLE_PPP_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL 0x001E3360 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110
+#define I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_MASK I40E_MASK(0x1, I40E_PRTMAC_HSEC_CTL_RX_FORWARD_CONTROL_HSEC_CTL_RX_FORWARD_CONTROL_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1 0x001E3110 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_HSEC_CTL_RX_PAUSE_DA_UCAST_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2 0x001E3120 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_HSEC_CTL_RX_PAUSE_DA_UCAST_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE 0x001E30C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_ENABLE_HSEC_CTL_RX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1 0x001E3140 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART1_HSEC_CTL_RX_PAUSE_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2 0x001E3150 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE 0x001E3000
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_MASK (0x1 << I40E_PRTMAC_HSEC_CTL_TX_ENABLE_HSEC_CTL_TX_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0
+#define I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_RX_PAUSE_SA_PART2_HSEC_CTL_RX_PAUSE_SA_PART2_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE 0x001E30D0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK (0x1FF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_MASK I40E_MASK(0x1FF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_ENABLE_HSEC_CTL_TX_PAUSE_ENABLE_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA(_i) (0x001E3370 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16))
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_QUANTA_HSEC_CTL_TX_PAUSE_QUANTA_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER(_i) (0x001E3400 + ((_i) * 16)) /* _i=0...8 */ /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MAX_INDEX 8
 #define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0
+#define I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_HSEC_CTL_TX_PAUSE_REFRESH_TIMER_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1 0x001E34B0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK (0xFFFFFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART1_HSEC_CTL_TX_SA_PART1_SHIFT)
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2 0x001E34C0 /* Reset: GLOBR */
 #define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT 0
-#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK (0xFFFF << I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
-#define I40E_PRTMAC_HSECTL1 0x001E3560
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT 0
-#define I40E_PRTMAC_HSECTL1_DROP_US_PKTS_MASK (0x1 << I40E_PRTMAC_HSECTL1_DROP_US_PKTS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT 3
-#define I40E_PRTMAC_HSECTL1_PAD_US_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_PAD_US_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT 4
-#define I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_MASK (0x7 << I40E_PRTMAC_HSECTL1_TX_HYSTERESIS_SHIFT)
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT 7
-#define I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_MASK (0x1 << I40E_PRTMAC_HSECTL1_HYS_FLUSH_PKT_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT 30
-#define I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_SFD_CHECK_SHIFT)
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT 31
-#define I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_MASK (0x1 << I40E_PRTMAC_HSECTL1_EN_PREAMBLE_CHECK_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480
+#define I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_HSEC_CTL_TX_SA_PART2_HSEC_CTL_TX_SA_PART2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A 0x0008C480 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484
+#define I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_A_SWAP_RX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B 0x0008C484 /* Reset: GLOBR */
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT 0
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT 2
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT 4
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT 6
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_TX_LANE0_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT 8
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE3_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT 10
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE2_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT 12
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE1_SHIFT)
 #define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT 14
-#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK (0x3 << I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
-#define I40E_GL_MNG_FWSM 0x000B6134
-#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 1
-#define I40E_GL_MNG_FWSM_FW_MODES_MASK (0x7 << I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 6
-#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK (0x1 << I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
+#define I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_MASK I40E_MASK(0x3, I40E_PRTMAC_PCS_XAUI_SWAP_B_SWAP_RX_LANE0_SHIFT)
+#define I40E_GL_FWRESETCNT 0x00083100 /* Reset: POR */
+#define I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT 0
+#define I40E_GL_FWRESETCNT_FWRESETCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FWRESETCNT_FWRESETCNT_SHIFT)
+#define I40E_GL_MNG_FWSM 0x000B6134 /* Reset: POR */
+#define I40E_GL_MNG_FWSM_FW_MODES_SHIFT 0
+#define I40E_GL_MNG_FWSM_FW_MODES_MASK I40E_MASK(0x3, I40E_GL_MNG_FWSM_FW_MODES_SHIFT)
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT 10
+#define I40E_GL_MNG_FWSM_EEP_RELOAD_IND_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_EEP_RELOAD_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT 11
-#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK (0xF << I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
+#define I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_MASK I40E_MASK(0xF, I40E_GL_MNG_FWSM_CRC_ERROR_MODULE_SHIFT)
 #define I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT 15
-#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK (0x1 << I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
+#define I40E_GL_MNG_FWSM_FW_STATUS_VALID_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_FW_STATUS_VALID_SHIFT)
 #define I40E_GL_MNG_FWSM_RESET_CNT_SHIFT 16
-#define I40E_GL_MNG_FWSM_RESET_CNT_MASK (0x7 << I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
+#define I40E_GL_MNG_FWSM_RESET_CNT_MASK I40E_MASK(0x7, I40E_GL_MNG_FWSM_RESET_CNT_SHIFT)
 #define I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT 19
-#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK (0x3F << I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
-#define I40E_GL_MNG_FWSM_RSVD_SHIFT 25
-#define I40E_GL_MNG_FWSM_RSVD_MASK (0x1 << I40E_GL_MNG_FWSM_RSVD_SHIFT)
+#define I40E_GL_MNG_FWSM_EXT_ERR_IND_MASK I40E_MASK(0x3F, I40E_GL_MNG_FWSM_EXT_ERR_IND_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT 26
-#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES0_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT 27
-#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES1_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT 28
-#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES2_CONFIG_ERR_SHIFT)
 #define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT 29
-#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK (0x1 << I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
-#define I40E_GL_MNG_HWARB_CTRL 0x000B6130
+#define I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_MASK I40E_MASK(0x1, I40E_GL_MNG_FWSM_PHY_SERDES3_CONFIG_ERR_SHIFT)
+#define I40E_GL_MNG_HWARB_CTRL 0x000B6130 /* Reset: POR */
 #define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT 0
-#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK (0x1 << I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
-#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */
+#define I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_MASK I40E_MASK(0x1, I40E_GL_MNG_HWARB_CTRL_NCSI_ARB_EN_SHIFT)
+#define I40E_PRT_MNG_FTFT_DATA(_i) (0x000852A0 + ((_i) * 32)) /* _i=0...31 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_DATA_MAX_INDEX 31
 #define I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT 0
-#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK (0xFFFFFFFF << I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
-#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260
+#define I40E_PRT_MNG_FTFT_DATA_DWORD_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_FTFT_DATA_DWORD_SHIFT)
+#define I40E_PRT_MNG_FTFT_LENGTH 0x00085260 /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK (0xFF << I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_FTFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PRT_MNG_FTFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PRT_MNG_FTFT_MASK(_i) (0x00085160 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_FTFT_MASK_MAX_INDEX 7
 #define I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT 0
-#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK (0xFFFF << I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
-#define I40E_PRT_MNG_MANC 0x00256A20
+#define I40E_PRT_MNG_FTFT_MASK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_FTFT_MASK_MASK_SHIFT)
+#define I40E_PRT_MNG_MANC 0x00256A20 /* Reset: POR */
 #define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT 0
-#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FLOW_CONTROL_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT 1
-#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK (0x1 << I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
+#define I40E_PRT_MNG_MANC_NCSI_DISCARD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NCSI_DISCARD_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT 17
-#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_TCO_EN_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_TCO_EN_SHIFT)
 #define I40E_PRT_MNG_MANC_RCV_ALL_SHIFT 19
-#define I40E_PRT_MNG_MANC_RCV_ALL_MASK (0x1 << I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
+#define I40E_PRT_MNG_MANC_RCV_ALL_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_RCV_ALL_SHIFT)
 #define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT 25
-#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_FIXED_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_FIXED_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_NET_TYPE_SHIFT 26
-#define I40E_PRT_MNG_MANC_NET_TYPE_MASK (0x1 << I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
+#define I40E_PRT_MNG_MANC_NET_TYPE_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_NET_TYPE_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT 28
-#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
+#define I40E_PRT_MNG_MANC_EN_BMC2OS_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2OS_SHIFT)
 #define I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT 29
-#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK (0x1 << I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
-#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRT_MNG_MANC_EN_BMC2NET_MASK I40E_MASK(0x1, I40E_PRT_MNG_MANC_EN_BMC2NET_SHIFT)
+#define I40E_PRT_MNG_MAVTV(_i) (0x00255900 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MAVTV_MAX_INDEX 7
 #define I40E_PRT_MNG_MAVTV_VID_SHIFT 0
-#define I40E_PRT_MNG_MAVTV_VID_MASK (0xFFF << I40E_PRT_MNG_MAVTV_VID_SHIFT)
-#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32))
+#define I40E_PRT_MNG_MAVTV_VID_MASK I40E_MASK(0xFFF, I40E_PRT_MNG_MAVTV_VID_SHIFT)
+#define I40E_PRT_MNG_MDEF(_i) (0x00255D00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT 4
-#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT 5
-#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK (0xFF << I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_VLAN_AND_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MDEF_VLAN_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT 13
-#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV4_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT 17
-#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK (0xF << I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_IPV6_ADDRESS_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT 21
-#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK (0xF << I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_MAC_EXACT_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_MAC_EXACT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_BROADCAST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_BROADCAST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT 26
-#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK (0x1 << I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_MULTICAST_AND_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_MULTICAST_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_REQUEST_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_ARP_RESPONSE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT 29
-#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_NEIGHBOR_DISCOVERY_134_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT 30
-#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_PORT_0X298_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X298_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT 31
-#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
-#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32))
+#define I40E_PRT_MNG_MDEF_PORT_0X26F_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_PORT_0X26F_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT(_i) (0x00255F00 + ((_i) * 32)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEF_EXT_MAX_INDEX 7
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT 0
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_AND_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT 4
-#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK (0xF << I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_MASK I40E_MASK(0xF, I40E_PRT_MNG_MDEF_EXT_L2_ETHERTYPE_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT 8
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK (0xFFFF << I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEF_EXT_FLEX_PORT_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT 24
-#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_FLEX_TCO_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT 25
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_135_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT 26
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_136_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT 27
-#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_NEIGHBOR_DISCOVERY_137_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT 28
-#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_ICMP_OR_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_ICMP_OR_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT 29
-#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_MLD_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_MLD_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT 30
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_NETWORK_TRAFFIC_SHIFT)
 #define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT 31
-#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK (0x1 << I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
-#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_MASK I40E_MASK(0x1, I40E_PRT_MNG_MDEF_EXT_APPLY_TO_HOST_TRAFFIC_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI(_i) (0x00256580 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MDEFVSI_MAX_INDEX 3
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT 0
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2N_SHIFT)
 #define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT 16
-#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK (0xFFFF << I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
-#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MDEFVSI_MDEFVSI_2NP1_SHIFT)
+#define I40E_PRT_MNG_METF(_i) (0x00256780 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_METF_MAX_INDEX 3
 #define I40E_PRT_MNG_METF_ETYPE_SHIFT 0
-#define I40E_PRT_MNG_METF_ETYPE_MASK (0xFFFF << I40E_PRT_MNG_METF_ETYPE_SHIFT)
+#define I40E_PRT_MNG_METF_ETYPE_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_METF_ETYPE_SHIFT)
 #define I40E_PRT_MNG_METF_POLARITY_SHIFT 30
-#define I40E_PRT_MNG_METF_POLARITY_MASK (0x1 << I40E_PRT_MNG_METF_POLARITY_SHIFT)
-#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_METF_POLARITY_MASK I40E_MASK(0x1, I40E_PRT_MNG_METF_POLARITY_SHIFT)
+#define I40E_PRT_MNG_MFUTP(_i) (0x00254E00 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MFUTP_MAX_INDEX 15
 #define I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT 0
-#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK (0xFFFF << I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
+#define I40E_PRT_MNG_MFUTP_MFUTP_N_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MFUTP_MFUTP_N_SHIFT)
 #define I40E_PRT_MNG_MFUTP_UDP_SHIFT 16
-#define I40E_PRT_MNG_MFUTP_UDP_MASK (0x1 << I40E_PRT_MNG_MFUTP_UDP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_UDP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_TCP_SHIFT 17
-#define I40E_PRT_MNG_MFUTP_TCP_MASK (0x1 << I40E_PRT_MNG_MFUTP_TCP_SHIFT)
+#define I40E_PRT_MNG_MFUTP_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_TCP_SHIFT)
 #define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT 18
-#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK (0x1 << I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
-#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_MASK I40E_MASK(0x1, I40E_PRT_MNG_MFUTP_SOURCE_DESTINATION_SHIFT)
+#define I40E_PRT_MNG_MIPAF4(_i) (0x00256280 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF4_MAX_INDEX 3
 #define I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */
+#define I40E_PRT_MNG_MIPAF4_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF4_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MIPAF6(_i) (0x00254200 + ((_i) * 32)) /* _i=0...15 */ /* Reset: POR */
 #define I40E_PRT_MNG_MIPAF6_MAX_INDEX 15
 #define I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT 0
-#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK (0xFFFFFFFF << I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
-#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MIPAF6_MIPAF_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MIPAF6_MIPAF_SHIFT)
+#define I40E_PRT_MNG_MMAH(_i) (0x00256380 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAH_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAH_MMAH_SHIFT 0
-#define I40E_PRT_MNG_MMAH_MMAH_MASK (0xFFFF << I40E_PRT_MNG_MMAH_MMAH_SHIFT)
-#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRT_MNG_MMAH_MMAH_MASK I40E_MASK(0xFFFF, I40E_PRT_MNG_MMAH_MMAH_SHIFT)
+#define I40E_PRT_MNG_MMAL(_i) (0x00256480 + ((_i) * 32)) /* _i=0...3 */ /* Reset: POR */
 #define I40E_PRT_MNG_MMAL_MAX_INDEX 3
 #define I40E_PRT_MNG_MMAL_MMAL_SHIFT 0
-#define I40E_PRT_MNG_MMAL_MMAL_MASK (0xFFFFFFFF << I40E_PRT_MNG_MMAL_MMAL_SHIFT)
-#define I40E_PRT_MNG_MNGONLY 0x00256A60
+#define I40E_PRT_MNG_MMAL_MMAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRT_MNG_MMAL_MMAL_SHIFT)
+#define I40E_PRT_MNG_MNGONLY 0x00256A60 /* Reset: POR */
 #define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT 0
-#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK (0xFF << I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
-#define I40E_PRT_MNG_MSFM 0x00256AA0
+#define I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_MASK I40E_MASK(0xFF, I40E_PRT_MNG_MNGONLY_EXCLUSIVE_TO_MANAGEABILITY_SHIFT)
+#define I40E_PRT_MNG_MSFM 0x00256AA0 /* Reset: POR */
 #define I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT 0
-#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT 1
-#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_26F_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_26F_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT 2
-#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_UDP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_UDP_SHIFT)
 #define I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT 3
-#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK (0x1 << I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
+#define I40E_PRT_MNG_MSFM_PORT_298_TCP_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_PORT_298_TCP_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT 4
-#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_0_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_0_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT 5
-#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_1_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_1_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT 6
-#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
+#define I40E_PRT_MNG_MSFM_IPV6_2_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_2_MASK_SHIFT)
 #define I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT 7
-#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK (0x1 << I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
-#define I40E_MSIX_PBA(_i) (0x00004900 + ((_i) * 4)) /* _i=0...5 */
+#define I40E_PRT_MNG_MSFM_IPV6_3_MASK_MASK I40E_MASK(0x1, I40E_PRT_MNG_MSFM_IPV6_3_MASK_SHIFT)
+#define I40E_MSIX_PBA(_i) (0x00001000 + ((_i) * 4)) /* _i=0...5 */ /* Reset: FLR */
 #define I40E_MSIX_PBA_MAX_INDEX 5
 #define I40E_MSIX_PBA_PENBIT_SHIFT 0
-#define I40E_MSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_MSIX_PBA_PENBIT_SHIFT)
-#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_PBA_PENBIT_SHIFT)
+#define I40E_MSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TADD_MAX_INDEX 128
 #define I40E_MSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_MSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_MSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_MSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_MSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_MSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_MSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_MSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_MSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_MSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TMSG_MAX_INDEX 128
 #define I40E_MSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_MSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_MSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TUADD_MAX_INDEX 128
 #define I40E_MSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_MSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */
+#define I40E_MSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_MSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_MSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...128 */ /* Reset: FLR */
 #define I40E_MSIX_TVCTRL_MAX_INDEX 128
 #define I40E_MSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_MSIX_TVCTRL_MASK_MASK (0x1 << I40E_MSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFMSIX_PBA1(_i) (0x00004944 + ((_i) * 4)) /* _i=0...19 */
+#define I40E_MSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_MSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFMSIX_PBA1(_i) (0x00002000 + ((_i) * 4)) /* _i=0...19 */ /* Reset: VFLR */
 #define I40E_VFMSIX_PBA1_MAX_INDEX 19
 #define I40E_VFMSIX_PBA1_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA1_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA1_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_PBA1_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA1_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD1(_i) (0x00002100 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD1_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD1_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD1_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD1_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TADD1_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD1_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG1(_i) (0x00002108 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG1_MAX_INDEX 639
 #define I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TMSG1_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG1_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD1(_i) (0x00002104 + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD1_MAX_INDEX 639
 #define I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */
+#define I40E_VFMSIX_TUADD1_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD1_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL1(_i) (0x0000210C + ((_i) * 16)) /* _i=0...639 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL1_MAX_INDEX 639
 #define I40E_VFMSIX_TVCTRL1_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL1_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
-#define I40E_GLNVM_FLA 0x000B6108
+#define I40E_VFMSIX_TVCTRL1_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL1_MASK_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
 #define I40E_GLNVM_FLA_FL_SCK_SHIFT 0
-#define I40E_GLNVM_FLA_FL_SCK_MASK (0x1 << I40E_GLNVM_FLA_FL_SCK_SHIFT)
+#define I40E_GLNVM_FLA_FL_SCK_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SCK_SHIFT)
 #define I40E_GLNVM_FLA_FL_CE_SHIFT 1
-#define I40E_GLNVM_FLA_FL_CE_MASK (0x1 << I40E_GLNVM_FLA_FL_CE_SHIFT)
+#define I40E_GLNVM_FLA_FL_CE_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_CE_SHIFT)
 #define I40E_GLNVM_FLA_FL_SI_SHIFT 2
-#define I40E_GLNVM_FLA_FL_SI_MASK (0x1 << I40E_GLNVM_FLA_FL_SI_SHIFT)
+#define I40E_GLNVM_FLA_FL_SI_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SI_SHIFT)
 #define I40E_GLNVM_FLA_FL_SO_SHIFT 3
-#define I40E_GLNVM_FLA_FL_SO_MASK (0x1 << I40E_GLNVM_FLA_FL_SO_SHIFT)
+#define I40E_GLNVM_FLA_FL_SO_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_SO_SHIFT)
 #define I40E_GLNVM_FLA_FL_REQ_SHIFT 4
-#define I40E_GLNVM_FLA_FL_REQ_MASK (0x1 << I40E_GLNVM_FLA_FL_REQ_SHIFT)
+#define I40E_GLNVM_FLA_FL_REQ_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_REQ_SHIFT)
 #define I40E_GLNVM_FLA_FL_GNT_SHIFT 5
-#define I40E_GLNVM_FLA_FL_GNT_MASK (0x1 << I40E_GLNVM_FLA_FL_GNT_SHIFT)
+#define I40E_GLNVM_FLA_FL_GNT_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_GNT_SHIFT)
 #define I40E_GLNVM_FLA_LOCKED_SHIFT 6
-#define I40E_GLNVM_FLA_LOCKED_MASK (0x1 << I40E_GLNVM_FLA_LOCKED_SHIFT)
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
 #define I40E_GLNVM_FLA_FL_SADDR_SHIFT 18
-#define I40E_GLNVM_FLA_FL_SADDR_MASK (0x7FF << I40E_GLNVM_FLA_FL_SADDR_SHIFT)
+#define I40E_GLNVM_FLA_FL_SADDR_MASK I40E_MASK(0x7FF, I40E_GLNVM_FLA_FL_SADDR_SHIFT)
 #define I40E_GLNVM_FLA_FL_BUSY_SHIFT 30
-#define I40E_GLNVM_FLA_FL_BUSY_MASK (0x1 << I40E_GLNVM_FLA_FL_BUSY_SHIFT)
+#define I40E_GLNVM_FLA_FL_BUSY_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_BUSY_SHIFT)
 #define I40E_GLNVM_FLA_FL_DER_SHIFT 31
-#define I40E_GLNVM_FLA_FL_DER_MASK (0x1 << I40E_GLNVM_FLA_FL_DER_SHIFT)
-#define I40E_GLNVM_FLASHID 0x000B6104
+#define I40E_GLNVM_FLA_FL_DER_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_FL_DER_SHIFT)
+#define I40E_GLNVM_FLASHID 0x000B6104 /* Reset: POR */
 #define I40E_GLNVM_FLASHID_FLASHID_SHIFT 0
-#define I40E_GLNVM_FLASHID_FLASHID_MASK (0xFFFFFF << I40E_GLNVM_FLASHID_FLASHID_SHIFT)
-#define I40E_GLNVM_GENS 0x000B6100
+#define I40E_GLNVM_FLASHID_FLASHID_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_FLASHID_FLASHID_SHIFT)
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT 31
+#define I40E_GLNVM_FLASHID_FLEEP_PERF_MASK I40E_MASK(0x1, I40E_GLNVM_FLASHID_FLEEP_PERF_SHIFT)
+#define I40E_GLNVM_GENS 0x000B6100 /* Reset: POR */
 #define I40E_GLNVM_GENS_NVM_PRES_SHIFT 0
-#define I40E_GLNVM_GENS_NVM_PRES_MASK (0x1 << I40E_GLNVM_GENS_NVM_PRES_SHIFT)
+#define I40E_GLNVM_GENS_NVM_PRES_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_NVM_PRES_SHIFT)
 #define I40E_GLNVM_GENS_SR_SIZE_SHIFT 5
-#define I40E_GLNVM_GENS_SR_SIZE_MASK (0x7 << I40E_GLNVM_GENS_SR_SIZE_SHIFT)
+#define I40E_GLNVM_GENS_SR_SIZE_MASK I40E_MASK(0x7, I40E_GLNVM_GENS_SR_SIZE_SHIFT)
 #define I40E_GLNVM_GENS_BANK1VAL_SHIFT 8
-#define I40E_GLNVM_GENS_BANK1VAL_MASK (0x1 << I40E_GLNVM_GENS_BANK1VAL_SHIFT)
+#define I40E_GLNVM_GENS_BANK1VAL_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_BANK1VAL_SHIFT)
 #define I40E_GLNVM_GENS_ALT_PRST_SHIFT 23
-#define I40E_GLNVM_GENS_ALT_PRST_MASK (0x1 << I40E_GLNVM_GENS_ALT_PRST_SHIFT)
+#define I40E_GLNVM_GENS_ALT_PRST_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_ALT_PRST_SHIFT)
 #define I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT 25
-#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK (0x1 << I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
-#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */
+#define I40E_GLNVM_GENS_FL_AUTO_RD_MASK I40E_MASK(0x1, I40E_GLNVM_GENS_FL_AUTO_RD_SHIFT)
+#define I40E_GLNVM_PROTCSR(_i) (0x000B6010 + ((_i) * 4)) /* _i=0...59 */ /* Reset: POR */
 #define I40E_GLNVM_PROTCSR_MAX_INDEX 59
 #define I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT 0
-#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK (0xFFFFFF << I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
-#define I40E_GLNVM_SRCTL 0x000B6110
+#define I40E_GLNVM_PROTCSR_ADDR_BLOCK_MASK I40E_MASK(0xFFFFFF, I40E_GLNVM_PROTCSR_ADDR_BLOCK_SHIFT)
+#define I40E_GLNVM_SRCTL 0x000B6110 /* Reset: POR */
 #define I40E_GLNVM_SRCTL_SRBUSY_SHIFT 0
-#define I40E_GLNVM_SRCTL_SRBUSY_MASK (0x1 << I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
+#define I40E_GLNVM_SRCTL_SRBUSY_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_SRBUSY_SHIFT)
 #define I40E_GLNVM_SRCTL_ADDR_SHIFT 14
-#define I40E_GLNVM_SRCTL_ADDR_MASK (0x7FFF << I40E_GLNVM_SRCTL_ADDR_SHIFT)
+#define I40E_GLNVM_SRCTL_ADDR_MASK I40E_MASK(0x7FFF, I40E_GLNVM_SRCTL_ADDR_SHIFT)
 #define I40E_GLNVM_SRCTL_WRITE_SHIFT 29
-#define I40E_GLNVM_SRCTL_WRITE_MASK (0x1 << I40E_GLNVM_SRCTL_WRITE_SHIFT)
+#define I40E_GLNVM_SRCTL_WRITE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_WRITE_SHIFT)
 #define I40E_GLNVM_SRCTL_START_SHIFT 30
-#define I40E_GLNVM_SRCTL_START_MASK (0x1 << I40E_GLNVM_SRCTL_START_SHIFT)
+#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
 #define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK (0x1 << I40E_GLNVM_SRCTL_DONE_SHIFT)
-#define I40E_GLNVM_SRDATA 0x000B6114
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
 #define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
-#define I40E_GLNVM_SRDATA_WRDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_WRDATA_SHIFT)
+#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
 #define I40E_GLNVM_SRDATA_RDDATA_SHIFT 16
-#define I40E_GLNVM_SRDATA_RDDATA_MASK (0xFFFF << I40E_GLNVM_SRDATA_RDDATA_SHIFT)
-#define I40E_GLNVM_ULD 0x000B6008
+#define I40E_GLNVM_SRDATA_RDDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_RDDATA_SHIFT)
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
 #define I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT 0
-#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT 1
-#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIRTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIRTL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT 2
-#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_LCB_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_LCB_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT 3
-#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_CORE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_CORE_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT 4
-#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_GLOBAL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_GLOBAL_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT 5
-#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_POR_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT 6
-#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIE_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT 7
-#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PHY_ANA_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT 8
-#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
+#define I40E_GLNVM_ULD_CONF_EMP_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_EMP_DONE_SHIFT)
 #define I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT 9
-#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK (0x1 << I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
-
-#define I40E_GLPCI_BYTCTH 0x0009C484
+#define I40E_GLNVM_ULD_CONF_PCIALT_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CONF_PCIALT_DONE_SHIFT)
+#define I40E_GLPCI_BYTCTH 0x0009C484 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_BYTCTL 0x0009C488
+#define I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTH_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_BYTCTL 0x0009C488 /* Reset: PCIR */
 #define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT 0
-#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK (0xFFFFFFFF << I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
-#define I40E_GLPCI_CAPCTRL 0x000BE4A4
+#define I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_BYTCTL_PCI_COUNT_BW_BCT_SHIFT)
+#define I40E_GLPCI_CAPCTRL 0x000BE4A4 /* Reset: PCIR */
 #define I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT 0
-#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK (0x1 << I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
-#define I40E_GLPCI_CAPSUP 0x000BE4A8
+#define I40E_GLPCI_CAPCTRL_VPD_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPCTRL_VPD_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP 0x000BE4A8 /* Reset: PCIR */
 #define I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT 0
-#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK (0x1 << I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
+#define I40E_GLPCI_CAPSUP_PCIE_VER_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_PCIE_VER_SHIFT)
 #define I40E_GLPCI_CAPSUP_LTR_EN_SHIFT 2
-#define I40E_GLPCI_CAPSUP_LTR_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_LTR_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LTR_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_TPH_EN_SHIFT 3
-#define I40E_GLPCI_CAPSUP_TPH_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_TPH_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_TPH_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ARI_EN_SHIFT 4
-#define I40E_GLPCI_CAPSUP_ARI_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ARI_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ARI_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IOV_EN_SHIFT 5
-#define I40E_GLPCI_CAPSUP_IOV_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IOV_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IOV_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ACS_EN_SHIFT 6
-#define I40E_GLPCI_CAPSUP_ACS_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ACS_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ACS_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_SEC_EN_SHIFT 7
-#define I40E_GLPCI_CAPSUP_SEC_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_SEC_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_SEC_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT 16
-#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_GEN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_GEN_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT 17
-#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_ECRC_CHK_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_ECRC_CHK_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_IDO_EN_SHIFT 18
-#define I40E_GLPCI_CAPSUP_IDO_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_IDO_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_IDO_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT 19
-#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK (0x1 << I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
+#define I40E_GLPCI_CAPSUP_MSI_MASK_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_MSI_MASK_SHIFT)
 #define I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT 20
-#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK (0x1 << I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
+#define I40E_GLPCI_CAPSUP_CSR_CONF_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_CSR_CONF_EN_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT 30
-#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
+#define I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_SUBSYS_ID_SHIFT)
 #define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT 31
-#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK (0x1 << I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
-#define I40E_GLPCI_CNF 0x000BE4C0
+#define I40E_GLPCI_CAPSUP_LOAD_DEV_ID_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_LOAD_DEV_ID_SHIFT)
+#define I40E_GLPCI_CNF 0x000BE4C0 /* Reset: POR */
 #define I40E_GLPCI_CNF_FLEX10_SHIFT 1
-#define I40E_GLPCI_CNF_FLEX10_MASK (0x1 << I40E_GLPCI_CNF_FLEX10_SHIFT)
+#define I40E_GLPCI_CNF_FLEX10_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_FLEX10_SHIFT)
 #define I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT 2
-#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK (0x1 << I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
-#define I40E_GLPCI_CNF2 0x000BE494
+#define I40E_GLPCI_CNF_WAKE_PIN_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CNF_WAKE_PIN_EN_SHIFT)
+#define I40E_GLPCI_CNF2 0x000BE494 /* Reset: PCIR */
 #define I40E_GLPCI_CNF2_RO_DIS_SHIFT 0
-#define I40E_GLPCI_CNF2_RO_DIS_MASK (0x1 << I40E_GLPCI_CNF2_RO_DIS_SHIFT)
+#define I40E_GLPCI_CNF2_RO_DIS_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_RO_DIS_SHIFT)
 #define I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT 1
-#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK (0x1 << I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
+#define I40E_GLPCI_CNF2_CACHELINE_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_CNF2_CACHELINE_SIZE_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT 2
-#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
+#define I40E_GLPCI_CNF2_MSI_X_PF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT)
 #define I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT 13
-#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK (0x7FF << I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
-#define I40E_GLPCI_DREVID 0x0009C480
+#define I40E_GLPCI_CNF2_MSI_X_VF_N_MASK I40E_MASK(0x7FF, I40E_GLPCI_CNF2_MSI_X_VF_N_SHIFT)
+#define I40E_GLPCI_DREVID 0x0009C480 /* Reset: PCIR */
 #define I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT 0
-#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK (0xFF << I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
-#define I40E_GLPCI_GSCL_1 0x0009C48C
+#define I40E_GLPCI_DREVID_DEFAULT_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_DREVID_DEFAULT_REVID_SHIFT)
+#define I40E_GLPCI_GSCL_1 0x0009C48C /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT 0
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT 1
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT 2
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT 3
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_EN_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT 4
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_0_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_0_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT 5
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_1_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_1_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT 6
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_2_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_2_SHIFT)
 #define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT 7
-#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK (0x1 << I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
+#define I40E_GLPCI_GSCL_1_LBC_ENABLE_3_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_LBC_ENABLE_3_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT 8
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT 9
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_LAT_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT 14
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT 15
-#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK (0x1F << I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
+#define I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_MASK I40E_MASK(0x1F, I40E_GLPCI_GSCL_1_PCI_COUNT_BW_EV_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT 28
-#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_64_BIT_EN_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT 29
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_RESET_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT 30
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_STOP_SHIFT)
 #define I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT 31
-#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK (0x1 << I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
-#define I40E_GLPCI_GSCL_2 0x0009C490
+#define I40E_GLPCI_GSCL_1_GIO_COUNT_START_MASK I40E_MASK(0x1, I40E_GLPCI_GSCL_1_GIO_COUNT_START_SHIFT)
+#define I40E_GLPCI_GSCL_2 0x0009C490 /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT 0
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_0_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT 8
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_1_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT 16
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_2_SHIFT)
 #define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT 24
-#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK (0xFF << I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
-#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_MASK I40E_MASK(0xFF, I40E_GLPCI_GSCL_2_GIO_EVENT_NUM_3_SHIFT)
+#define I40E_GLPCI_GSCL_5_8(_i) (0x0009C494 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCL_5_8_MAX_INDEX 3
 #define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT 0
-#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
+#define I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_THRESHOLD_N_SHIFT)
 #define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT 16
-#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK (0xFFFF << I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
-#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */
+#define I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_MASK I40E_MASK(0xFFFF, I40E_GLPCI_GSCL_5_8_LBC_TIMER_N_SHIFT)
+#define I40E_GLPCI_GSCN_0_3(_i) (0x0009C4A4 + ((_i) * 4)) /* _i=0...3 */ /* Reset: PCIR */
 #define I40E_GLPCI_GSCN_0_3_MAX_INDEX 3
 #define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT 0
-#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK (0xFFFFFFFF << I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
-#define I40E_GLPCI_LATCT 0x0009C4B4
+#define I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_GSCN_0_3_EVENT_COUNTER_SHIFT)
+#define I40E_GLPCI_LATCT 0x0009C4B4 /* Reset: PCIR */
 #define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT 0
-#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK (0xFFFFFFFF << I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
-#define I40E_GLPCI_LBARCTRL 0x000BE484
+#define I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_LATCT_PCI_COUNT_LAT_CT_SHIFT)
+#define I40E_GLPCI_LBARCTRL 0x000BE484 /* Reset: POR */
 #define I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT 0
-#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK (0x1 << I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
+#define I40E_GLPCI_LBARCTRL_PREFBAR_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_PREFBAR_SHIFT)
 #define I40E_GLPCI_LBARCTRL_BAR32_SHIFT 1
-#define I40E_GLPCI_LBARCTRL_BAR32_MASK (0x1 << I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
+#define I40E_GLPCI_LBARCTRL_BAR32_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_BAR32_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT 3
-#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK (0x1 << I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
-#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK (0x3 << I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_FLASH_EXPOSE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_RSVD_4_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_RSVD_4_SHIFT)
 #define I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT 6
-#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
-#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK (0x1 << I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_FL_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_FL_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_RSVD_10_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_RSVD_10_SHIFT)
 #define I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT 11
-#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK (0x7 << I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
-#define I40E_GLPCI_LINKCAP 0x000BE4AC
+#define I40E_GLPCI_LBARCTRL_EXROM_SIZE_MASK I40E_MASK(0x7, I40E_GLPCI_LBARCTRL_EXROM_SIZE_SHIFT)
+#define I40E_GLPCI_LINKCAP 0x000BE4AC /* Reset: PCIR */
 #define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT 0
-#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK (0x3F << I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
+#define I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_MASK I40E_MASK(0x3F, I40E_GLPCI_LINKCAP_LINK_SPEEDS_VECTOR_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT 6
-#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK (0x7 << I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
+#define I40E_GLPCI_LINKCAP_MAX_PAYLOAD_MASK I40E_MASK(0x7, I40E_GLPCI_LINKCAP_MAX_PAYLOAD_SHIFT)
 #define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT 9
-#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK (0xF << I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
-#define I40E_GLPCI_PCIERR 0x000BE4FC
+#define I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_MASK I40E_MASK(0xF, I40E_GLPCI_LINKCAP_MAX_LINK_WIDTH_SHIFT)
+#define I40E_GLPCI_PCIERR 0x000BE4FC /* Reset: PCIR */
 #define I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT 0
-#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK (0xFFFFFFFF << I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
-#define I40E_GLPCI_PCITEST2 0x000BE4BC
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT 0
-#define I40E_GLPCI_PCITEST2_IOV_TEST_MODE_MASK (0x1 << I40E_GLPCI_PCITEST2_IOV_TEST_MODE_SHIFT)
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT 1
-#define I40E_GLPCI_PCITEST2_TAG_ALLOC_MASK (0x1 << I40E_GLPCI_PCITEST2_TAG_ALLOC_SHIFT)
-
-#define I40E_GLPCI_PKTCT 0x0009C4BC
+#define I40E_GLPCI_PCIERR_PCIE_ERR_REP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PCIERR_PCIE_ERR_REP_SHIFT)
+#define I40E_GLPCI_PKTCT 0x0009C4BC /* Reset: PCIR */
 #define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT 0
-#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK (0xFFFFFFFF << I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
-#define I40E_GLPCI_PMSUP 0x000BE4B0
+#define I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_PKTCT_PCI_COUNT_BW_PCT_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ 0x0009C4F4 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_NPQ_NPQ_NUM_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_NPQ_INNER_NPQ_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB 0x0009C4F0 /* Reset: PCIR */
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT 0
+#define I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_MASK I40E_MASK(0x1F, I40E_GLPCI_PM_MUX_PFB_PFB_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT 16
+#define I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_MASK I40E_MASK(0x7, I40E_GLPCI_PM_MUX_PFB_INNER_PORT_SEL_SHIFT)
+#define I40E_GLPCI_PMSUP 0x000BE4B0 /* Reset: PCIR */
 #define I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT 0
-#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
+#define I40E_GLPCI_PMSUP_ASPM_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_ASPM_SUP_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT 2
-#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT 5
-#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_EXIT_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_EXIT_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT 8
-#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L0S_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L0S_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT 11
-#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK (0x7 << I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
+#define I40E_GLPCI_PMSUP_L1_ACC_LAT_MASK I40E_MASK(0x7, I40E_GLPCI_PMSUP_L1_ACC_LAT_SHIFT)
 #define I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT 14
-#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK (0x1 << I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
+#define I40E_GLPCI_PMSUP_SLOT_CLK_MASK I40E_MASK(0x1, I40E_GLPCI_PMSUP_SLOT_CLK_SHIFT)
 #define I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT 15
-#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK (0x3 << I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
-#define I40E_GLPCI_PWRDATA 0x000BE490
+#define I40E_GLPCI_PMSUP_OBFF_SUP_MASK I40E_MASK(0x3, I40E_GLPCI_PMSUP_OBFF_SUP_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC 0x0009C4EC /* Reset: PCIR */
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT 0
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_12_SHIFT)
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT 8
+#define I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_MASK I40E_MASK(0xFF, I40E_GLPCI_PQ_MAX_USED_SPC_GLPCI_PQ_MAX_USED_SPC_13_SHIFT)
+#define I40E_GLPCI_PWRDATA 0x000BE490 /* Reset: PCIR */
 #define I40E_GLPCI_PWRDATA_D0_POWER_SHIFT 0
-#define I40E_GLPCI_PWRDATA_D0_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D0_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D0_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT 8
-#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_COMM_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_COMM_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_D3_POWER_SHIFT 16
-#define I40E_GLPCI_PWRDATA_D3_POWER_MASK (0xFF << I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
+#define I40E_GLPCI_PWRDATA_D3_POWER_MASK I40E_MASK(0xFF, I40E_GLPCI_PWRDATA_D3_POWER_SHIFT)
 #define I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT 24
-#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK (0x3 << I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
-#define I40E_GLPCI_REVID 0x000BE4B4
+#define I40E_GLPCI_PWRDATA_DATA_SCALE_MASK I40E_MASK(0x3, I40E_GLPCI_PWRDATA_DATA_SCALE_SHIFT)
+#define I40E_GLPCI_REVID 0x000BE4B4 /* Reset: PCIR */
 #define I40E_GLPCI_REVID_NVM_REVID_SHIFT 0
-#define I40E_GLPCI_REVID_NVM_REVID_MASK (0xFF << I40E_GLPCI_REVID_NVM_REVID_SHIFT)
-#define I40E_GLPCI_SERH 0x000BE49C
+#define I40E_GLPCI_REVID_NVM_REVID_MASK I40E_MASK(0xFF, I40E_GLPCI_REVID_NVM_REVID_SHIFT)
+#define I40E_GLPCI_SERH 0x000BE49C /* Reset: PCIR */
 #define I40E_GLPCI_SERH_SER_NUM_H_SHIFT 0
-#define I40E_GLPCI_SERH_SER_NUM_H_MASK (0xFFFF << I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
-#define I40E_GLPCI_SERL 0x000BE498
+#define I40E_GLPCI_SERH_SER_NUM_H_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SERH_SER_NUM_H_SHIFT)
+#define I40E_GLPCI_SERL 0x000BE498 /* Reset: PCIR */
 #define I40E_GLPCI_SERL_SER_NUM_L_SHIFT 0
-#define I40E_GLPCI_SERL_SER_NUM_L_MASK (0xFFFFFFFF << I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
-#define I40E_GLPCI_SUBSYSID 0x000BE48C
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT 0
-#define I40E_GLPCI_SUBSYSID_SUB_VEN_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_VEN_ID_SHIFT)
-#define I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT 16
-#define I40E_GLPCI_SUBSYSID_SUB_ID_MASK (0xFFFF << I40E_GLPCI_SUBSYSID_SUB_ID_SHIFT)
-#define I40E_GLPCI_UPADD 0x000BE4F8
+#define I40E_GLPCI_SERL_SER_NUM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SERL_SER_NUM_L_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_0 0x0009C4F8 /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_0_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SPARE_BITS_1 0x0009C4FC /* Reset: PCIR */
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT 0
+#define I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPCI_SPARE_BITS_1_SPARE_BITS_SHIFT)
+#define I40E_GLPCI_SUBVENID 0x000BE48C /* Reset: PCIR */
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT 0
+#define I40E_GLPCI_SUBVENID_SUB_VEN_ID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_SUBVENID_SUB_VEN_ID_SHIFT)
+#define I40E_GLPCI_UPADD 0x000BE4F8 /* Reset: PCIR */
 #define I40E_GLPCI_UPADD_ADDRESS_SHIFT 1
-#define I40E_GLPCI_UPADD_ADDRESS_MASK (0x7FFFFFFF << I40E_GLPCI_UPADD_ADDRESS_SHIFT)
-#define I40E_GLPCI_VFSUP 0x000BE4B8
+#define I40E_GLPCI_UPADD_ADDRESS_MASK I40E_MASK(0x7FFFFFFF, I40E_GLPCI_UPADD_ADDRESS_SHIFT)
+#define I40E_GLPCI_VENDORID 0x000BE518 /* Reset: PCIR */
+#define I40E_GLPCI_VENDORID_VENDORID_SHIFT 0
+#define I40E_GLPCI_VENDORID_VENDORID_MASK I40E_MASK(0xFFFF, I40E_GLPCI_VENDORID_VENDORID_SHIFT)
+#define I40E_GLPCI_VFSUP 0x000BE4B8 /* Reset: PCIR */
 #define I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT 0
-#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK (0x1 << I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
+#define I40E_GLPCI_VFSUP_VF_PREFETCH_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VF_PREFETCH_SHIFT)
 #define I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT 1
-#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK (0x1 << I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
-#define I40E_PF_FUNC_RID 0x0009C000
+#define I40E_GLPCI_VFSUP_VR_BAR_TYPE_MASK I40E_MASK(0x1, I40E_GLPCI_VFSUP_VR_BAR_TYPE_SHIFT)
+#define I40E_PF_FUNC_RID 0x0009C000 /* Reset: PCIR */
 #define I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT 0
-#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK (0x7 << I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_FUNCTION_NUMBER_MASK I40E_MASK(0x7, I40E_PF_FUNC_RID_FUNCTION_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT 3
-#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK (0x1F << I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
+#define I40E_PF_FUNC_RID_DEVICE_NUMBER_MASK I40E_MASK(0x1F, I40E_PF_FUNC_RID_DEVICE_NUMBER_SHIFT)
 #define I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT 8
-#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK (0xFF << I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
-#define I40E_PF_PCI_CIAA 0x0009C080
+#define I40E_PF_FUNC_RID_BUS_NUMBER_MASK I40E_MASK(0xFF, I40E_PF_FUNC_RID_BUS_NUMBER_SHIFT)
+#define I40E_PF_PCI_CIAA 0x0009C080 /* Reset: FLR */
 #define I40E_PF_PCI_CIAA_ADDRESS_SHIFT 0
-#define I40E_PF_PCI_CIAA_ADDRESS_MASK (0xFFF << I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
+#define I40E_PF_PCI_CIAA_ADDRESS_MASK I40E_MASK(0xFFF, I40E_PF_PCI_CIAA_ADDRESS_SHIFT)
 #define I40E_PF_PCI_CIAA_VF_NUM_SHIFT 12
-#define I40E_PF_PCI_CIAA_VF_NUM_MASK (0x7F << I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
-#define I40E_PF_PCI_CIAD 0x0009C100
+#define I40E_PF_PCI_CIAA_VF_NUM_MASK I40E_MASK(0x7F, I40E_PF_PCI_CIAA_VF_NUM_SHIFT)
+#define I40E_PF_PCI_CIAD 0x0009C100 /* Reset: FLR */
 #define I40E_PF_PCI_CIAD_DATA_SHIFT 0
-#define I40E_PF_PCI_CIAD_DATA_MASK (0xFFFFFFFF << I40E_PF_PCI_CIAD_DATA_SHIFT)
-#define I40E_PFPCI_CLASS 0x000BE400
+#define I40E_PF_PCI_CIAD_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_PF_PCI_CIAD_DATA_SHIFT)
+#define I40E_PFPCI_CLASS 0x000BE400 /* Reset: PCIR */
 #define I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT 0
-#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK (0x1 << I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
-#define I40E_PFPCI_CNF 0x000BE000
+#define I40E_PFPCI_CLASS_STORAGE_CLASS_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_STORAGE_CLASS_SHIFT)
+#define I40E_PFPCI_CLASS_RESERVED_1_SHIFT 1
+#define I40E_PFPCI_CLASS_RESERVED_1_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_RESERVED_1_SHIFT)
+#define I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT 2
+#define I40E_PFPCI_CLASS_PF_IS_LAN_MASK I40E_MASK(0x1, I40E_PFPCI_CLASS_PF_IS_LAN_SHIFT)
+#define I40E_PFPCI_CNF 0x000BE000 /* Reset: PCIR */
 #define I40E_PFPCI_CNF_MSI_EN_SHIFT 2
-#define I40E_PFPCI_CNF_MSI_EN_MASK (0x1 << I40E_PFPCI_CNF_MSI_EN_SHIFT)
+#define I40E_PFPCI_CNF_MSI_EN_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_MSI_EN_SHIFT)
 #define I40E_PFPCI_CNF_EXROM_DIS_SHIFT 3
-#define I40E_PFPCI_CNF_EXROM_DIS_MASK (0x1 << I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
+#define I40E_PFPCI_CNF_EXROM_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_EXROM_DIS_SHIFT)
 #define I40E_PFPCI_CNF_IO_BAR_SHIFT 4
-#define I40E_PFPCI_CNF_IO_BAR_MASK (0x1 << I40E_PFPCI_CNF_IO_BAR_SHIFT)
+#define I40E_PFPCI_CNF_IO_BAR_MASK I40E_MASK(0x1, I40E_PFPCI_CNF_IO_BAR_SHIFT)
 #define I40E_PFPCI_CNF_INT_PIN_SHIFT 5
-#define I40E_PFPCI_CNF_INT_PIN_MASK (0x3 << I40E_PFPCI_CNF_INT_PIN_SHIFT)
-#define I40E_PFPCI_FACTPS 0x0009C180
+#define I40E_PFPCI_CNF_INT_PIN_MASK I40E_MASK(0x3, I40E_PFPCI_CNF_INT_PIN_SHIFT)
+#define I40E_PFPCI_DEVID 0x000BE080 /* Reset: PCIR */
+#define I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT 0
+#define I40E_PFPCI_DEVID_PF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_PF_DEV_ID_SHIFT)
+#define I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT 16
+#define I40E_PFPCI_DEVID_VF_DEV_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_DEVID_VF_DEV_ID_SHIFT)
+#define I40E_PFPCI_FACTPS 0x0009C180 /* Reset: FLR */
 #define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT 0
-#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK (0x3 << I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
+#define I40E_PFPCI_FACTPS_FUNC_POWER_STATE_MASK I40E_MASK(0x3, I40E_PFPCI_FACTPS_FUNC_POWER_STATE_SHIFT)
 #define I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT 3
-#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK (0x1 << I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
-#define I40E_PFPCI_FUNC 0x000BE200
+#define I40E_PFPCI_FACTPS_FUNC_AUX_EN_MASK I40E_MASK(0x1, I40E_PFPCI_FACTPS_FUNC_AUX_EN_SHIFT)
+#define I40E_PFPCI_FUNC 0x000BE200 /* Reset: POR */
 #define I40E_PFPCI_FUNC_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT 1
-#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_ALLOW_FUNC_DIS_SHIFT)
 #define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT 2
-#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK (0x1 << I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
-#define I40E_PFPCI_FUNC2 0x000BE180
+#define I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC_DIS_FUNC_ON_PORT_DIS_SHIFT)
+#define I40E_PFPCI_FUNC2 0x000BE180 /* Reset: PCIR */
 #define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT 0
-#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK (0x1 << I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
-#define I40E_PFPCI_ICAUSE 0x0009C200
+#define I40E_PFPCI_FUNC2_EMP_FUNC_DIS_MASK I40E_MASK(0x1, I40E_PFPCI_FUNC2_EMP_FUNC_DIS_SHIFT)
+#define I40E_PFPCI_ICAUSE 0x0009C200 /* Reset: PFR */
 #define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT 0
-#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK (0xFFFFFFFF << I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
-#define I40E_PFPCI_IENA 0x0009C280
+#define I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_ICAUSE_PCIE_ERR_CAUSE_SHIFT)
+#define I40E_PFPCI_IENA 0x0009C280 /* Reset: PFR */
 #define I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT 0
-#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK (0xFFFFFFFF << I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
-#define I40E_PFPCI_PFDEVID 0x000BE080
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_PFDEVID_PF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_PM 0x000BE300
+#define I40E_PFPCI_IENA_PCIE_ERR_EN_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPCI_IENA_PCIE_ERR_EN_SHIFT)
+#define I40E_PFPCI_PF_FLUSH_DONE 0x0009C800 /* Reset: PCIR */
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_PF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_PM 0x000BE300 /* Reset: POR */
 #define I40E_PFPCI_PM_PME_EN_SHIFT 0
-#define I40E_PFPCI_PM_PME_EN_MASK (0x1 << I40E_PFPCI_PM_PME_EN_SHIFT)
-#define I40E_PFPCI_STATUS1 0x000BE280
+#define I40E_PFPCI_PM_PME_EN_MASK I40E_MASK(0x1, I40E_PFPCI_PM_PME_EN_SHIFT)
+#define I40E_PFPCI_STATUS1 0x000BE280 /* Reset: POR */
 #define I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT 0
-#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK (0x1 << I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
-#define I40E_PFPCI_VFDEVID 0x000BE100
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT 0
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_LAN_SHIFT)
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT 16
-#define I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_MASK (0xFFFF << I40E_PFPCI_VFDEVID_VF_DEV_ID_SAN_SHIFT)
-#define I40E_PFPCI_VMINDEX 0x0009C300
+#define I40E_PFPCI_STATUS1_FUNC_VALID_MASK I40E_MASK(0x1, I40E_PFPCI_STATUS1_FUNC_VALID_SHIFT)
+#define I40E_PFPCI_SUBSYSID 0x000BE100 /* Reset: PCIR */
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT 0
+#define I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_PF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT 16
+#define I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_MASK I40E_MASK(0xFFFF, I40E_PFPCI_SUBSYSID_VF_SUBSYS_ID_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE 0x0000E400 /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VF_FLUSH_DONE1(_VF) (0x0009C600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: PCIR */
+#define I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX 127
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_SHIFT)
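/*
 * Registers taking a (_VF) argument are register arrays with a 4-byte
 * stride; the macro computes an instance's offset from the base
 * address, valid up to the matching _MAX_INDEX. A hedged sketch of
 * walking every per-VF instance (rd32() and the loop structure are
 * illustrative, not taken from this file):
 *
 *	int vf;
 *
 *	for (vf = 0; vf <= I40E_PFPCI_VF_FLUSH_DONE1_MAX_INDEX; vf++) {
 *		u32 reg = rd32(hw, I40E_PFPCI_VF_FLUSH_DONE1(vf));
 *		bool done = reg & I40E_PFPCI_VF_FLUSH_DONE1_FLUSH_DONE_MASK;
 *
 *		/* act on "done" for this VF */
 *	}
 */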
+#define I40E_PFPCI_VM_FLUSH_DONE 0x0009C880 /* Reset: PCIR */
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT 0
+#define I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_MASK I40E_MASK(0x1, I40E_PFPCI_VM_FLUSH_DONE_FLUSH_DONE_SHIFT)
+#define I40E_PFPCI_VMINDEX 0x0009C300 /* Reset: PCIR */
 #define I40E_PFPCI_VMINDEX_VMINDEX_SHIFT 0
-#define I40E_PFPCI_VMINDEX_VMINDEX_MASK (0x1FF << I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
-#define I40E_PFPCI_VMPEND 0x0009C380
+#define I40E_PFPCI_VMINDEX_VMINDEX_MASK I40E_MASK(0x1FF, I40E_PFPCI_VMINDEX_VMINDEX_SHIFT)
+#define I40E_PFPCI_VMPEND 0x0009C380 /* Reset: PCIR */
 #define I40E_PFPCI_VMPEND_PENDING_SHIFT 0
-#define I40E_PFPCI_VMPEND_PENDING_MASK (0x1 << I40E_PFPCI_VMPEND_PENDING_SHIFT)
-#define I40E_GLPE_CPUSTATUS0 0x0000D040
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
-#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
-#define I40E_GLPE_CPUSTATUS1 0x0000D044
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
-#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
-#define I40E_GLPE_CPUSTATUS2 0x0000D048
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
-#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK (0xFFFFFFFF << I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL(_i) (0x0000D480 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPE_PFFLMOBJCTRL_MAX_INDEX 15
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_PFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
-#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
-#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK (0x7 << I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
-#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
-#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
-#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK (0x1 << I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_AEQALLOC 0x00131180
-#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_PFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_PFPE_CCQPHIGH 0x00008200
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_PFPE_CCQPLOW 0x00008180
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_PFPE_CCQPSTATUS 0x00008100
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_PFPE_CQACK 0x00131100
-#define I40E_PFPE_CQACK_PECQID_SHIFT 0
-#define I40E_PFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_PFPE_CQACK_PECQID_SHIFT)
-#define I40E_PFPE_CQARM 0x00131080
-#define I40E_PFPE_CQARM_PECQID_SHIFT 0
-#define I40E_PFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_PFPE_CQARM_PECQID_SHIFT)
-#define I40E_PFPE_CQPDB 0x00008000
-#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_PFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_PFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_PFPE_CQPERRCODES 0x00008880
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_PFPE_CQPTAIL 0x00008080
-#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_PFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_FLMXMITALLOCERR 0x00008900
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
-#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK (0xFFFF << I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
-#define I40E_PFPE_IPCONFIG0 0x00008280
-#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_PFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-
-#define I40E_PFPE_MRTEIDXMASK 0x00008600
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_PFPE_TCPNOWTIMER 0x00008580
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_PFPE_UDACTRL 0x00008700
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
-#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
-#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
-#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
-#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK (0x1 << I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
-#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK (0x1 << I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN 0x00008780
-#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
-#define I40E_PFPE_UDAUCFBQPN_QPN_MASK (0x3FFFF << I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
-#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
-#define I40E_PFPE_UDAUCFBQPN_VALID_MASK (0x1 << I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
-#define I40E_PFPE_WQEALLOC 0x00138C00
-#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_PFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
-#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQACK_MAX_INDEX 127
-#define I40E_VFPE_CQACK_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK_PECQID_SHIFT)
-#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQARM_MAX_INDEX 127
-#define I40E_VFPE_CQARM_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPDB_MAX_INDEX 127
-#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
-#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
-#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG0_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4))
-#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _i=0...127 */
-#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
-#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
-#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
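/*
 * The GLPES statistics here are 48-bit counters split across a
 * *LO/*HI register pair: the LO half is a full 32 bits, the HI half
 * only 16, as the masks show. A hedged sketch of combining one pair
 * into a 64-bit value (rd32(), the read order, and "idx", the per-PF
 * index, are illustrative, not taken from this file):
 *
 *	u64 lo = rd32(hw, I40E_GLPES_PFIP4RXOCTSLO(idx));
 *	u64 hi = rd32(hw, I40E_GLPES_PFIP4RXOCTSHI(idx)) &
 *		 I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK;
 *	u64 ip4_rx_octets = (hi << 32) | lo;
 */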
-#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8))
-#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8))
-#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8))
-#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4))
-#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */
-#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
-#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
-#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK (0xFFFFFF << I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
-#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
-#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
-#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
-#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
-#define I40E_GLPES_RDMARXUNALIGN 0x0001E000
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
-#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK (0xFFFFFFFF << I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
-#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
-#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK (0xFFFFFF << I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
-#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
-#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK (0xFFFFFFFF << I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
-#define I40E_GLPES_TCPRXUNEXPERR 0x0001E008
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT 0
-#define I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_MASK (0xFFFFFF << I40E_GLPES_TCPRXUNEXPERR_TCPRXUNEXPERR_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK (0xFFFFFF << I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
-#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
-#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK (0xFFFFFFFF << I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 4))
-#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 4))
-#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
-#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 4))
-#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
-#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 4))
-#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
-#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK (0xFFFFFF << I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
-#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK (0xFFFF << I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
-#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
-#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
-#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
-#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
-#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
-#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK (0xFFFFFFFF << I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
-#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
-#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK (0xFFFFFF << I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
-#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
-#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
-#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
-#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4))
-#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
-#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK (0xFFFFFF << I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK (0xFFFF << I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
-#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
-#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK (0xFFFF << I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
-#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
-#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK (0xFFFFFFFF << I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK (0xFFFF << I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
-#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 4)) /* _i=0...31 */
-#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
-#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK (0xFFFFFFFF << I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
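The *SLO/*SHI pairs deleted above are the low and high words of 48-bit VF protocol-engine statistics: a full 32-bit LO mask (0xFFFFFFFF) and a 16-bit HI mask (0xFFFF). A minimal sketch of reading back such a split counter, with read_reg() standing in for the driver's MMIO accessor (name and signature are assumptions, not taken from this diff):

    #include <stdint.h>

    /* Sketch: assemble one 48-bit statistic from a *SLO/*SHI register
     * pair, as the mask widths above suggest (0xFFFFFFFF low word,
     * 0xFFFF high word). read_reg() is a placeholder accessor. */
    static uint64_t read_stat48(uint32_t (*read_reg)(uint32_t addr),
                                uint32_t lo_reg, uint32_t hi_reg)
    {
        uint64_t lo = read_reg(lo_reg);          /* full 32-bit low word */
        uint64_t hi = read_reg(hi_reg) & 0xFFFF; /* 16-bit high word */

        return (hi << 32) | lo;                  /* 48-bit counter value */
    }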
-#define I40E_PRTPM_EEE_STAT 0x001E4320
+#define I40E_PFPCI_VMPEND_PENDING_MASK I40E_MASK(0x1, I40E_PFPCI_VMPEND_PENDING_SHIFT)
+#define I40E_PRTPM_EEE_STAT 0x001E4320 /* Reset: GLOBR */
 #define I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT 29
-#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK (0x1 << I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
+#define I40E_PRTPM_EEE_STAT_EEE_NEG_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_EEE_NEG_SHIFT)
 #define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT 30
-#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_RX_LPI_STATUS_SHIFT)
 #define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT 31
-#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK (0x1 << I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
-#define I40E_PRTPM_EEEC 0x001E4380
+#define I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_MASK I40E_MASK(0x1, I40E_PRTPM_EEE_STAT_TX_LPI_STATUS_SHIFT)
+#define I40E_PRTPM_EEEC 0x001E4380 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT 16
-#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK (0x3F << I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
+#define I40E_PRTPM_EEEC_TW_WAKE_MIN_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TW_WAKE_MIN_SHIFT)
 #define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT 24
-#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK (0x3 << I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
+#define I40E_PRTPM_EEEC_TX_LU_LPI_DLY_MASK I40E_MASK(0x3, I40E_PRTPM_EEEC_TX_LU_LPI_DLY_SHIFT)
 #define I40E_PRTPM_EEEC_TEEE_DLY_SHIFT 26
-#define I40E_PRTPM_EEEC_TEEE_DLY_MASK (0x3F << I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
-#define I40E_PRTPM_EEEFWD 0x001E4400
+#define I40E_PRTPM_EEEC_TEEE_DLY_MASK I40E_MASK(0x3F, I40E_PRTPM_EEEC_TEEE_DLY_SHIFT)
+#define I40E_PRTPM_EEEFWD 0x001E4400 /* Reset: GLOBR */
 #define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT 31
-#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK (0x1 << I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
-#define I40E_PRTPM_EEER 0x001E4360
+#define I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_MASK I40E_MASK(0x1, I40E_PRTPM_EEEFWD_EEE_FW_CONFIG_DONE_SHIFT)
+#define I40E_PRTPM_EEER 0x001E4360 /* Reset: GLOBR */
 #define I40E_PRTPM_EEER_TW_SYSTEM_SHIFT 0
-#define I40E_PRTPM_EEER_TW_SYSTEM_MASK (0xFFFF << I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
+#define I40E_PRTPM_EEER_TW_SYSTEM_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEER_TW_SYSTEM_SHIFT)
 #define I40E_PRTPM_EEER_TX_LPI_EN_SHIFT 16
-#define I40E_PRTPM_EEER_TX_LPI_EN_MASK (0x1 << I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
-#define I40E_PRTPM_EEETXC 0x001E43E0
+#define I40E_PRTPM_EEER_TX_LPI_EN_MASK I40E_MASK(0x1, I40E_PRTPM_EEER_TX_LPI_EN_SHIFT)
+#define I40E_PRTPM_EEETXC 0x001E43E0 /* Reset: GLOBR */
 #define I40E_PRTPM_EEETXC_TW_PHY_SHIFT 0
-#define I40E_PRTPM_EEETXC_TW_PHY_MASK (0xFFFF << I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
-#define I40E_PRTPM_GC 0x000B8140
+#define I40E_PRTPM_EEETXC_TW_PHY_MASK I40E_MASK(0xFFFF, I40E_PRTPM_EEETXC_TW_PHY_SHIFT)
+#define I40E_PRTPM_GC 0x000B8140 /* Reset: POR */
 #define I40E_PRTPM_GC_EMP_LINK_ON_SHIFT 0
-#define I40E_PRTPM_GC_EMP_LINK_ON_MASK (0x1 << I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
+#define I40E_PRTPM_GC_EMP_LINK_ON_MASK I40E_MASK(0x1, I40E_PRTPM_GC_EMP_LINK_ON_SHIFT)
 #define I40E_PRTPM_GC_MNG_VETO_SHIFT 1
-#define I40E_PRTPM_GC_MNG_VETO_MASK (0x1 << I40E_PRTPM_GC_MNG_VETO_SHIFT)
+#define I40E_PRTPM_GC_MNG_VETO_MASK I40E_MASK(0x1, I40E_PRTPM_GC_MNG_VETO_SHIFT)
 #define I40E_PRTPM_GC_RATD_SHIFT 2
-#define I40E_PRTPM_GC_RATD_MASK (0x1 << I40E_PRTPM_GC_RATD_SHIFT)
+#define I40E_PRTPM_GC_RATD_MASK I40E_MASK(0x1, I40E_PRTPM_GC_RATD_SHIFT)
 #define I40E_PRTPM_GC_LCDMP_SHIFT 3
-#define I40E_PRTPM_GC_LCDMP_MASK (0x1 << I40E_PRTPM_GC_LCDMP_SHIFT)
+#define I40E_PRTPM_GC_LCDMP_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LCDMP_SHIFT)
 #define I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT 31
-#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK (0x1 << I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
-#define I40E_PRTPM_RLPIC 0x001E43A0
+#define I40E_PRTPM_GC_LPLU_ASSERTED_MASK I40E_MASK(0x1, I40E_PRTPM_GC_LPLU_ASSERTED_SHIFT)
+#define I40E_PRTPM_RLPIC 0x001E43A0 /* Reset: GLOBR */
 #define I40E_PRTPM_RLPIC_ERLPIC_SHIFT 0
-#define I40E_PRTPM_RLPIC_ERLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
-#define I40E_PRTPM_TLPIC 0x001E43C0
+#define I40E_PRTPM_RLPIC_ERLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_RLPIC_ERLPIC_SHIFT)
+#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
-#define I40E_PRTPM_TLPIC_ETLPIC_MASK (0xFFFFFFFF << I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
-#define I40E_GLRPB_DPSS 0x000AC828
+#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
+#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
-#define I40E_GLRPB_DPSS_DPS_TCN_MASK (0xFFFFF << I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
-#define I40E_GLRPB_GHW 0x000AC830
+#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
+#define I40E_GLRPB_GHW 0x000AC830 /* Reset: CORER */
 #define I40E_GLRPB_GHW_GHW_SHIFT 0
-#define I40E_GLRPB_GHW_GHW_MASK (0xFFFFF << I40E_GLRPB_GHW_GHW_SHIFT)
-#define I40E_GLRPB_GLW 0x000AC834
+#define I40E_GLRPB_GHW_GHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GHW_GHW_SHIFT)
+#define I40E_GLRPB_GLW 0x000AC834 /* Reset: CORER */
 #define I40E_GLRPB_GLW_GLW_SHIFT 0
-#define I40E_GLRPB_GLW_GLW_MASK (0xFFFFF << I40E_GLRPB_GLW_GLW_SHIFT)
-#define I40E_GLRPB_PHW 0x000AC844
+#define I40E_GLRPB_GLW_GLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_GLW_GLW_SHIFT)
+#define I40E_GLRPB_PHW 0x000AC844 /* Reset: CORER */
 #define I40E_GLRPB_PHW_PHW_SHIFT 0
-#define I40E_GLRPB_PHW_PHW_MASK (0xFFFFF << I40E_GLRPB_PHW_PHW_SHIFT)
-#define I40E_GLRPB_PLW 0x000AC848
+#define I40E_GLRPB_PHW_PHW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PHW_PHW_SHIFT)
+#define I40E_GLRPB_PLW 0x000AC848 /* Reset: CORER */
 #define I40E_GLRPB_PLW_PLW_SHIFT 0
-#define I40E_GLRPB_PLW_PLW_MASK (0xFFFFF << I40E_GLRPB_PLW_PLW_SHIFT)
-#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_GLRPB_PLW_PLW_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_PLW_PLW_SHIFT)
+#define I40E_PRTRPB_DHW(_i) (0x000AC100 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DHW_MAX_INDEX 7
 #define I40E_PRTRPB_DHW_DHW_TCN_SHIFT 0
-#define I40E_PRTRPB_DHW_DHW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
-#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DHW_DHW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DHW_DHW_TCN_SHIFT)
+#define I40E_PRTRPB_DLW(_i) (0x000AC220 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DLW_MAX_INDEX 7
 #define I40E_PRTRPB_DLW_DLW_TCN_SHIFT 0
-#define I40E_PRTRPB_DLW_DLW_TCN_MASK (0xFFFFF << I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
-#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DLW_DLW_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DLW_DLW_TCN_SHIFT)
+#define I40E_PRTRPB_DPS(_i) (0x000AC320 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_DPS_MAX_INDEX 7
 #define I40E_PRTRPB_DPS_DPS_TCN_SHIFT 0
-#define I40E_PRTRPB_DPS_DPS_TCN_MASK (0xFFFFF << I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
-#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_DPS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_DPS_DPS_TCN_SHIFT)
+#define I40E_PRTRPB_SHT(_i) (0x000AC480 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SHT_MAX_INDEX 7
 #define I40E_PRTRPB_SHT_SHT_TCN_SHIFT 0
-#define I40E_PRTRPB_SHT_SHT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
-#define I40E_PRTRPB_SHW 0x000AC580
+#define I40E_PRTRPB_SHT_SHT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHT_SHT_TCN_SHIFT)
+#define I40E_PRTRPB_SHW 0x000AC580 /* Reset: CORER */
 #define I40E_PRTRPB_SHW_SHW_SHIFT 0
-#define I40E_PRTRPB_SHW_SHW_MASK (0xFFFFF << I40E_PRTRPB_SHW_SHW_SHIFT)
-#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */
+#define I40E_PRTRPB_SHW_SHW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SHW_SHW_SHIFT)
+#define I40E_PRTRPB_SLT(_i) (0x000AC5A0 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_PRTRPB_SLT_MAX_INDEX 7
 #define I40E_PRTRPB_SLT_SLT_TCN_SHIFT 0
-#define I40E_PRTRPB_SLT_SLT_TCN_MASK (0xFFFFF << I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
-#define I40E_PRTRPB_SLW 0x000AC6A0
+#define I40E_PRTRPB_SLT_SLT_TCN_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLT_SLT_TCN_SHIFT)
+#define I40E_PRTRPB_SLW 0x000AC6A0 /* Reset: CORER */
 #define I40E_PRTRPB_SLW_SLW_SHIFT 0
-#define I40E_PRTRPB_SLW_SLW_MASK (0xFFFFF << I40E_PRTRPB_SLW_SLW_SHIFT)
-#define I40E_PRTRPB_SPS 0x000AC7C0
+#define I40E_PRTRPB_SLW_SLW_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SLW_SLW_SHIFT)
+#define I40E_PRTRPB_SPS 0x000AC7C0 /* Reset: CORER */
 #define I40E_PRTRPB_SPS_SPS_SHIFT 0
-#define I40E_PRTRPB_SPS_SPS_MASK (0xFFFFF << I40E_PRTRPB_SPS_SPS_SHIFT)
-#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */
-#define I40E_GLQF_APBVT_MAX_INDEX 2047
-#define I40E_GLQF_APBVT_APBVT_SHIFT 0
-#define I40E_GLQF_APBVT_APBVT_MASK (0xFFFFFFFF << I40E_GLQF_APBVT_APBVT_SHIFT)
-#define I40E_GLQF_CTL 0x00269BA4
+#define I40E_PRTRPB_SPS_SPS_MASK I40E_MASK(0xFFFFF, I40E_PRTRPB_SPS_SPS_SHIFT)
+#define I40E_GLQF_CTL 0x00269BA4 /* Reset: CORER */
 #define I40E_GLQF_CTL_HTOEP_SHIFT 1
-#define I40E_GLQF_CTL_HTOEP_MASK (0x1 << I40E_GLQF_CTL_HTOEP_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_SHIFT)
 #define I40E_GLQF_CTL_HTOEP_FCOE_SHIFT 2
-#define I40E_GLQF_CTL_HTOEP_FCOE_MASK (0x1 << I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
+#define I40E_GLQF_CTL_HTOEP_FCOE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_HTOEP_FCOE_SHIFT)
 #define I40E_GLQF_CTL_PCNT_ALLOC_SHIFT 3
-#define I40E_GLQF_CTL_PCNT_ALLOC_MASK (0x7 << I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_PCNT_ALLOC_MASK I40E_MASK(0x7, I40E_GLQF_CTL_PCNT_ALLOC_SHIFT)
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT 6
+#define I40E_GLQF_CTL_FD_AUTO_PCTYPE_MASK I40E_MASK(0x1, I40E_GLQF_CTL_FD_AUTO_PCTYPE_SHIFT)
 #define I40E_GLQF_CTL_RSVD_SHIFT 7
-#define I40E_GLQF_CTL_RSVD_MASK (0x1 << I40E_GLQF_CTL_RSVD_SHIFT)
+#define I40E_GLQF_CTL_RSVD_MASK I40E_MASK(0x1, I40E_GLQF_CTL_RSVD_SHIFT)
 #define I40E_GLQF_CTL_MAXPEBLEN_SHIFT 8
-#define I40E_GLQF_CTL_MAXPEBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXPEBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXPEBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFCBLEN_SHIFT 11
-#define I40E_GLQF_CTL_MAXFCBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFCBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFCBLEN_SHIFT)
 #define I40E_GLQF_CTL_MAXFDBLEN_SHIFT 14
-#define I40E_GLQF_CTL_MAXFDBLEN_MASK (0x7 << I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
+#define I40E_GLQF_CTL_MAXFDBLEN_MASK I40E_MASK(0x7, I40E_GLQF_CTL_MAXFDBLEN_SHIFT)
 #define I40E_GLQF_CTL_FDBEST_SHIFT 17
-#define I40E_GLQF_CTL_FDBEST_MASK (0xFF << I40E_GLQF_CTL_FDBEST_SHIFT)
+#define I40E_GLQF_CTL_FDBEST_MASK I40E_MASK(0xFF, I40E_GLQF_CTL_FDBEST_SHIFT)
 #define I40E_GLQF_CTL_PROGPRIO_SHIFT 25
-#define I40E_GLQF_CTL_PROGPRIO_MASK (0x1 << I40E_GLQF_CTL_PROGPRIO_SHIFT)
+#define I40E_GLQF_CTL_PROGPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_PROGPRIO_SHIFT)
 #define I40E_GLQF_CTL_INVALPRIO_SHIFT 26
-#define I40E_GLQF_CTL_INVALPRIO_MASK (0x1 << I40E_GLQF_CTL_INVALPRIO_SHIFT)
+#define I40E_GLQF_CTL_INVALPRIO_MASK I40E_MASK(0x1, I40E_GLQF_CTL_INVALPRIO_SHIFT)
 #define I40E_GLQF_CTL_IGNORE_IP_SHIFT 27
-#define I40E_GLQF_CTL_IGNORE_IP_MASK (0x1 << I40E_GLQF_CTL_IGNORE_IP_SHIFT)
-#define I40E_GLQF_FDCNT_0 0x00269BAC
+#define I40E_GLQF_CTL_IGNORE_IP_MASK I40E_MASK(0x1, I40E_GLQF_CTL_IGNORE_IP_SHIFT)
+#define I40E_GLQF_FDCNT_0 0x00269BAC /* Reset: CORER */
 #define I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT 0
-#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
+#define I40E_GLQF_FDCNT_0_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_GUARANT_CNT_SHIFT)
 #define I40E_GLQF_FDCNT_0_BESTCNT_SHIFT 13
-#define I40E_GLQF_FDCNT_0_BESTCNT_MASK (0x1FFF << I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
-#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */
+#define I40E_GLQF_FDCNT_0_BESTCNT_MASK I40E_MASK(0x1FFF, I40E_GLQF_FDCNT_0_BESTCNT_SHIFT)
+#define I40E_GLQF_HKEY(_i) (0x00270140 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
+#define I40E_GLQF_HKEY_MAX_INDEX 12
+#define I40E_GLQF_HKEY_KEY_0_SHIFT 0
+#define I40E_GLQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_0_SHIFT)
+#define I40E_GLQF_HKEY_KEY_1_SHIFT 8
+#define I40E_GLQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_1_SHIFT)
+#define I40E_GLQF_HKEY_KEY_2_SHIFT 16
+#define I40E_GLQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_2_SHIFT)
+#define I40E_GLQF_HKEY_KEY_3_SHIFT 24
+#define I40E_GLQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_GLQF_HKEY_KEY_3_SHIFT)
+#define I40E_GLQF_HSYM(_i) (0x00269D00 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_HSYM_MAX_INDEX 63
 #define I40E_GLQF_HSYM_SYMH_ENA_SHIFT 0
-#define I40E_GLQF_HSYM_SYMH_ENA_MASK (0x1 << I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
-#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */
+#define I40E_GLQF_HSYM_SYMH_ENA_MASK I40E_MASK(0x1, I40E_GLQF_HSYM_SYMH_ENA_SHIFT)
+#define I40E_GLQF_PCNT(_i) (0x00266800 + ((_i) * 4)) /* _i=0...511 */ /* Reset: CORER */
 #define I40E_GLQF_PCNT_MAX_INDEX 511
 #define I40E_GLQF_PCNT_PCNT_SHIFT 0
-#define I40E_GLQF_PCNT_PCNT_MASK (0xFFFFFFFF << I40E_GLQF_PCNT_PCNT_SHIFT)
-#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */
+#define I40E_GLQF_PCNT_PCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_PCNT_PCNT_SHIFT)
+#define I40E_GLQF_SWAP(_i, _j) (0x00267E00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
 #define I40E_GLQF_SWAP_MAX_INDEX 1
 #define I40E_GLQF_SWAP_OFF0_SRC0_SHIFT 0
-#define I40E_GLQF_SWAP_OFF0_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF0_SRC1_SHIFT 6
-#define I40E_GLQF_SWAP_OFF0_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF0_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF0_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN0_SHIFT 12
-#define I40E_GLQF_SWAP_FLEN0_MASK (0xF << I40E_GLQF_SWAP_FLEN0_SHIFT)
+#define I40E_GLQF_SWAP_FLEN0_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC0_SHIFT 16
-#define I40E_GLQF_SWAP_OFF1_SRC0_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC0_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC0_SHIFT)
 #define I40E_GLQF_SWAP_OFF1_SRC1_SHIFT 22
-#define I40E_GLQF_SWAP_OFF1_SRC1_MASK (0x3F << I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
+#define I40E_GLQF_SWAP_OFF1_SRC1_MASK I40E_MASK(0x3F, I40E_GLQF_SWAP_OFF1_SRC1_SHIFT)
 #define I40E_GLQF_SWAP_FLEN1_SHIFT 28
-#define I40E_GLQF_SWAP_FLEN1_MASK (0xF << I40E_GLQF_SWAP_FLEN1_SHIFT)
-#define I40E_PFQF_CTL_0 0x001C0AC0
+#define I40E_GLQF_SWAP_FLEN1_MASK I40E_MASK(0xF, I40E_GLQF_SWAP_FLEN1_SHIFT)
+#define I40E_PFQF_CTL_0 0x001C0AC0 /* Reset: CORER */
 #define I40E_PFQF_CTL_0_PEHSIZE_SHIFT 0
-#define I40E_PFQF_CTL_0_PEHSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PEDSIZE_SHIFT 5
-#define I40E_PFQF_CTL_0_PEDSIZE_MASK (0x1F << I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_0_PEDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT 10
-#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_PFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT 14
-#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_PFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_PFFCDSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT 16
-#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK (0x1 << I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_HASHLUTSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_FD_ENA_SHIFT 17
-#define I40E_PFQF_CTL_0_FD_ENA_MASK (0x1 << I40E_PFQF_CTL_0_FD_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_FD_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_FD_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT 18
-#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK (0x1 << I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_ETYPE_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_ETYPE_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT 19
-#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK (0x1 << I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
+#define I40E_PFQF_CTL_0_MACVLAN_ENA_MASK I40E_MASK(0x1, I40E_PFQF_CTL_0_MACVLAN_ENA_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT 20
-#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK (0xF << I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
+#define I40E_PFQF_CTL_0_VFFCHSIZE_MASK I40E_MASK(0xF, I40E_PFQF_CTL_0_VFFCHSIZE_SHIFT)
 #define I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT 24
-#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK (0x3 << I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
-#define I40E_PFQF_CTL_1 0x00245D80
+#define I40E_PFQF_CTL_0_VFFCDSIZE_MASK I40E_MASK(0x3, I40E_PFQF_CTL_0_VFFCDSIZE_SHIFT)
+#define I40E_PFQF_CTL_1 0x00245D80 /* Reset: CORER */
 #define I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT 0
-#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK (0x1 << I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
-#define I40E_PFQF_FDALLOC 0x00246280
+#define I40E_PFQF_CTL_1_CLEARFDTABLE_MASK I40E_MASK(0x1, I40E_PFQF_CTL_1_CLEARFDTABLE_SHIFT)
+#define I40E_PFQF_FDALLOC 0x00246280 /* Reset: CORER */
 #define I40E_PFQF_FDALLOC_FDALLOC_SHIFT 0
-#define I40E_PFQF_FDALLOC_FDALLOC_MASK (0xFF << I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
+#define I40E_PFQF_FDALLOC_FDALLOC_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDALLOC_SHIFT)
 #define I40E_PFQF_FDALLOC_FDBEST_SHIFT 8
-#define I40E_PFQF_FDALLOC_FDBEST_MASK (0xFF << I40E_PFQF_FDALLOC_FDBEST_SHIFT)
-#define I40E_PFQF_FDSTAT 0x00246380
+#define I40E_PFQF_FDALLOC_FDBEST_MASK I40E_MASK(0xFF, I40E_PFQF_FDALLOC_FDBEST_SHIFT)
+#define I40E_PFQF_FDSTAT 0x00246380 /* Reset: CORER */
 #define I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT 0
-#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
+#define I40E_PFQF_FDSTAT_GUARANT_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_GUARANT_CNT_SHIFT)
 #define I40E_PFQF_FDSTAT_BEST_CNT_SHIFT 16
-#define I40E_PFQF_FDSTAT_BEST_CNT_MASK (0x1FFF << I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
-#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */
+#define I40E_PFQF_FDSTAT_BEST_CNT_MASK I40E_MASK(0x1FFF, I40E_PFQF_FDSTAT_BEST_CNT_SHIFT)
+#define I40E_PFQF_HENA(_i) (0x00245900 + ((_i) * 128)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_PFQF_HENA_MAX_INDEX 1
 #define I40E_PFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_PFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */
+#define I40E_PFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_PFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_PFQF_HKEY(_i) (0x00244800 + ((_i) * 128)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_PFQF_HKEY_MAX_INDEX 12
 #define I40E_PFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_PFQF_HKEY_KEY_0_MASK (0xFF << I40E_PFQF_HKEY_KEY_0_SHIFT)
+#define I40E_PFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_0_SHIFT)
 #define I40E_PFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_PFQF_HKEY_KEY_1_MASK (0xFF << I40E_PFQF_HKEY_KEY_1_SHIFT)
+#define I40E_PFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_1_SHIFT)
 #define I40E_PFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_PFQF_HKEY_KEY_2_MASK (0xFF << I40E_PFQF_HKEY_KEY_2_SHIFT)
+#define I40E_PFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_2_SHIFT)
 #define I40E_PFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_PFQF_HKEY_KEY_3_MASK (0xFF << I40E_PFQF_HKEY_KEY_3_SHIFT)
-#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */
+#define I40E_PFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_PFQF_HKEY_KEY_3_SHIFT)
+#define I40E_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_PFQF_HLUT_MAX_INDEX 127
 #define I40E_PFQF_HLUT_LUT0_SHIFT 0
-#define I40E_PFQF_HLUT_LUT0_MASK (0x3F << I40E_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_PFQF_HLUT_LUT0_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT0_SHIFT)
 #define I40E_PFQF_HLUT_LUT1_SHIFT 8
-#define I40E_PFQF_HLUT_LUT1_MASK (0x3F << I40E_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_PFQF_HLUT_LUT1_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT1_SHIFT)
 #define I40E_PFQF_HLUT_LUT2_SHIFT 16
-#define I40E_PFQF_HLUT_LUT2_MASK (0x3F << I40E_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_PFQF_HLUT_LUT2_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT2_SHIFT)
 #define I40E_PFQF_HLUT_LUT3_SHIFT 24
-#define I40E_PFQF_HLUT_LUT3_MASK (0x3F << I40E_PFQF_HLUT_LUT3_SHIFT)
-#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */
-#define I40E_PFQF_HREGION_MAX_INDEX 7
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
-#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_PFQF_HREGION_REGION_0_MASK (0x7 << I40E_PFQF_HREGION_REGION_0_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
-#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_PFQF_HREGION_REGION_1_MASK (0x7 << I40E_PFQF_HREGION_REGION_1_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
-#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_PFQF_HREGION_REGION_2_MASK (0x7 << I40E_PFQF_HREGION_REGION_2_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
-#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_PFQF_HREGION_REGION_3_MASK (0x7 << I40E_PFQF_HREGION_REGION_3_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
-#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_PFQF_HREGION_REGION_4_MASK (0x7 << I40E_PFQF_HREGION_REGION_4_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
-#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_PFQF_HREGION_REGION_5_MASK (0x7 << I40E_PFQF_HREGION_REGION_5_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
-#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_PFQF_HREGION_REGION_6_MASK (0x7 << I40E_PFQF_HREGION_REGION_6_SHIFT)
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
-#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_PFQF_HREGION_REGION_7_MASK (0x7 << I40E_PFQF_HREGION_REGION_7_SHIFT)
-#define I40E_PRTQF_CTL_0 0x00256E60
+#define I40E_PFQF_HLUT_LUT3_MASK I40E_MASK(0x3F, I40E_PFQF_HLUT_LUT3_SHIFT)
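The PFQF_HKEY array above (indices 0...12) holds the 52-byte RSS hash key, and PFQF_HLUT (indices 0...127) packs four 6-bit queue indices per dword, at shifts 0/8/16/24, into a 512-entry lookup table. A minimal sketch of striping queues across that table, assuming a hypothetical write_reg() accessor; only the address arithmetic and field layout mirror the macros above:

    #include <stdint.h>

    /* Sketch: mirror of the I40E_PFQF_HLUT(_i) address macro above. */
    #define PFQF_HLUT(i) (0x00240000u + ((i) * 128u))

    /* Populate the 512-entry RSS LUT (128 registers x 4 six-bit
     * entries) by striping queue indices; num_queues must be >= 1. */
    static void fill_rss_lut(void (*write_reg)(uint32_t addr, uint32_t val),
                             uint32_t num_queues)
    {
        for (uint32_t i = 0; i <= 127; i++) {
            uint32_t val = 0;

            for (uint32_t j = 0; j < 4; j++) {
                uint32_t entry = (i * 4 + j) % num_queues;
                val |= (entry & 0x3Fu) << (j * 8);   /* LUT0..LUT3 fields */
            }
            write_reg(PFQF_HLUT(i), val);
        }
    }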
+#define I40E_PRTQF_CTL_0 0x00256E60 /* Reset: CORER */
 #define I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT 0
-#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK (0x1 << I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
-#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */
+#define I40E_PRTQF_CTL_0_HSYM_ENA_MASK I40E_MASK(0x1, I40E_PRTQF_CTL_0_HSYM_ENA_SHIFT)
+#define I40E_PRTQF_FD_FLXINSET(_i) (0x00253800 + ((_i) * 32)) /* _i=0...63 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
-#define I40E_PRTQF_FD_FLXINSET_INSET_MASK (0xFF << I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
-#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */
+#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
+#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
-#define I40E_PRTQF_FD_MSK_MASK_MASK (0xFFFF << I40E_PRTQF_FD_MSK_MASK_SHIFT)
+#define I40E_PRTQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_PRTQF_FD_MSK_MASK_SHIFT)
 #define I40E_PRTQF_FD_MSK_OFFSET_SHIFT 16
-#define I40E_PRTQF_FD_MSK_OFFSET_MASK (0x3F << I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
-#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */
+#define I40E_PRTQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_PRTQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_PRTQF_FLX_PIT(_i) (0x00255200 + ((_i) * 32)) /* _i=0...8 */ /* Reset: CORER */
 #define I40E_PRTQF_FLX_PIT_MAX_INDEX 8
 #define I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT 0
-#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK (0x1F << I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
+#define I40E_PRTQF_FLX_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_SOURCE_OFF_SHIFT)
 #define I40E_PRTQF_FLX_PIT_FSIZE_SHIFT 5
-#define I40E_PRTQF_FLX_PIT_FSIZE_MASK (0x1F << I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
+#define I40E_PRTQF_FLX_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_PRTQF_FLX_PIT_FSIZE_SHIFT)
 #define I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT 10
-#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK (0x3F << I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
-#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_PRTQF_FLX_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_PRTQF_FLX_PIT_DEST_OFF_SHIFT)
+#define I40E_VFQF_HENA1(_i, _VF) (0x00230800 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...1, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HENA1_MAX_INDEX 1
 #define I40E_VFQF_HENA1_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA1_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */
+#define I40E_VFQF_HENA1_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA1_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY1(_i, _VF) (0x00228000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...12, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY1_MAX_INDEX 12
 #define I40E_VFQF_HKEY1_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY1_KEY_0_MASK (0xFF << I40E_VFQF_HKEY1_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY1_KEY_1_MASK (0xFF << I40E_VFQF_HKEY1_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY1_KEY_2_MASK (0xFF << I40E_VFQF_HKEY1_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY1_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY1_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY1_KEY_3_MASK (0xFF << I40E_VFQF_HKEY1_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */
+#define I40E_VFQF_HKEY1_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY1_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT1(_i, _VF) (0x00220000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...15, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT1_MAX_INDEX 15
 #define I40E_VFQF_HLUT1_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT1_LUT0_MASK (0xF << I40E_VFQF_HLUT1_LUT0_SHIFT)
+#define I40E_VFQF_HLUT1_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT0_SHIFT)
 #define I40E_VFQF_HLUT1_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT1_LUT1_MASK (0xF << I40E_VFQF_HLUT1_LUT1_SHIFT)
+#define I40E_VFQF_HLUT1_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT1_SHIFT)
 #define I40E_VFQF_HLUT1_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT1_LUT2_MASK (0xF << I40E_VFQF_HLUT1_LUT2_SHIFT)
+#define I40E_VFQF_HLUT1_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT2_SHIFT)
 #define I40E_VFQF_HLUT1_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT1_LUT3_MASK (0xF << I40E_VFQF_HLUT1_LUT3_SHIFT)
-#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4))
+#define I40E_VFQF_HLUT1_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT1_LUT3_SHIFT)
+#define I40E_VFQF_HREGION1(_i, _VF) (0x0022E000 + ((_i) * 1024 + (_VF) * 4)) /* _i=0...7, _VF=0...127 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION1_MAX_INDEX 7
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION1_REGION_0_MASK (0x7 << I40E_VFQF_HREGION1_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION1_REGION_1_MASK (0x7 << I40E_VFQF_HREGION1_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION1_REGION_2_MASK (0x7 << I40E_VFQF_HREGION1_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION1_REGION_3_MASK (0x7 << I40E_VFQF_HREGION1_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION1_REGION_4_MASK (0x7 << I40E_VFQF_HREGION1_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION1_REGION_5_MASK (0x7 << I40E_VFQF_HREGION1_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION1_REGION_6_MASK (0x7 << I40E_VFQF_HREGION1_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION1_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION1_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION1_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION1_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION1_REGION_7_MASK (0x7 << I40E_VFQF_HREGION1_REGION_7_SHIFT)
-#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VFQF_HREGION1_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION1_REGION_7_SHIFT)
+#define I40E_VPQF_CTL(_VF) (0x001C0000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
 #define I40E_VPQF_CTL_MAX_INDEX 127
 #define I40E_VPQF_CTL_PEHSIZE_SHIFT 0
-#define I40E_VPQF_CTL_PEHSIZE_MASK (0x1F << I40E_VPQF_CTL_PEHSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEHSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEHSIZE_SHIFT)
 #define I40E_VPQF_CTL_PEDSIZE_SHIFT 5
-#define I40E_VPQF_CTL_PEDSIZE_MASK (0x1F << I40E_VPQF_CTL_PEDSIZE_SHIFT)
+#define I40E_VPQF_CTL_PEDSIZE_MASK I40E_MASK(0x1F, I40E_VPQF_CTL_PEDSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCHSIZE_SHIFT 10
-#define I40E_VPQF_CTL_FCHSIZE_MASK (0xF << I40E_VPQF_CTL_FCHSIZE_SHIFT)
+#define I40E_VPQF_CTL_FCHSIZE_MASK I40E_MASK(0xF, I40E_VPQF_CTL_FCHSIZE_SHIFT)
 #define I40E_VPQF_CTL_FCDSIZE_SHIFT 14
-#define I40E_VPQF_CTL_FCDSIZE_MASK (0x3 << I40E_VPQF_CTL_FCDSIZE_SHIFT)
-#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */
+#define I40E_VPQF_CTL_FCDSIZE_MASK I40E_MASK(0x3, I40E_VPQF_CTL_FCDSIZE_SHIFT)
+#define I40E_VSIQF_CTL(_VSI) (0x0020D800 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_CTL_MAX_INDEX 383
 #define I40E_VSIQF_CTL_FCOE_ENA_SHIFT 0
-#define I40E_VSIQF_CTL_FCOE_ENA_MASK (0x1 << I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
+#define I40E_VSIQF_CTL_FCOE_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_FCOE_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PETCP_ENA_SHIFT 1
-#define I40E_VSIQF_CTL_PETCP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PETCP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PETCP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT 2
-#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT 3
-#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEMUDP_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMUDP_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT 4
-#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
+#define I40E_VSIQF_CTL_PEUFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEUFRAG_ENA_SHIFT)
 #define I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT 5
-#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK (0x1 << I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
-#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4))
+#define I40E_VSIQF_CTL_PEMFRAG_ENA_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_PEMFRAG_ENA_SHIFT)
+#define I40E_VSIQF_TCREGION(_i, _VSI) (0x00206000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...3, _VSI=0...383 */ /* Reset: PFR */
 #define I40E_VSIQF_TCREGION_MAX_INDEX 3
 #define I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT 0
-#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE_SHIFT 9
-#define I40E_VSIQF_TCREGION_TC_SIZE_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_SIZE_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT 16
-#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK (0x1FF << I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
+#define I40E_VSIQF_TCREGION_TC_OFFSET2_MASK I40E_MASK(0x1FF, I40E_VSIQF_TCREGION_TC_OFFSET2_SHIFT)
 #define I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT 25
-#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK (0x7 << I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
-#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_VSIQF_TCREGION_TC_SIZE2_MASK I40E_MASK(0x7, I40E_VSIQF_TCREGION_TC_SIZE2_SHIFT)
+#define I40E_GL_FCOECRC(_i) (0x00314d80 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOECRC_MAX_INDEX 143
 #define I40E_GL_FCOECRC_FCOECRC_SHIFT 0
-#define I40E_GL_FCOECRC_FCOECRC_MASK (0xFFFFFFFF << I40E_GL_FCOECRC_FCOECRC_SHIFT)
-#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOECRC_FCOECRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOECRC_FCOECRC_SHIFT)
+#define I40E_GL_FCOEDDPC(_i) (0x00314480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDDPC_MAX_INDEX 143
 #define I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT 0
-#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK (0xFFFFFFFF << I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
-/* _i=0...143 */
-#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDDPC_FCOEDDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDDPC_FCOEDDPC_SHIFT)
+#define I40E_GL_FCOEDIFEC(_i) (0x00318480 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFRC(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIFRC_MAX_INDEX 143
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT 0
-#define I40E_GL_FCOEDIFRC_FCOEDIFRC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFRC_FCOEDIFRC_SHIFT)
-#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFEC_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFEC_FCOEDIFRC_SHIFT)
+#define I40E_GL_FCOEDIFTCL(_i) (0x00354000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIFTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT 0
-#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
-#define I40E_GL_FCOEDIXAC(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */
-#define I40E_GL_FCOEDIXAC_MAX_INDEX 143
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT 0
-#define I40E_GL_FCOEDIXAC_FCOEDIXAC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXAC_FCOEDIXAC_SHIFT)
-#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIFTCL_FCOEDIFTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIFTCL_FCOEDIFTC_SHIFT)
+#define I40E_GL_FCOEDIXEC(_i) (0x0034c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXEC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT 0
-#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
-#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXEC_FCOEDIXEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXEC_FCOEDIXEC_SHIFT)
+#define I40E_GL_FCOEDIXVC(_i) (0x00350000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDIXVC_MAX_INDEX 143
 #define I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT 0
-#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK (0xFFFFFFFF << I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
-#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDIXVC_FCOEDIXVC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDIXVC_FCOEDIXVC_SHIFT)
+#define I40E_GL_FCOEDWRCH(_i) (0x00320004 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT 0
-#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK (0xFFFF << I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
-#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCH_FCOEDWRCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWRCH_FCOEDWRCH_SHIFT)
+#define I40E_GL_FCOEDWRCL(_i) (0x00320000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWRCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT 0
-#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
-#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWRCL_FCOEDWRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWRCL_FCOEDWRCL_SHIFT)
+#define I40E_GL_FCOEDWTCH(_i) (0x00348084 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCH_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT 0
-#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK (0xFFFF << I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
-#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCH_FCOEDWTCH_MASK I40E_MASK(0xFFFF, I40E_GL_FCOEDWTCH_FCOEDWTCH_SHIFT)
+#define I40E_GL_FCOEDWTCL(_i) (0x00348080 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEDWTCL_MAX_INDEX 143
 #define I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT 0
-#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK (0xFFFFFFFF << I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
-#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEDWTCL_FCOEDWTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEDWTCL_FCOEDWTCL_SHIFT)
+#define I40E_GL_FCOELAST(_i) (0x00314000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOELAST_MAX_INDEX 143
 #define I40E_GL_FCOELAST_FCOELAST_SHIFT 0
-#define I40E_GL_FCOELAST_FCOELAST_MASK (0xFFFFFFFF << I40E_GL_FCOELAST_FCOELAST_SHIFT)
-#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOELAST_FCOELAST_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOELAST_FCOELAST_SHIFT)
+#define I40E_GL_FCOEPRC(_i) (0x00315200 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPRC_MAX_INDEX 143
 #define I40E_GL_FCOEPRC_FCOEPRC_SHIFT 0
-#define I40E_GL_FCOEPRC_FCOEPRC_MASK (0xFFFFFFFF << I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
-#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPRC_FCOEPRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPRC_FCOEPRC_SHIFT)
+#define I40E_GL_FCOEPTC(_i) (0x00344C00 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOEPTC_MAX_INDEX 143
 #define I40E_GL_FCOEPTC_FCOEPTC_SHIFT 0
-#define I40E_GL_FCOEPTC_FCOEPTC_MASK (0xFFFFFFFF << I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
-#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */
+#define I40E_GL_FCOEPTC_FCOEPTC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOEPTC_FCOEPTC_SHIFT)
+#define I40E_GL_FCOERPDC(_i) (0x00324000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
 #define I40E_GL_FCOERPDC_MAX_INDEX 143
 #define I40E_GL_FCOERPDC_FCOERPDC_SHIFT 0
-#define I40E_GL_FCOERPDC_FCOERPDC_MASK (0xFFFFFFFF << I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
-#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GL_FCOERPDC_FCOERPDC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_FCOERPDC_FCOERPDC_SHIFT)
+#define I40E_GL_RXERR1_L(_i) (0x00318000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR1_L_MAX_INDEX 143
+#define I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT 0
+#define I40E_GL_RXERR1_L_FCOEDIFRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR1_L_FCOEDIFRC_SHIFT)
+#define I40E_GL_RXERR2_L(_i) (0x0031c000 + ((_i) * 8)) /* _i=0...143 */ /* Reset: CORER */
+#define I40E_GL_RXERR2_L_MAX_INDEX 143
+#define I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT 0
+#define I40E_GL_RXERR2_L_FCOEDIXAC_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_RXERR2_L_FCOEDIXAC_SHIFT)
+#define I40E_GLPRT_BPRCH(_i) (0x003005E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCH_MAX_INDEX 3
 #define I40E_GLPRT_BPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPRCL(_i) (0x003005E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPRCL_MAX_INDEX 3
 #define I40E_GLPRT_BPRCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPRCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPRCL_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPRCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPRCL_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCH(_i) (0x00300A04 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCH_MAX_INDEX 3
 #define I40E_GLPRT_BPTCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_BPTCH_UPRCH_SHIFT)
-#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_BPTCH_UPRCH_SHIFT)
+#define I40E_GLPRT_BPTCL(_i) (0x00300A00 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_BPTCL_MAX_INDEX 3
 #define I40E_GLPRT_BPTCL_UPRCH_SHIFT 0
-#define I40E_GLPRT_BPTCL_UPRCH_MASK (0xFFFFFFFF << I40E_GLPRT_BPTCL_UPRCH_SHIFT)
-#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_BPTCL_UPRCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_BPTCL_UPRCH_SHIFT)
+#define I40E_GLPRT_CRCERRS(_i) (0x00300080 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_CRCERRS_MAX_INDEX 3
 #define I40E_GLPRT_CRCERRS_CRCERRS_SHIFT 0
-#define I40E_GLPRT_CRCERRS_CRCERRS_MASK (0xFFFFFFFF << I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
-#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_CRCERRS_CRCERRS_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_CRCERRS_CRCERRS_SHIFT)
+#define I40E_GLPRT_GORCH(_i) (0x00300004 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCH_MAX_INDEX 3
 #define I40E_GLPRT_GORCH_GORCH_SHIFT 0
-#define I40E_GLPRT_GORCH_GORCH_MASK (0xFFFF << I40E_GLPRT_GORCH_GORCH_SHIFT)
-#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GORCH_GORCH_SHIFT)
+#define I40E_GLPRT_GORCL(_i) (0x00300000 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GORCL_MAX_INDEX 3
 #define I40E_GLPRT_GORCL_GORCL_SHIFT 0
-#define I40E_GLPRT_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLPRT_GORCL_GORCL_SHIFT)
-#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GORCL_GORCL_SHIFT)
+#define I40E_GLPRT_GOTCH(_i) (0x00300684 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCH_MAX_INDEX 3
 #define I40E_GLPRT_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLPRT_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLPRT_GOTCH_GOTCH_SHIFT)
-#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_GOTCH_GOTCH_SHIFT)
+#define I40E_GLPRT_GOTCL(_i) (0x00300680 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_GOTCL_MAX_INDEX 3
 #define I40E_GLPRT_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLPRT_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLPRT_GOTCL_GOTCL_SHIFT)
-#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_GOTCL_GOTCL_SHIFT)
+#define I40E_GLPRT_ILLERRC(_i) (0x003000E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ILLERRC_MAX_INDEX 3
 #define I40E_GLPRT_ILLERRC_ILLERRC_SHIFT 0
-#define I40E_GLPRT_ILLERRC_ILLERRC_MASK (0xFFFFFFFF << I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
-#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ILLERRC_ILLERRC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ILLERRC_ILLERRC_SHIFT)
+#define I40E_GLPRT_LDPC(_i) (0x00300620 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LDPC_MAX_INDEX 3
 #define I40E_GLPRT_LDPC_LDPC_SHIFT 0
-#define I40E_GLPRT_LDPC_LDPC_MASK (0xFFFFFFFF << I40E_GLPRT_LDPC_LDPC_SHIFT)
-#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LDPC_LDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LDPC_LDPC_SHIFT)
+#define I40E_GLPRT_LXOFFRXC(_i) (0x00300160 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFRXC_LXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_LXOFFTXC(_i) (0x003009A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT 0
-#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
-#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXOFFTXC_LXOFFTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXOFFTXC_LXOFFTXC_SHIFT)
+#define I40E_GLPRT_LXONRXC(_i) (0x00300140 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT 0
-#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
-#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONRXC_LXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONRXC_LXONRXCNT_SHIFT)
+#define I40E_GLPRT_LXONTXC(_i) (0x00300980 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_LXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_LXONTXC_LXONTXC_SHIFT 0
-#define I40E_GLPRT_LXONTXC_LXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
-#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_LXONTXC_LXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_LXONTXC_LXONTXC_SHIFT)
+#define I40E_GLPRT_MLFC(_i) (0x00300020 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MLFC_MAX_INDEX 3
 #define I40E_GLPRT_MLFC_MLFC_SHIFT 0
-#define I40E_GLPRT_MLFC_MLFC_MASK (0xFFFFFFFF << I40E_GLPRT_MLFC_MLFC_SHIFT)
-#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MLFC_MLFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MLFC_MLFC_SHIFT)
+#define I40E_GLPRT_MPRCH(_i) (0x003005C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCH_MAX_INDEX 3
 #define I40E_GLPRT_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLPRT_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLPRT_MPRCH_MPRCH_SHIFT)
-#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPRCH_MPRCH_SHIFT)
+#define I40E_GLPRT_MPRCL(_i) (0x003005C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPRCL_MAX_INDEX 3
 #define I40E_GLPRT_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLPRT_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPRCL_MPRCL_SHIFT)
-#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPRCL_MPRCL_SHIFT)
+#define I40E_GLPRT_MPTCH(_i) (0x003009E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCH_MAX_INDEX 3
 #define I40E_GLPRT_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLPRT_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLPRT_MPTCH_MPTCH_SHIFT)
-#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_MPTCH_MPTCH_SHIFT)
+#define I40E_GLPRT_MPTCL(_i) (0x003009E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MPTCL_MAX_INDEX 3
 #define I40E_GLPRT_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLPRT_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLPRT_MPTCL_MPTCL_SHIFT)
-#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MPTCL_MPTCL_SHIFT)
+#define I40E_GLPRT_MRFC(_i) (0x00300040 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_MRFC_MAX_INDEX 3
 #define I40E_GLPRT_MRFC_MRFC_SHIFT 0
-#define I40E_GLPRT_MRFC_MRFC_MASK (0xFFFFFFFF << I40E_GLPRT_MRFC_MRFC_SHIFT)
-#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_MRFC_MRFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_MRFC_MRFC_SHIFT)
+#define I40E_GLPRT_PRC1023H(_i) (0x00300504 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023H_PRC1023H_SHIFT 0
-#define I40E_GLPRT_PRC1023H_PRC1023H_MASK (0xFFFF << I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
-#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023H_PRC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1023H_PRC1023H_SHIFT)
+#define I40E_GLPRT_PRC1023L(_i) (0x00300500 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1023L_PRC1023L_SHIFT 0
-#define I40E_GLPRT_PRC1023L_PRC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
-#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1023L_PRC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1023L_PRC1023L_SHIFT)
+#define I40E_GLPRT_PRC127H(_i) (0x003004A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127H_MAX_INDEX 3
 #define I40E_GLPRT_PRC127H_PRC127H_SHIFT 0
-#define I40E_GLPRT_PRC127H_PRC127H_MASK (0xFFFF << I40E_GLPRT_PRC127H_PRC127H_SHIFT)
-#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127H_PRC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC127H_PRC127H_SHIFT)
+#define I40E_GLPRT_PRC127L(_i) (0x003004A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC127L_MAX_INDEX 3
 #define I40E_GLPRT_PRC127L_PRC127L_SHIFT 0
-#define I40E_GLPRT_PRC127L_PRC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC127L_PRC127L_SHIFT)
-#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC127L_PRC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC127L_PRC127L_SHIFT)
+#define I40E_GLPRT_PRC1522H(_i) (0x00300524 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC1522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC1522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC1522L(_i) (0x00300520 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC1522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC1522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC1522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC1522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PRC255H(_i) (0x003004C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255H_MAX_INDEX 3
 #define I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT 0
-#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK (0xFFFF << I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
-#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255H_PRTPRC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC255H_PRTPRC255H_SHIFT)
+#define I40E_GLPRT_PRC255L(_i) (0x003004C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC255L_MAX_INDEX 3
 #define I40E_GLPRT_PRC255L_PRC255L_SHIFT 0
-#define I40E_GLPRT_PRC255L_PRC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC255L_PRC255L_SHIFT)
-#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC255L_PRC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC255L_PRC255L_SHIFT)
+#define I40E_GLPRT_PRC511H(_i) (0x003004E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511H_MAX_INDEX 3
 #define I40E_GLPRT_PRC511H_PRC511H_SHIFT 0
-#define I40E_GLPRT_PRC511H_PRC511H_MASK (0xFFFF << I40E_GLPRT_PRC511H_PRC511H_SHIFT)
-#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511H_PRC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC511H_PRC511H_SHIFT)
+#define I40E_GLPRT_PRC511L(_i) (0x003004E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC511L_MAX_INDEX 3
 #define I40E_GLPRT_PRC511L_PRC511L_SHIFT 0
-#define I40E_GLPRT_PRC511L_PRC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC511L_PRC511L_SHIFT)
-#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC511L_PRC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC511L_PRC511L_SHIFT)
+#define I40E_GLPRT_PRC64H(_i) (0x00300484 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64H_MAX_INDEX 3
 #define I40E_GLPRT_PRC64H_PRC64H_SHIFT 0
-#define I40E_GLPRT_PRC64H_PRC64H_MASK (0xFFFF << I40E_GLPRT_PRC64H_PRC64H_SHIFT)
-#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64H_PRC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC64H_PRC64H_SHIFT)
+#define I40E_GLPRT_PRC64L(_i) (0x00300480 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC64L_MAX_INDEX 3
 #define I40E_GLPRT_PRC64L_PRC64L_SHIFT 0
-#define I40E_GLPRT_PRC64L_PRC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC64L_PRC64L_SHIFT)
-#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC64L_PRC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC64L_PRC64L_SHIFT)
+#define I40E_GLPRT_PRC9522H(_i) (0x00300544 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522H_PRC1522H_SHIFT 0
-#define I40E_GLPRT_PRC9522H_PRC1522H_MASK (0xFFFF << I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
-#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522H_PRC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PRC9522H_PRC1522H_SHIFT)
+#define I40E_GLPRT_PRC9522L(_i) (0x00300540 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PRC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PRC9522L_PRC1522L_SHIFT 0
-#define I40E_GLPRT_PRC9522L_PRC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
-#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PRC9522L_PRC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PRC9522L_PRC1522L_SHIFT)
+#define I40E_GLPRT_PTC1023H(_i) (0x00300724 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023H_PTC1023H_SHIFT 0
-#define I40E_GLPRT_PTC1023H_PTC1023H_MASK (0xFFFF << I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
-#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023H_PTC1023H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1023H_PTC1023H_SHIFT)
+#define I40E_GLPRT_PTC1023L(_i) (0x00300720 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1023L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1023L_PTC1023L_SHIFT 0
-#define I40E_GLPRT_PTC1023L_PTC1023L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
-#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1023L_PTC1023L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1023L_PTC1023L_SHIFT)
+#define I40E_GLPRT_PTC127H(_i) (0x003006C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127H_MAX_INDEX 3
 #define I40E_GLPRT_PTC127H_PTC127H_SHIFT 0
-#define I40E_GLPRT_PTC127H_PTC127H_MASK (0xFFFF << I40E_GLPRT_PTC127H_PTC127H_SHIFT)
-#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127H_PTC127H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC127H_PTC127H_SHIFT)
+#define I40E_GLPRT_PTC127L(_i) (0x003006C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC127L_MAX_INDEX 3
 #define I40E_GLPRT_PTC127L_PTC127L_SHIFT 0
-#define I40E_GLPRT_PTC127L_PTC127L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC127L_PTC127L_SHIFT)
-#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC127L_PTC127L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC127L_PTC127L_SHIFT)
+#define I40E_GLPRT_PTC1522H(_i) (0x00300744 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522H_PTC1522H_SHIFT 0
-#define I40E_GLPRT_PTC1522H_PTC1522H_MASK (0xFFFF << I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
-#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522H_PTC1522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC1522H_PTC1522H_SHIFT)
+#define I40E_GLPRT_PTC1522L(_i) (0x00300740 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC1522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC1522L_PTC1522L_SHIFT 0
-#define I40E_GLPRT_PTC1522L_PTC1522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
-#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC1522L_PTC1522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC1522L_PTC1522L_SHIFT)
+#define I40E_GLPRT_PTC255H(_i) (0x003006E4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255H_MAX_INDEX 3
 #define I40E_GLPRT_PTC255H_PTC255H_SHIFT 0
-#define I40E_GLPRT_PTC255H_PTC255H_MASK (0xFFFF << I40E_GLPRT_PTC255H_PTC255H_SHIFT)
-#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255H_PTC255H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC255H_PTC255H_SHIFT)
+#define I40E_GLPRT_PTC255L(_i) (0x003006E0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC255L_MAX_INDEX 3
 #define I40E_GLPRT_PTC255L_PTC255L_SHIFT 0
-#define I40E_GLPRT_PTC255L_PTC255L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC255L_PTC255L_SHIFT)
-#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC255L_PTC255L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC255L_PTC255L_SHIFT)
+#define I40E_GLPRT_PTC511H(_i) (0x00300704 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511H_MAX_INDEX 3
 #define I40E_GLPRT_PTC511H_PTC511H_SHIFT 0
-#define I40E_GLPRT_PTC511H_PTC511H_MASK (0xFFFF << I40E_GLPRT_PTC511H_PTC511H_SHIFT)
-#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511H_PTC511H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC511H_PTC511H_SHIFT)
+#define I40E_GLPRT_PTC511L(_i) (0x00300700 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC511L_MAX_INDEX 3
 #define I40E_GLPRT_PTC511L_PTC511L_SHIFT 0
-#define I40E_GLPRT_PTC511L_PTC511L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC511L_PTC511L_SHIFT)
-#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC511L_PTC511L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC511L_PTC511L_SHIFT)
+#define I40E_GLPRT_PTC64H(_i) (0x003006A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64H_MAX_INDEX 3
 #define I40E_GLPRT_PTC64H_PTC64H_SHIFT 0
-#define I40E_GLPRT_PTC64H_PTC64H_MASK (0xFFFF << I40E_GLPRT_PTC64H_PTC64H_SHIFT)
-#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64H_PTC64H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC64H_PTC64H_SHIFT)
+#define I40E_GLPRT_PTC64L(_i) (0x003006A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC64L_MAX_INDEX 3
 #define I40E_GLPRT_PTC64L_PTC64L_SHIFT 0
-#define I40E_GLPRT_PTC64L_PTC64L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC64L_PTC64L_SHIFT)
-#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC64L_PTC64L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC64L_PTC64L_SHIFT)
+#define I40E_GLPRT_PTC9522H(_i) (0x00300764 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522H_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522H_PTC9522H_SHIFT 0
-#define I40E_GLPRT_PTC9522H_PTC9522H_MASK (0xFFFF << I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
-#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PTC9522H_PTC9522H_MASK I40E_MASK(0xFFFF, I40E_GLPRT_PTC9522H_PTC9522H_SHIFT)
+#define I40E_GLPRT_PTC9522L(_i) (0x00300760 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_PTC9522L_MAX_INDEX 3
 #define I40E_GLPRT_PTC9522L_PTC9522L_SHIFT 0
-#define I40E_GLPRT_PTC9522L_PTC9522L_MASK (0xFFFFFFFF << I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
-#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PTC9522L_PTC9522L_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PTC9522L_PTC9522L_SHIFT)
+#define I40E_GLPRT_PXOFFRXC(_i, _j) (0x00300280 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
-#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFRXC_PRPXOFFRXCNT_SHIFT)
+#define I40E_GLPRT_PXOFFTXC(_i, _j) (0x00300880 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXOFFTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT 0
-#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
-#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXOFFTXC_PRPXOFFTXCNT_SHIFT)
+#define I40E_GLPRT_PXONRXC(_i, _j) (0x00300180 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONRXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT 0
-#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK (0xFFFFFFFF << I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
-#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_PXONRXC_PRPXONRXCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONRXC_PRPXONRXCNT_SHIFT)
+#define I40E_GLPRT_PXONTXC(_i, _j) (0x00300780 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_PXONTXC_MAX_INDEX 3
 #define I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT 0
-#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK (0xFFFFFFFF << I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
-#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_PXONTXC_PRPXONTXC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_PXONTXC_PRPXONTXC_SHIFT)
+#define I40E_GLPRT_RDPC(_i) (0x00300600 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RDPC_MAX_INDEX 3
 #define I40E_GLPRT_RDPC_RDPC_SHIFT 0
-#define I40E_GLPRT_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLPRT_RDPC_RDPC_SHIFT)
-#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RDPC_RDPC_SHIFT)
+#define I40E_GLPRT_RFC(_i) (0x00300560 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RFC_MAX_INDEX 3
 #define I40E_GLPRT_RFC_RFC_SHIFT 0
-#define I40E_GLPRT_RFC_RFC_MASK (0xFFFFFFFF << I40E_GLPRT_RFC_RFC_SHIFT)
-#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RFC_RFC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RFC_RFC_SHIFT)
+#define I40E_GLPRT_RJC(_i) (0x00300580 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RJC_MAX_INDEX 3
 #define I40E_GLPRT_RJC_RJC_SHIFT 0
-#define I40E_GLPRT_RJC_RJC_MASK (0xFFFFFFFF << I40E_GLPRT_RJC_RJC_SHIFT)
-#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RJC_RJC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RJC_RJC_SHIFT)
+#define I40E_GLPRT_RLEC(_i) (0x003000A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RLEC_MAX_INDEX 3
 #define I40E_GLPRT_RLEC_RLEC_SHIFT 0
-#define I40E_GLPRT_RLEC_RLEC_MASK (0xFFFFFFFF << I40E_GLPRT_RLEC_RLEC_SHIFT)
-#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RLEC_RLEC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RLEC_RLEC_SHIFT)
+#define I40E_GLPRT_ROC(_i) (0x00300120 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_ROC_MAX_INDEX 3
 #define I40E_GLPRT_ROC_ROC_SHIFT 0
-#define I40E_GLPRT_ROC_ROC_MASK (0xFFFFFFFF << I40E_GLPRT_ROC_ROC_SHIFT)
-#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_ROC_ROC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_ROC_ROC_SHIFT)
+#define I40E_GLPRT_RUC(_i) (0x00300100 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUC_MAX_INDEX 3
 #define I40E_GLPRT_RUC_RUC_SHIFT 0
-#define I40E_GLPRT_RUC_RUC_MASK (0xFFFFFFFF << I40E_GLPRT_RUC_RUC_SHIFT)
-#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RUC_RUC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUC_RUC_SHIFT)
+#define I40E_GLPRT_RUPP(_i) (0x00300660 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_RUPP_MAX_INDEX 3
 #define I40E_GLPRT_RUPP_RUPP_SHIFT 0
-#define I40E_GLPRT_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLPRT_RUPP_RUPP_SHIFT)
-#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32))
+#define I40E_GLPRT_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RUPP_RUPP_SHIFT)
+#define I40E_GLPRT_RXON2OFFCNT(_i, _j) (0x00300380 + ((_i) * 8 + (_j) * 32)) /* _i=0...3, _j=0...7 */ /* Reset: CORER */
 #define I40E_GLPRT_RXON2OFFCNT_MAX_INDEX 3
 #define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT 0
-#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK (0xFFFFFFFF << I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
-#define I40E_GLPRT_STDC(_i) (0x00300640 + ((_i) * 8)) /* _i=0...3 */
-#define I40E_GLPRT_STDC_MAX_INDEX 3
-#define I40E_GLPRT_STDC_STDC_SHIFT 0
-#define I40E_GLPRT_STDC_STDC_MASK (0xFFFFFFFF << I40E_GLPRT_STDC_STDC_SHIFT)
-#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_RXON2OFFCNT_PRRXON2OFFCNT_SHIFT)
+#define I40E_GLPRT_TDOLD(_i) (0x00300A20 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDOLD_MAX_INDEX 3
 #define I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT 0
-#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK (0xFFFFFFFF << I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
-#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDOLD_GLPRT_TDOLD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDOLD_GLPRT_TDOLD_SHIFT)
+#define I40E_GLPRT_TDPC(_i) (0x00375400 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_TDPC_MAX_INDEX 3
 #define I40E_GLPRT_TDPC_TDPC_SHIFT 0
-#define I40E_GLPRT_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLPRT_TDPC_TDPC_SHIFT)
-#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_TDPC_TDPC_SHIFT)
+#define I40E_GLPRT_UPRCH(_i) (0x003005A4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCH_MAX_INDEX 3
 #define I40E_GLPRT_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLPRT_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLPRT_UPRCH_UPRCH_SHIFT)
-#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPRCH_UPRCH_SHIFT)
+#define I40E_GLPRT_UPRCL(_i) (0x003005A0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPRCL_MAX_INDEX 3
 #define I40E_GLPRT_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLPRT_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLPRT_UPRCL_UPRCL_SHIFT)
-#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPRCL_UPRCL_SHIFT)
+#define I40E_GLPRT_UPTCH(_i) (0x003009C4 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCH_MAX_INDEX 3
 #define I40E_GLPRT_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLPRT_UPTCH_UPTCH_SHIFT)
-#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */
+#define I40E_GLPRT_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLPRT_UPTCH_UPTCH_SHIFT)
+#define I40E_GLPRT_UPTCL(_i) (0x003009C0 + ((_i) * 8)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_GLPRT_UPTCL_MAX_INDEX 3
 #define I40E_GLPRT_UPTCL_VUPTCH_SHIFT 0
-#define I40E_GLPRT_UPTCL_VUPTCH_MASK (0xFFFFFFFF << I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
-#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLPRT_UPTCL_VUPTCH_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPRT_UPTCL_VUPTCH_SHIFT)
+#define I40E_GLSW_BPRCH(_i) (0x00370104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCH_MAX_INDEX 15
 #define I40E_GLSW_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLSW_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLSW_BPRCH_BPRCH_SHIFT)
-#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPRCH_BPRCH_SHIFT)
+#define I40E_GLSW_BPRCL(_i) (0x00370100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPRCL_MAX_INDEX 15
 #define I40E_GLSW_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLSW_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLSW_BPRCL_BPRCL_SHIFT)
-#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPRCL_BPRCL_SHIFT)
+#define I40E_GLSW_BPTCH(_i) (0x00340104 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCH_MAX_INDEX 15
 #define I40E_GLSW_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLSW_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLSW_BPTCH_BPTCH_SHIFT)
-#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_BPTCH_BPTCH_SHIFT)
+#define I40E_GLSW_BPTCL(_i) (0x00340100 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_BPTCL_MAX_INDEX 15
 #define I40E_GLSW_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLSW_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLSW_BPTCL_BPTCL_SHIFT)
-#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_BPTCL_BPTCL_SHIFT)
+#define I40E_GLSW_GORCH(_i) (0x0035C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCH_MAX_INDEX 15
 #define I40E_GLSW_GORCH_GORCH_SHIFT 0
-#define I40E_GLSW_GORCH_GORCH_MASK (0xFFFF << I40E_GLSW_GORCH_GORCH_SHIFT)
-#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GORCH_GORCH_SHIFT)
+#define I40E_GLSW_GORCL(_i) (0x0035c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GORCL_MAX_INDEX 15
 #define I40E_GLSW_GORCL_GORCL_SHIFT 0
-#define I40E_GLSW_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLSW_GORCL_GORCL_SHIFT)
-#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GORCL_GORCL_SHIFT)
+#define I40E_GLSW_GOTCH(_i) (0x0032C004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCH_MAX_INDEX 15
 #define I40E_GLSW_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLSW_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLSW_GOTCH_GOTCH_SHIFT)
-#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_GOTCH_GOTCH_SHIFT)
+#define I40E_GLSW_GOTCL(_i) (0x0032c000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_GOTCL_MAX_INDEX 15
 #define I40E_GLSW_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLSW_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLSW_GOTCL_GOTCL_SHIFT)
-#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_GOTCL_GOTCL_SHIFT)
+#define I40E_GLSW_MPRCH(_i) (0x00370084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCH_MAX_INDEX 15
 #define I40E_GLSW_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLSW_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLSW_MPRCH_MPRCH_SHIFT)
-#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPRCH_MPRCH_SHIFT)
+#define I40E_GLSW_MPRCL(_i) (0x00370080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPRCL_MAX_INDEX 15
 #define I40E_GLSW_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLSW_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLSW_MPRCL_MPRCL_SHIFT)
-#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPRCL_MPRCL_SHIFT)
+#define I40E_GLSW_MPTCH(_i) (0x00340084 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCH_MAX_INDEX 15
 #define I40E_GLSW_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLSW_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLSW_MPTCH_MPTCH_SHIFT)
-#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_MPTCH_MPTCH_SHIFT)
+#define I40E_GLSW_MPTCL(_i) (0x00340080 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_MPTCL_MAX_INDEX 15
 #define I40E_GLSW_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLSW_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLSW_MPTCL_MPTCL_SHIFT)
-#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_MPTCL_MPTCL_SHIFT)
+#define I40E_GLSW_RUPP(_i) (0x00370180 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_RUPP_MAX_INDEX 15
 #define I40E_GLSW_RUPP_RUPP_SHIFT 0
-#define I40E_GLSW_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLSW_RUPP_RUPP_SHIFT)
-#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_RUPP_RUPP_SHIFT)
+#define I40E_GLSW_TDPC(_i) (0x00348000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_TDPC_MAX_INDEX 15
 #define I40E_GLSW_TDPC_TDPC_SHIFT 0
-#define I40E_GLSW_TDPC_TDPC_MASK (0xFFFFFFFF << I40E_GLSW_TDPC_TDPC_SHIFT)
-#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_TDPC_TDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_TDPC_TDPC_SHIFT)
+#define I40E_GLSW_UPRCH(_i) (0x00370004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCH_MAX_INDEX 15
 #define I40E_GLSW_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLSW_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLSW_UPRCH_UPRCH_SHIFT)
-#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPRCH_UPRCH_SHIFT)
+#define I40E_GLSW_UPRCL(_i) (0x00370000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPRCL_MAX_INDEX 15
 #define I40E_GLSW_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLSW_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLSW_UPRCL_UPRCL_SHIFT)
-#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPRCL_UPRCL_SHIFT)
+#define I40E_GLSW_UPTCH(_i) (0x00340004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCH_MAX_INDEX 15
 #define I40E_GLSW_UPTCH_UPTCH_SHIFT 0
-#define I40E_GLSW_UPTCH_UPTCH_MASK (0xFFFF << I40E_GLSW_UPTCH_UPTCH_SHIFT)
-#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */
+#define I40E_GLSW_UPTCH_UPTCH_MASK I40E_MASK(0xFFFF, I40E_GLSW_UPTCH_UPTCH_SHIFT)
+#define I40E_GLSW_UPTCL(_i) (0x00340000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_GLSW_UPTCL_MAX_INDEX 15
 #define I40E_GLSW_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLSW_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLSW_UPTCL_UPTCL_SHIFT)
-#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLSW_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLSW_UPTCL_UPTCL_SHIFT)
+#define I40E_GLV_BPRCH(_i) (0x0036D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCH_MAX_INDEX 383
 #define I40E_GLV_BPRCH_BPRCH_SHIFT 0
-#define I40E_GLV_BPRCH_BPRCH_MASK (0xFFFF << I40E_GLV_BPRCH_BPRCH_SHIFT)
-#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCH_BPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPRCH_BPRCH_SHIFT)
+#define I40E_GLV_BPRCL(_i) (0x0036d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPRCL_MAX_INDEX 383
 #define I40E_GLV_BPRCL_BPRCL_SHIFT 0
-#define I40E_GLV_BPRCL_BPRCL_MASK (0xFFFFFFFF << I40E_GLV_BPRCL_BPRCL_SHIFT)
-#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPRCL_BPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPRCL_BPRCL_SHIFT)
+#define I40E_GLV_BPTCH(_i) (0x0033D804 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCH_MAX_INDEX 383
 #define I40E_GLV_BPTCH_BPTCH_SHIFT 0
-#define I40E_GLV_BPTCH_BPTCH_MASK (0xFFFF << I40E_GLV_BPTCH_BPTCH_SHIFT)
-#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCH_BPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_BPTCH_BPTCH_SHIFT)
+#define I40E_GLV_BPTCL(_i) (0x0033d800 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_BPTCL_MAX_INDEX 383
 #define I40E_GLV_BPTCL_BPTCL_SHIFT 0
-#define I40E_GLV_BPTCL_BPTCL_MASK (0xFFFFFFFF << I40E_GLV_BPTCL_BPTCL_SHIFT)
-#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_BPTCL_BPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_BPTCL_BPTCL_SHIFT)
+#define I40E_GLV_GORCH(_i) (0x00358004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCH_MAX_INDEX 383
 #define I40E_GLV_GORCH_GORCH_SHIFT 0
-#define I40E_GLV_GORCH_GORCH_MASK (0xFFFF << I40E_GLV_GORCH_GORCH_SHIFT)
-#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCH_GORCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GORCH_GORCH_SHIFT)
+#define I40E_GLV_GORCL(_i) (0x00358000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GORCL_MAX_INDEX 383
 #define I40E_GLV_GORCL_GORCL_SHIFT 0
-#define I40E_GLV_GORCL_GORCL_MASK (0xFFFFFFFF << I40E_GLV_GORCL_GORCL_SHIFT)
-#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GORCL_GORCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GORCL_GORCL_SHIFT)
+#define I40E_GLV_GOTCH(_i) (0x00328004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCH_MAX_INDEX 383
 #define I40E_GLV_GOTCH_GOTCH_SHIFT 0
-#define I40E_GLV_GOTCH_GOTCH_MASK (0xFFFF << I40E_GLV_GOTCH_GOTCH_SHIFT)
-#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCH_GOTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_GOTCH_GOTCH_SHIFT)
+#define I40E_GLV_GOTCL(_i) (0x00328000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_GOTCL_MAX_INDEX 383
 #define I40E_GLV_GOTCL_GOTCL_SHIFT 0
-#define I40E_GLV_GOTCL_GOTCL_MASK (0xFFFFFFFF << I40E_GLV_GOTCL_GOTCL_SHIFT)
-#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_GOTCL_GOTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_GOTCL_GOTCL_SHIFT)
+#define I40E_GLV_MPRCH(_i) (0x0036CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCH_MAX_INDEX 383
 #define I40E_GLV_MPRCH_MPRCH_SHIFT 0
-#define I40E_GLV_MPRCH_MPRCH_MASK (0xFFFF << I40E_GLV_MPRCH_MPRCH_SHIFT)
-#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCH_MPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPRCH_MPRCH_SHIFT)
+#define I40E_GLV_MPRCL(_i) (0x0036cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPRCL_MAX_INDEX 383
 #define I40E_GLV_MPRCL_MPRCL_SHIFT 0
-#define I40E_GLV_MPRCL_MPRCL_MASK (0xFFFFFFFF << I40E_GLV_MPRCL_MPRCL_SHIFT)
-#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPRCL_MPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPRCL_MPRCL_SHIFT)
+#define I40E_GLV_MPTCH(_i) (0x0033CC04 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCH_MAX_INDEX 383
 #define I40E_GLV_MPTCH_MPTCH_SHIFT 0
-#define I40E_GLV_MPTCH_MPTCH_MASK (0xFFFF << I40E_GLV_MPTCH_MPTCH_SHIFT)
-#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCH_MPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_MPTCH_MPTCH_SHIFT)
+#define I40E_GLV_MPTCL(_i) (0x0033cc00 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_MPTCL_MAX_INDEX 383
 #define I40E_GLV_MPTCL_MPTCL_SHIFT 0
-#define I40E_GLV_MPTCL_MPTCL_MASK (0xFFFFFFFF << I40E_GLV_MPTCL_MPTCL_SHIFT)
-#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_MPTCL_MPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_MPTCL_MPTCL_SHIFT)
+#define I40E_GLV_RDPC(_i) (0x00310000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RDPC_MAX_INDEX 383
 #define I40E_GLV_RDPC_RDPC_SHIFT 0
-#define I40E_GLV_RDPC_RDPC_MASK (0xFFFFFFFF << I40E_GLV_RDPC_RDPC_SHIFT)
-#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_RDPC_RDPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RDPC_RDPC_SHIFT)
+#define I40E_GLV_RUPP(_i) (0x0036E400 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_RUPP_MAX_INDEX 383
 #define I40E_GLV_RUPP_RUPP_SHIFT 0
-#define I40E_GLV_RUPP_RUPP_MASK (0xFFFFFFFF << I40E_GLV_RUPP_RUPP_SHIFT)
-#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 8)) /* _i=0...383 */
+#define I40E_GLV_RUPP_RUPP_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_RUPP_RUPP_SHIFT)
+#define I40E_GLV_TEPC(_VSI) (0x00344000 + ((_VSI) * 4)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_TEPC_MAX_INDEX 383
 #define I40E_GLV_TEPC_TEPC_SHIFT 0
-#define I40E_GLV_TEPC_TEPC_MASK (0xFFFFFFFF << I40E_GLV_TEPC_TEPC_SHIFT)
-#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_TEPC_TEPC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_TEPC_TEPC_SHIFT)
+#define I40E_GLV_UPRCH(_i) (0x0036C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCH_MAX_INDEX 383
 #define I40E_GLV_UPRCH_UPRCH_SHIFT 0
-#define I40E_GLV_UPRCH_UPRCH_MASK (0xFFFF << I40E_GLV_UPRCH_UPRCH_SHIFT)
-#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCH_UPRCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPRCH_UPRCH_SHIFT)
+#define I40E_GLV_UPRCL(_i) (0x0036c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPRCL_MAX_INDEX 383
 #define I40E_GLV_UPRCL_UPRCL_SHIFT 0
-#define I40E_GLV_UPRCL_UPRCL_MASK (0xFFFFFFFF << I40E_GLV_UPRCL_UPRCL_SHIFT)
-#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPRCL_UPRCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPRCL_UPRCL_SHIFT)
+#define I40E_GLV_UPTCH(_i) (0x0033C004 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCH_MAX_INDEX 383
 #define I40E_GLV_UPTCH_GLVUPTCH_SHIFT 0
-#define I40E_GLV_UPTCH_GLVUPTCH_MASK (0xFFFF << I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
-#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */
+#define I40E_GLV_UPTCH_GLVUPTCH_MASK I40E_MASK(0xFFFF, I40E_GLV_UPTCH_GLVUPTCH_SHIFT)
+#define I40E_GLV_UPTCL(_i) (0x0033c000 + ((_i) * 8)) /* _i=0...383 */ /* Reset: CORER */
 #define I40E_GLV_UPTCL_MAX_INDEX 383
 #define I40E_GLV_UPTCL_UPTCL_SHIFT 0
-#define I40E_GLV_UPTCL_UPTCL_MASK (0xFFFFFFFF << I40E_GLV_UPTCL_UPTCL_SHIFT)
-#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLV_UPTCL_UPTCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLV_UPTCL_UPTCL_SHIFT)
+#define I40E_GLVEBTC_RBCH(_i, _j) (0x00364004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_RBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_RBCL(_i, _j) (0x00364000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_RBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_RPCH(_i, _j) (0x00368004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_RPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_RPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_RPCL(_i, _j) (0x00368000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_RPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_RPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_RPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
-#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_RPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_RPCL_TCPCL_SHIFT)
+#define I40E_GLVEBTC_TBCH(_i, _j) (0x00334004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCH_TCBCH_SHIFT 0
-#define I40E_GLVEBTC_TBCH_TCBCH_MASK (0xFFFF << I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
-#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCH_TCBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TBCH_TCBCH_SHIFT)
+#define I40E_GLVEBTC_TBCL(_i, _j) (0x00334000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TBCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TBCL_TCBCL_SHIFT 0
-#define I40E_GLVEBTC_TBCL_TCBCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
-#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TBCL_TCBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TBCL_TCBCL_SHIFT)
+#define I40E_GLVEBTC_TPCH(_i, _j) (0x00338004 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCH_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCH_TCPCH_SHIFT 0
-#define I40E_GLVEBTC_TPCH_TCPCH_MASK (0xFFFF << I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
-#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */
+#define I40E_GLVEBTC_TPCH_TCPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBTC_TPCH_TCPCH_SHIFT)
+#define I40E_GLVEBTC_TPCL(_i, _j) (0x00338000 + ((_i) * 8 + (_j) * 64)) /* _i=0...7, _j=0...15 */ /* Reset: CORER */
 #define I40E_GLVEBTC_TPCL_MAX_INDEX 7
 #define I40E_GLVEBTC_TPCL_TCPCL_SHIFT 0
-#define I40E_GLVEBTC_TPCL_TCPCL_MASK (0xFFFFFFFF << I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
-#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBTC_TPCL_TCPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBTC_TPCL_TCPCL_SHIFT)
+#define I40E_GLVEBVL_BPCH(_i) (0x00374804 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCH_VLBPCH_SHIFT 0
-#define I40E_GLVEBVL_BPCH_VLBPCH_MASK (0xFFFF << I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
-#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCH_VLBPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_BPCH_VLBPCH_SHIFT)
+#define I40E_GLVEBVL_BPCL(_i) (0x00374800 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_BPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_BPCL_VLBPCL_SHIFT 0
-#define I40E_GLVEBVL_BPCL_VLBPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
-#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_BPCL_VLBPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_BPCL_VLBPCL_SHIFT)
+#define I40E_GLVEBVL_GORCH(_i) (0x00360004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GORCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GORCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GORCL(_i) (0x00360000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GORCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GORCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GORCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GORCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GORCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_GOTCH(_i) (0x00330004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCH_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCH_VLBCH_SHIFT 0
-#define I40E_GLVEBVL_GOTCH_VLBCH_MASK (0xFFFF << I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
-#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCH_VLBCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_GOTCH_VLBCH_SHIFT)
+#define I40E_GLVEBVL_GOTCL(_i) (0x00330000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_GOTCL_MAX_INDEX 127
 #define I40E_GLVEBVL_GOTCL_VLBCL_SHIFT 0
-#define I40E_GLVEBVL_GOTCL_VLBCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
-#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_GOTCL_VLBCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_GOTCL_VLBCL_SHIFT)
+#define I40E_GLVEBVL_MPCH(_i) (0x00374404 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCH_VLMPCH_SHIFT 0
-#define I40E_GLVEBVL_MPCH_VLMPCH_MASK (0xFFFF << I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
-#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCH_VLMPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_MPCH_VLMPCH_SHIFT)
+#define I40E_GLVEBVL_MPCL(_i) (0x00374400 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_MPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_MPCL_VLMPCL_SHIFT 0
-#define I40E_GLVEBVL_MPCL_VLMPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
-#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_MPCL_VLMPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_MPCL_VLMPCL_SHIFT)
+#define I40E_GLVEBVL_UPCH(_i) (0x00374004 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCH_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCH_VLUPCH_SHIFT 0
-#define I40E_GLVEBVL_UPCH_VLUPCH_MASK (0xFFFF << I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
-#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */
+#define I40E_GLVEBVL_UPCH_VLUPCH_MASK I40E_MASK(0xFFFF, I40E_GLVEBVL_UPCH_VLUPCH_SHIFT)
+#define I40E_GLVEBVL_UPCL(_i) (0x00374000 + ((_i) * 8)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_GLVEBVL_UPCL_MAX_INDEX 127
 #define I40E_GLVEBVL_UPCL_VLUPCL_SHIFT 0
-#define I40E_GLVEBVL_UPCL_VLUPCL_MASK (0xFFFFFFFF << I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C
+#define I40E_GLVEBVL_UPCL_VLUPCL_MASK I40E_MASK(0xFFFFFFFF, I40E_GLVEBVL_UPCL_VLUPCL_SHIFT)
+#define I40E_GL_MTG_FLU_MSK_H 0x00269F4C /* Reset: CORER */
 #define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK (0xFFFF << I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
-#define I40E_GL_MTG_FLU_MSK_L 0x00269F44
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT 0
-#define I40E_GL_MTG_FLU_MSK_L_MASK_LOW_MASK (0xFFFFFFFF << I40E_GL_MTG_FLU_MSK_L_MASK_LOW_SHIFT)
-#define I40E_GL_SWR_DEF_ACT(_i) (0x0026CF00 + ((_i) * 4)) /* _i=0...25 */
-#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 25
+#define I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_MASK I40E_MASK(0xFFFF, I40E_GL_MTG_FLU_MSK_H_MASK_HIGH_SHIFT)
+#define I40E_GL_SWR_DEF_ACT(_i) (0x00270200 + ((_i) * 4)) /* _i=0...35 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_MAX_INDEX 35
 #define I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
-#define I40E_GL_SWR_DEF_ACT_EN 0x0026CF84
+#define I40E_GL_SWR_DEF_ACT_DEF_ACTION_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_DEF_ACTION_SHIFT)
+#define I40E_GL_SWR_DEF_ACT_EN(_i) (0x0026CFB8 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GL_SWR_DEF_ACT_EN_MAX_INDEX 1
 #define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT 0
-#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK (0xFFFFFFFF << I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
-#define I40E_PRT_MSCCNT 0x00256BA0
-#define I40E_PRT_MSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_MSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_MSCCNT_CCOUNT_SHIFT)
-#define I40E_PRT_SCSTS 0x00256C20
-#define I40E_PRT_SCSTS_BSCA_SHIFT 0
-#define I40E_PRT_SCSTS_BSCA_MASK (0x1 << I40E_PRT_SCSTS_BSCA_SHIFT)
-#define I40E_PRT_SCSTS_BSCAP_SHIFT 1
-#define I40E_PRT_SCSTS_BSCAP_MASK (0x1 << I40E_PRT_SCSTS_BSCAP_SHIFT)
-#define I40E_PRT_SCSTS_MSCA_SHIFT 2
-#define I40E_PRT_SCSTS_MSCA_MASK (0x1 << I40E_PRT_SCSTS_MSCA_SHIFT)
-#define I40E_PRT_SCSTS_MSCAP_SHIFT 3
-#define I40E_PRT_SCSTS_MSCAP_MASK (0x1 << I40E_PRT_SCSTS_MSCAP_SHIFT)
-#define I40E_PRT_SWT_BSCCNT 0x00256C60
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT 0
-#define I40E_PRT_SWT_BSCCNT_CCOUNT_MASK (0x1FFFFFF << I40E_PRT_SWT_BSCCNT_CCOUNT_SHIFT)
-#define I40E_PRTTSYN_ADJ 0x001E4280
+#define I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_SWR_DEF_ACT_EN_DEF_ACT_EN_BITMAP_SHIFT)
+#define I40E_PRTTSYN_ADJ 0x001E4280 /* Reset: GLOBR */
 #define I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT 0
-#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK (0x7FFFFFFF << I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
+#define I40E_PRTTSYN_ADJ_TSYNADJ_MASK I40E_MASK(0x7FFFFFFF, I40E_PRTTSYN_ADJ_TSYNADJ_SHIFT)
 #define I40E_PRTTSYN_ADJ_SIGN_SHIFT 31
-#define I40E_PRTTSYN_ADJ_SIGN_MASK (0x1 << I40E_PRTTSYN_ADJ_SIGN_SHIFT)
-#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_ADJ_SIGN_MASK I40E_MASK(0x1, I40E_PRTTSYN_ADJ_SIGN_SHIFT)
+#define I40E_PRTTSYN_AUX_0(_i) (0x001E42A0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_0_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT 0
-#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUT_ENA_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT 1
-#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK (0x3 << I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTMOD_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_OUTMOD_SHIFT)
 #define I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT 3
-#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK (0x1 << I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_0_OUTLVL_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_0_OUTLVL_SHIFT)
 #define I40E_PRTTSYN_AUX_0_PULSEW_SHIFT 8
-#define I40E_PRTTSYN_AUX_0_PULSEW_MASK (0xF << I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
+#define I40E_PRTTSYN_AUX_0_PULSEW_MASK I40E_MASK(0xF, I40E_PRTTSYN_AUX_0_PULSEW_SHIFT)
 #define I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT 16
-#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK (0x3 << I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
-#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_0_EVNTLVL_MASK I40E_MASK(0x3, I40E_PRTTSYN_AUX_0_EVNTLVL_SHIFT)
+#define I40E_PRTTSYN_AUX_1(_i) (0x001E42E0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_AUX_1_MAX_INDEX 1
 #define I40E_PRTTSYN_AUX_1_INSTNT_SHIFT 0
-#define I40E_PRTTSYN_AUX_1_INSTNT_MASK (0x1 << I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
+#define I40E_PRTTSYN_AUX_1_INSTNT_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_INSTNT_SHIFT)
 #define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT 1
-#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK (0x1 << I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
-#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_AUX_1_SAMPLE_TIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_AUX_1_SAMPLE_TIME_SHIFT)
+#define I40E_PRTTSYN_CLKO(_i) (0x001E4240 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_CLKO_MAX_INDEX 1
 #define I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT 0
-#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK (0xFFFFFFFF << I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
-#define I40E_PRTTSYN_CTL0 0x001E4200
+#define I40E_PRTTSYN_CLKO_TSYNCLKO_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_CLKO_TSYNCLKO_SHIFT)
+#define I40E_PRTTSYN_CTL0 0x001E4200 /* Reset: GLOBR */
 #define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT 0
-#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK (0x1 << I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
+#define I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_CLEAR_TSYNTIMER_SHIFT)
 #define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT 1
-#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TXTIME_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT 2
-#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_EVENT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_EVENT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT 3
-#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL0_TGT_INT_ENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TGT_INT_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL0_PF_ID_SHIFT 8
-#define I40E_PRTTSYN_CTL0_PF_ID_MASK (0xF << I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
+#define I40E_PRTTSYN_CTL0_PF_ID_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL0_PF_ID_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNACT_SHIFT 12
-#define I40E_PRTTSYN_CTL0_TSYNACT_MASK (0x3 << I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
+#define I40E_PRTTSYN_CTL0_TSYNACT_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL0_TSYNACT_SHIFT)
 #define I40E_PRTTSYN_CTL0_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL0_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_CTL1 0x00085020
+#define I40E_PRTTSYN_CTL0_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL0_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_CTL1 0x00085020 /* Reset: CORER */
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT 0
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE0_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT 8
-#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK (0xFF << I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V1MESSTYPE1_MASK I40E_MASK(0xFF, I40E_PRTTSYN_CTL1_V1MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT 16
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE0_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE0_SHIFT)
 #define I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT 20
-#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK (0xF << I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
+#define I40E_PRTTSYN_CTL1_V2MESSTYPE1_MASK I40E_MASK(0xF, I40E_PRTTSYN_CTL1_V2MESSTYPE1_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT 24
-#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK (0x3 << I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
+#define I40E_PRTTSYN_CTL1_TSYNTYPE_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_TSYNTYPE_SHIFT)
 #define I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT 26
-#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK (0x3 << I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
+#define I40E_PRTTSYN_CTL1_UDP_ENA_MASK I40E_MASK(0x3, I40E_PRTTSYN_CTL1_UDP_ENA_SHIFT)
 #define I40E_PRTTSYN_CTL1_TSYNENA_SHIFT 31
-#define I40E_PRTTSYN_CTL1_TSYNENA_MASK (0x1 << I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
-#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_CTL1_TSYNENA_MASK I40E_MASK(0x1, I40E_PRTTSYN_CTL1_TSYNENA_SHIFT)
+#define I40E_PRTTSYN_EVNT_H(_i) (0x001E40C0 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT 0
-#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
-#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_H_TSYNEVNT_H_SHIFT)
+#define I40E_PRTTSYN_EVNT_L(_i) (0x001E4080 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_EVNT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT 0
-#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
-#define I40E_PRTTSYN_INC_H 0x001E4060
+#define I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_EVNT_L_TSYNEVNT_L_SHIFT)
+#define I40E_PRTTSYN_INC_H 0x001E4060 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT 0
-#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK (0x3F << I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
-#define I40E_PRTTSYN_INC_L 0x001E4040
+#define I40E_PRTTSYN_INC_H_TSYNINC_H_MASK I40E_MASK(0x3F, I40E_PRTTSYN_INC_H_TSYNINC_H_SHIFT)
+#define I40E_PRTTSYN_INC_L 0x001E4040 /* Reset: GLOBR */
 #define I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT 0
-#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
-#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_INC_L_TSYNINC_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_INC_L_TSYNINC_L_SHIFT)
+#define I40E_PRTTSYN_RXTIME_H(_i) (0x00085040 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_H_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTTSYN_RXTIME_H_RXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_H_RXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_RXTIME_L(_i) (0x000850C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: CORER */
 #define I40E_PRTTSYN_RXTIME_L_MAX_INDEX 3
 #define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
-#define I40E_PRTTSYN_STAT_0 0x001E4220
+#define I40E_PRTTSYN_RXTIME_L_RXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_RXTIME_L_RXTIEM_L_SHIFT)
+#define I40E_PRTTSYN_STAT_0 0x001E4220 /* Reset: GLOBR */
 #define I40E_PRTTSYN_STAT_0_EVENT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_0_EVENT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_EVENT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_0_EVENT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_EVENT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_EVENT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT0_SHIFT 2
-#define I40E_PRTTSYN_STAT_0_TGT0_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT0_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TGT1_SHIFT 3
-#define I40E_PRTTSYN_STAT_0_TGT1_MASK (0x1 << I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
+#define I40E_PRTTSYN_STAT_0_TGT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TGT1_SHIFT)
 #define I40E_PRTTSYN_STAT_0_TXTIME_SHIFT 4
-#define I40E_PRTTSYN_STAT_0_TXTIME_MASK (0x1 << I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
-#define I40E_PRTTSYN_STAT_1 0x00085140
+#define I40E_PRTTSYN_STAT_0_TXTIME_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_0_TXTIME_SHIFT)
+#define I40E_PRTTSYN_STAT_1 0x00085140 /* Reset: CORER */
 #define I40E_PRTTSYN_STAT_1_RXT0_SHIFT 0
-#define I40E_PRTTSYN_STAT_1_RXT0_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT0_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT0_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT1_SHIFT 1
-#define I40E_PRTTSYN_STAT_1_RXT1_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT1_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT1_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT2_SHIFT 2
-#define I40E_PRTTSYN_STAT_1_RXT2_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
+#define I40E_PRTTSYN_STAT_1_RXT2_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT2_SHIFT)
 #define I40E_PRTTSYN_STAT_1_RXT3_SHIFT 3
-#define I40E_PRTTSYN_STAT_1_RXT3_MASK (0x1 << I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
-#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_STAT_1_RXT3_MASK I40E_MASK(0x1, I40E_PRTTSYN_STAT_1_RXT3_SHIFT)
+#define I40E_PRTTSYN_TGT_H(_i) (0x001E4180 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_H_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT 0
-#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
-#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */
+#define I40E_PRTTSYN_TGT_H_TSYNTGTT_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_H_TSYNTGTT_H_SHIFT)
+#define I40E_PRTTSYN_TGT_L(_i) (0x001E4140 + ((_i) * 32)) /* _i=0...1 */ /* Reset: GLOBR */
 #define I40E_PRTTSYN_TGT_L_MAX_INDEX 1
 #define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT 0
-#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
-#define I40E_PRTTSYN_TIME_H 0x001E4120
+#define I40E_PRTTSYN_TGT_L_TSYNTGTT_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TGT_L_TSYNTGTT_L_SHIFT)
+#define I40E_PRTTSYN_TIME_H 0x001E4120 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT 0
-#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
-#define I40E_PRTTSYN_TIME_L 0x001E4100
+#define I40E_PRTTSYN_TIME_H_TSYNTIME_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_H_TSYNTIME_H_SHIFT)
+#define I40E_PRTTSYN_TIME_L 0x001E4100 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT 0
-#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
-#define I40E_PRTTSYN_TXTIME_H 0x001E41E0
+#define I40E_PRTTSYN_TIME_L_TSYNTIME_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TIME_L_TSYNTIME_L_SHIFT)
+#define I40E_PRTTSYN_TXTIME_H 0x001E41E0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
-#define I40E_PRTTSYN_TXTIME_L 0x001E41C0
+#define I40E_PRTTSYN_TXTIME_H_TXTIEM_H_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_H_TXTIEM_H_SHIFT)
+#define I40E_PRTTSYN_TXTIME_L 0x001E41C0 /* Reset: GLOBR */
 #define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT 0
-#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK (0xFFFFFFFF << I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
-#define I40E_GLSCD_QUANTA 0x000B2080
+#define I40E_PRTTSYN_TXTIME_L_TXTIEM_L_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTTSYN_TXTIME_L_TXTIEM_L_SHIFT)
+#define I40E_GLSCD_QUANTA 0x000B2080 /* Reset: CORER */
 #define I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT 0
-#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK (0x7 << I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
-#define I40E_GL_MDET_RX 0x0012A510
+#define I40E_GLSCD_QUANTA_TSCDQUANTA_MASK I40E_MASK(0x7, I40E_GLSCD_QUANTA_TSCDQUANTA_SHIFT)
+#define I40E_GL_MDET_RX 0x0012A510 /* Reset: CORER */
 #define I40E_GL_MDET_RX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_RX_FUNCTION_MASK (0xFF << I40E_GL_MDET_RX_FUNCTION_SHIFT)
+#define I40E_GL_MDET_RX_FUNCTION_MASK I40E_MASK(0xFF, I40E_GL_MDET_RX_FUNCTION_SHIFT)
 #define I40E_GL_MDET_RX_EVENT_SHIFT 8
-#define I40E_GL_MDET_RX_EVENT_MASK (0x1FF << I40E_GL_MDET_RX_EVENT_SHIFT)
+#define I40E_GL_MDET_RX_EVENT_MASK I40E_MASK(0x1FF, I40E_GL_MDET_RX_EVENT_SHIFT)
 #define I40E_GL_MDET_RX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_RX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_RX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_QUEUE_MASK I40E_MASK(0x3FFF, I40E_GL_MDET_RX_QUEUE_SHIFT)
 #define I40E_GL_MDET_RX_VALID_SHIFT 31
-#define I40E_GL_MDET_RX_VALID_MASK (0x1 << I40E_GL_MDET_RX_VALID_SHIFT)
-#define I40E_GL_MDET_TX 0x000E6480
-#define I40E_GL_MDET_TX_FUNCTION_SHIFT 0
-#define I40E_GL_MDET_TX_FUNCTION_MASK (0xFF << I40E_GL_MDET_TX_FUNCTION_SHIFT)
-#define I40E_GL_MDET_TX_EVENT_SHIFT 8
-#define I40E_GL_MDET_TX_EVENT_MASK (0x1FF << I40E_GL_MDET_TX_EVENT_SHIFT)
-#define I40E_GL_MDET_TX_QUEUE_SHIFT 17
-#define I40E_GL_MDET_TX_QUEUE_MASK (0x3FFF << I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_RX_VALID_SHIFT)
+#define I40E_GL_MDET_TX 0x000E6480 /* Reset: CORER */
+#define I40E_GL_MDET_TX_QUEUE_SHIFT 0
+#define I40E_GL_MDET_TX_QUEUE_MASK I40E_MASK(0xFFF, I40E_GL_MDET_TX_QUEUE_SHIFT)
+#define I40E_GL_MDET_TX_VF_NUM_SHIFT 12
+#define I40E_GL_MDET_TX_VF_NUM_MASK I40E_MASK(0x1FF, I40E_GL_MDET_TX_VF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_PF_NUM_SHIFT 21
+#define I40E_GL_MDET_TX_PF_NUM_MASK I40E_MASK(0xF, I40E_GL_MDET_TX_PF_NUM_SHIFT)
+#define I40E_GL_MDET_TX_EVENT_SHIFT 25
+#define I40E_GL_MDET_TX_EVENT_MASK I40E_MASK(0x1F, I40E_GL_MDET_TX_EVENT_SHIFT)
 #define I40E_GL_MDET_TX_VALID_SHIFT 31
-#define I40E_GL_MDET_TX_VALID_MASK (0x1 << I40E_GL_MDET_TX_VALID_SHIFT)
-#define I40E_PF_MDET_RX 0x0012A400
+#define I40E_GL_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_GL_MDET_TX_VALID_SHIFT)
+#define I40E_PF_MDET_RX 0x0012A400 /* Reset: CORER */
 #define I40E_PF_MDET_RX_VALID_SHIFT 0
-#define I40E_PF_MDET_RX_VALID_MASK (0x1 << I40E_PF_MDET_RX_VALID_SHIFT)
-#define I40E_PF_MDET_TX 0x000E6400
+#define I40E_PF_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_RX_VALID_SHIFT)
+#define I40E_PF_MDET_TX 0x000E6400 /* Reset: CORER */
 #define I40E_PF_MDET_TX_VALID_SHIFT 0
-#define I40E_PF_MDET_TX_VALID_MASK (0x1 << I40E_PF_MDET_TX_VALID_SHIFT)
-#define I40E_PF_VT_PFALLOC 0x001C0500
+#define I40E_PF_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_PF_MDET_TX_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC 0x001C0500 /* Reset: CORER */
 #define I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT 0
-#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_FIRSTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_FIRSTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
-#define I40E_PF_VT_PFALLOC_LASTVF_MASK (0xFF << I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
+#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
 #define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK (0x1 << I40E_PF_VT_PFALLOC_VALID_SHIFT)
-#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_RX_MAX_INDEX 127
 #define I40E_VP_MDET_RX_VALID_SHIFT 0
-#define I40E_VP_MDET_RX_VALID_MASK (0x1 << I40E_VP_MDET_RX_VALID_SHIFT)
-#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */
+#define I40E_VP_MDET_RX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_RX_VALID_SHIFT)
+#define I40E_VP_MDET_TX(_VF) (0x000E6000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
 #define I40E_VP_MDET_TX_MAX_INDEX 127
 #define I40E_VP_MDET_TX_VALID_SHIFT 0
-#define I40E_VP_MDET_TX_VALID_MASK (0x1 << I40E_VP_MDET_TX_VALID_SHIFT)
-#define I40E_GLPM_WUMC 0x0006C800
+#define I40E_VP_MDET_TX_VALID_MASK I40E_MASK(0x1, I40E_VP_MDET_TX_VALID_SHIFT)
+#define I40E_GLPM_WUMC 0x0006C800 /* Reset: POR */
 #define I40E_GLPM_WUMC_NOTCO_SHIFT 0
-#define I40E_GLPM_WUMC_NOTCO_MASK (0x1 << I40E_GLPM_WUMC_NOTCO_SHIFT)
+#define I40E_GLPM_WUMC_NOTCO_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_NOTCO_SHIFT)
 #define I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT 1
-#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK (0x1 << I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
+#define I40E_GLPM_WUMC_SRST_PIN_VAL_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_SRST_PIN_VAL_SHIFT)
 #define I40E_GLPM_WUMC_ROL_MODE_SHIFT 2
-#define I40E_GLPM_WUMC_ROL_MODE_MASK (0x1 << I40E_GLPM_WUMC_ROL_MODE_SHIFT)
+#define I40E_GLPM_WUMC_ROL_MODE_MASK I40E_MASK(0x1, I40E_GLPM_WUMC_ROL_MODE_SHIFT)
 #define I40E_GLPM_WUMC_RESERVED_4_SHIFT 3
-#define I40E_GLPM_WUMC_RESERVED_4_MASK (0x1FFF << I40E_GLPM_WUMC_RESERVED_4_SHIFT)
+#define I40E_GLPM_WUMC_RESERVED_4_MASK I40E_MASK(0x1FFF, I40E_GLPM_WUMC_RESERVED_4_SHIFT)
 #define I40E_GLPM_WUMC_MNG_WU_PF_SHIFT 16
-#define I40E_GLPM_WUMC_MNG_WU_PF_MASK (0xFFFF << I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
-#define I40E_PFPM_APM 0x000B8080
+#define I40E_GLPM_WUMC_MNG_WU_PF_MASK I40E_MASK(0xFFFF, I40E_GLPM_WUMC_MNG_WU_PF_SHIFT)
+#define I40E_PFPM_APM 0x000B8080 /* Reset: POR */
 #define I40E_PFPM_APM_APME_SHIFT 0
-#define I40E_PFPM_APM_APME_MASK (0x1 << I40E_PFPM_APM_APME_SHIFT)
-#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */
+#define I40E_PFPM_APM_APME_MASK I40E_MASK(0x1, I40E_PFPM_APM_APME_SHIFT)
+#define I40E_PFPM_FHFT_LENGTH(_i) (0x0006A000 + ((_i) * 128)) /* _i=0...7 */ /* Reset: POR */
 #define I40E_PFPM_FHFT_LENGTH_MAX_INDEX 7
 #define I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT 0
-#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK (0xFF << I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
-#define I40E_PFPM_WUC 0x0006B200
+#define I40E_PFPM_FHFT_LENGTH_LENGTH_MASK I40E_MASK(0xFF, I40E_PFPM_FHFT_LENGTH_LENGTH_SHIFT)
+#define I40E_PFPM_WUC 0x0006B200 /* Reset: POR */
 #define I40E_PFPM_WUC_EN_APM_D0_SHIFT 5
-#define I40E_PFPM_WUC_EN_APM_D0_MASK (0x1 << I40E_PFPM_WUC_EN_APM_D0_SHIFT)
-#define I40E_PFPM_WUFC 0x0006B400
+#define I40E_PFPM_WUC_EN_APM_D0_MASK I40E_MASK(0x1, I40E_PFPM_WUC_EN_APM_D0_SHIFT)
+#define I40E_PFPM_WUFC 0x0006B400 /* Reset: POR */
 #define I40E_PFPM_WUFC_LNKC_SHIFT 0
-#define I40E_PFPM_WUFC_LNKC_MASK (0x1 << I40E_PFPM_WUFC_LNKC_SHIFT)
+#define I40E_PFPM_WUFC_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_LNKC_SHIFT)
 #define I40E_PFPM_WUFC_MAG_SHIFT 1
-#define I40E_PFPM_WUFC_MAG_MASK (0x1 << I40E_PFPM_WUFC_MAG_SHIFT)
+#define I40E_PFPM_WUFC_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MAG_SHIFT)
 #define I40E_PFPM_WUFC_MNG_SHIFT 3
-#define I40E_PFPM_WUFC_MNG_MASK (0x1 << I40E_PFPM_WUFC_MNG_SHIFT)
+#define I40E_PFPM_WUFC_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_MNG_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_ACT_SHIFT 4
-#define I40E_PFPM_WUFC_FLX0_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_ACT_SHIFT 5
-#define I40E_PFPM_WUFC_FLX1_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_ACT_SHIFT 6
-#define I40E_PFPM_WUFC_FLX2_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_ACT_SHIFT 7
-#define I40E_PFPM_WUFC_FLX3_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_ACT_SHIFT 8
-#define I40E_PFPM_WUFC_FLX4_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_ACT_SHIFT 9
-#define I40E_PFPM_WUFC_FLX5_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_ACT_SHIFT 10
-#define I40E_PFPM_WUFC_FLX6_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_ACT_SHIFT 11
-#define I40E_PFPM_WUFC_FLX7_ACT_MASK (0x1 << I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_ACT_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_ACT_SHIFT)
 #define I40E_PFPM_WUFC_FLX0_SHIFT 16
-#define I40E_PFPM_WUFC_FLX0_MASK (0x1 << I40E_PFPM_WUFC_FLX0_SHIFT)
+#define I40E_PFPM_WUFC_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX0_SHIFT)
 #define I40E_PFPM_WUFC_FLX1_SHIFT 17
-#define I40E_PFPM_WUFC_FLX1_MASK (0x1 << I40E_PFPM_WUFC_FLX1_SHIFT)
+#define I40E_PFPM_WUFC_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX1_SHIFT)
 #define I40E_PFPM_WUFC_FLX2_SHIFT 18
-#define I40E_PFPM_WUFC_FLX2_MASK (0x1 << I40E_PFPM_WUFC_FLX2_SHIFT)
+#define I40E_PFPM_WUFC_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX2_SHIFT)
 #define I40E_PFPM_WUFC_FLX3_SHIFT 19
-#define I40E_PFPM_WUFC_FLX3_MASK (0x1 << I40E_PFPM_WUFC_FLX3_SHIFT)
+#define I40E_PFPM_WUFC_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX3_SHIFT)
 #define I40E_PFPM_WUFC_FLX4_SHIFT 20
-#define I40E_PFPM_WUFC_FLX4_MASK (0x1 << I40E_PFPM_WUFC_FLX4_SHIFT)
+#define I40E_PFPM_WUFC_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX4_SHIFT)
 #define I40E_PFPM_WUFC_FLX5_SHIFT 21
-#define I40E_PFPM_WUFC_FLX5_MASK (0x1 << I40E_PFPM_WUFC_FLX5_SHIFT)
+#define I40E_PFPM_WUFC_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX5_SHIFT)
 #define I40E_PFPM_WUFC_FLX6_SHIFT 22
-#define I40E_PFPM_WUFC_FLX6_MASK (0x1 << I40E_PFPM_WUFC_FLX6_SHIFT)
+#define I40E_PFPM_WUFC_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX6_SHIFT)
 #define I40E_PFPM_WUFC_FLX7_SHIFT 23
-#define I40E_PFPM_WUFC_FLX7_MASK (0x1 << I40E_PFPM_WUFC_FLX7_SHIFT)
+#define I40E_PFPM_WUFC_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FLX7_SHIFT)
 #define I40E_PFPM_WUFC_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUFC_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
-#define I40E_PFPM_WUS 0x0006B600
+#define I40E_PFPM_WUFC_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUFC_FW_RST_WK_SHIFT)
+#define I40E_PFPM_WUS 0x0006B600 /* Reset: POR */
 #define I40E_PFPM_WUS_LNKC_SHIFT 0
-#define I40E_PFPM_WUS_LNKC_MASK (0x1 << I40E_PFPM_WUS_LNKC_SHIFT)
+#define I40E_PFPM_WUS_LNKC_MASK I40E_MASK(0x1, I40E_PFPM_WUS_LNKC_SHIFT)
 #define I40E_PFPM_WUS_MAG_SHIFT 1
-#define I40E_PFPM_WUS_MAG_MASK (0x1 << I40E_PFPM_WUS_MAG_SHIFT)
+#define I40E_PFPM_WUS_MAG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MAG_SHIFT)
 #define I40E_PFPM_WUS_PME_STATUS_SHIFT 2
-#define I40E_PFPM_WUS_PME_STATUS_MASK (0x1 << I40E_PFPM_WUS_PME_STATUS_SHIFT)
+#define I40E_PFPM_WUS_PME_STATUS_MASK I40E_MASK(0x1, I40E_PFPM_WUS_PME_STATUS_SHIFT)
 #define I40E_PFPM_WUS_MNG_SHIFT 3
-#define I40E_PFPM_WUS_MNG_MASK (0x1 << I40E_PFPM_WUS_MNG_SHIFT)
+#define I40E_PFPM_WUS_MNG_MASK I40E_MASK(0x1, I40E_PFPM_WUS_MNG_SHIFT)
 #define I40E_PFPM_WUS_FLX0_SHIFT 16
-#define I40E_PFPM_WUS_FLX0_MASK (0x1 << I40E_PFPM_WUS_FLX0_SHIFT)
+#define I40E_PFPM_WUS_FLX0_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX0_SHIFT)
 #define I40E_PFPM_WUS_FLX1_SHIFT 17
-#define I40E_PFPM_WUS_FLX1_MASK (0x1 << I40E_PFPM_WUS_FLX1_SHIFT)
+#define I40E_PFPM_WUS_FLX1_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX1_SHIFT)
 #define I40E_PFPM_WUS_FLX2_SHIFT 18
-#define I40E_PFPM_WUS_FLX2_MASK (0x1 << I40E_PFPM_WUS_FLX2_SHIFT)
+#define I40E_PFPM_WUS_FLX2_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX2_SHIFT)
 #define I40E_PFPM_WUS_FLX3_SHIFT 19
-#define I40E_PFPM_WUS_FLX3_MASK (0x1 << I40E_PFPM_WUS_FLX3_SHIFT)
+#define I40E_PFPM_WUS_FLX3_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX3_SHIFT)
 #define I40E_PFPM_WUS_FLX4_SHIFT 20
-#define I40E_PFPM_WUS_FLX4_MASK (0x1 << I40E_PFPM_WUS_FLX4_SHIFT)
+#define I40E_PFPM_WUS_FLX4_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX4_SHIFT)
 #define I40E_PFPM_WUS_FLX5_SHIFT 21
-#define I40E_PFPM_WUS_FLX5_MASK (0x1 << I40E_PFPM_WUS_FLX5_SHIFT)
+#define I40E_PFPM_WUS_FLX5_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX5_SHIFT)
 #define I40E_PFPM_WUS_FLX6_SHIFT 22
-#define I40E_PFPM_WUS_FLX6_MASK (0x1 << I40E_PFPM_WUS_FLX6_SHIFT)
+#define I40E_PFPM_WUS_FLX6_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX6_SHIFT)
 #define I40E_PFPM_WUS_FLX7_SHIFT 23
-#define I40E_PFPM_WUS_FLX7_MASK (0x1 << I40E_PFPM_WUS_FLX7_SHIFT)
+#define I40E_PFPM_WUS_FLX7_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FLX7_SHIFT)
 #define I40E_PFPM_WUS_FW_RST_WK_SHIFT 31
-#define I40E_PFPM_WUS_FW_RST_WK_MASK (0x1 << I40E_PFPM_WUS_FW_RST_WK_SHIFT)
-#define I40E_PRTPM_FHFHR 0x0006C000
+#define I40E_PFPM_WUS_FW_RST_WK_MASK I40E_MASK(0x1, I40E_PFPM_WUS_FW_RST_WK_SHIFT)
+#define I40E_PRTPM_FHFHR 0x0006C000 /* Reset: POR */
 #define I40E_PRTPM_FHFHR_UNICAST_SHIFT 0
-#define I40E_PRTPM_FHFHR_UNICAST_MASK (0x1 << I40E_PRTPM_FHFHR_UNICAST_SHIFT)
+#define I40E_PRTPM_FHFHR_UNICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_UNICAST_SHIFT)
 #define I40E_PRTPM_FHFHR_MULTICAST_SHIFT 1
-#define I40E_PRTPM_FHFHR_MULTICAST_MASK (0x1 << I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
-#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_FHFHR_MULTICAST_MASK I40E_MASK(0x1, I40E_PRTPM_FHFHR_MULTICAST_SHIFT)
+#define I40E_PRTPM_SAH(_i) (0x001E44C0 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAH_MAX_INDEX 3
 #define I40E_PRTPM_SAH_PFPM_SAH_SHIFT 0
-#define I40E_PRTPM_SAH_PFPM_SAH_MASK (0xFFFF << I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
+#define I40E_PRTPM_SAH_PFPM_SAH_MASK I40E_MASK(0xFFFF, I40E_PRTPM_SAH_PFPM_SAH_SHIFT)
 #define I40E_PRTPM_SAH_PF_NUM_SHIFT 26
-#define I40E_PRTPM_SAH_PF_NUM_MASK (0xF << I40E_PRTPM_SAH_PF_NUM_SHIFT)
+#define I40E_PRTPM_SAH_PF_NUM_MASK I40E_MASK(0xF, I40E_PRTPM_SAH_PF_NUM_SHIFT)
 #define I40E_PRTPM_SAH_MC_MAG_EN_SHIFT 30
-#define I40E_PRTPM_SAH_MC_MAG_EN_MASK (0x1 << I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
+#define I40E_PRTPM_SAH_MC_MAG_EN_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_MC_MAG_EN_SHIFT)
 #define I40E_PRTPM_SAH_AV_SHIFT 31
-#define I40E_PRTPM_SAH_AV_MASK (0x1 << I40E_PRTPM_SAH_AV_SHIFT)
-#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */
+#define I40E_PRTPM_SAH_AV_MASK I40E_MASK(0x1, I40E_PRTPM_SAH_AV_SHIFT)
+#define I40E_PRTPM_SAL(_i) (0x001E4440 + ((_i) * 32)) /* _i=0...3 */ /* Reset: PFR */
 #define I40E_PRTPM_SAL_MAX_INDEX 3
 #define I40E_PRTPM_SAL_PFPM_SAL_SHIFT 0
-#define I40E_PRTPM_SAL_PFPM_SAL_MASK (0xFFFFFFFF << I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
-#define I40E_VF_ARQBAH1 0x00006000
+#define I40E_PRTPM_SAL_PFPM_SAL_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_SAL_PFPM_SAL_SHIFT)
+#define I40E_VF_ARQBAH1 0x00006000 /* Reset: EMPR */
 #define I40E_VF_ARQBAH1_ARQBAH_SHIFT 0
-#define I40E_VF_ARQBAH1_ARQBAH_MASK (0xFFFFFFFF << I40E_VF_ARQBAH1_ARQBAH_SHIFT)
-#define I40E_VF_ARQBAL1 0x00006C00
+#define I40E_VF_ARQBAH1_ARQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAH1_ARQBAH_SHIFT)
+#define I40E_VF_ARQBAL1 0x00006C00 /* Reset: EMPR */
 #define I40E_VF_ARQBAL1_ARQBAL_SHIFT 0
-#define I40E_VF_ARQBAL1_ARQBAL_MASK (0xFFFFFFFF << I40E_VF_ARQBAL1_ARQBAL_SHIFT)
-#define I40E_VF_ARQH1 0x00007400
+#define I40E_VF_ARQBAL1_ARQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ARQBAL1_ARQBAL_SHIFT)
+#define I40E_VF_ARQH1 0x00007400 /* Reset: EMPR */
 #define I40E_VF_ARQH1_ARQH_SHIFT 0
-#define I40E_VF_ARQH1_ARQH_MASK (0x3FF << I40E_VF_ARQH1_ARQH_SHIFT)
-#define I40E_VF_ARQLEN1 0x00008000
+#define I40E_VF_ARQH1_ARQH_MASK I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
+#define I40E_VF_ARQLEN1 0x00008000 /* Reset: EMPR */
 #define I40E_VF_ARQLEN1_ARQLEN_SHIFT 0
-#define I40E_VF_ARQLEN1_ARQLEN_MASK (0x3FF << I40E_VF_ARQLEN1_ARQLEN_SHIFT)
+#define I40E_VF_ARQLEN1_ARQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ARQLEN1_ARQLEN_SHIFT)
 #define I40E_VF_ARQLEN1_ARQVFE_SHIFT 28
-#define I40E_VF_ARQLEN1_ARQVFE_MASK (0x1 << I40E_VF_ARQLEN1_ARQVFE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQVFE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQVFE_SHIFT)
 #define I40E_VF_ARQLEN1_ARQOVFL_SHIFT 29
-#define I40E_VF_ARQLEN1_ARQOVFL_MASK (0x1 << I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
+#define I40E_VF_ARQLEN1_ARQOVFL_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQOVFL_SHIFT)
 #define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
-#define I40E_VF_ARQLEN1_ARQCRIT_MASK (0x1 << I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
+#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
 #define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK (0x1 << I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
-#define I40E_VF_ARQT1 0x00007000
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
 #define I40E_VF_ARQT1_ARQT_SHIFT 0
-#define I40E_VF_ARQT1_ARQT_MASK (0x3FF << I40E_VF_ARQT1_ARQT_SHIFT)
-#define I40E_VF_ATQBAH1 0x00007800
+#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
+#define I40E_VF_ATQBAH1 0x00007800 /* Reset: EMPR */
 #define I40E_VF_ATQBAH1_ATQBAH_SHIFT 0
-#define I40E_VF_ATQBAH1_ATQBAH_MASK (0xFFFFFFFF << I40E_VF_ATQBAH1_ATQBAH_SHIFT)
-#define I40E_VF_ATQBAL1 0x00007C00
+#define I40E_VF_ATQBAH1_ATQBAH_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAH1_ATQBAH_SHIFT)
+#define I40E_VF_ATQBAL1 0x00007C00 /* Reset: EMPR */
 #define I40E_VF_ATQBAL1_ATQBAL_SHIFT 0
-#define I40E_VF_ATQBAL1_ATQBAL_MASK (0xFFFFFFFF << I40E_VF_ATQBAL1_ATQBAL_SHIFT)
-#define I40E_VF_ATQH1 0x00006400
+#define I40E_VF_ATQBAL1_ATQBAL_MASK I40E_MASK(0xFFFFFFFF, I40E_VF_ATQBAL1_ATQBAL_SHIFT)
+#define I40E_VF_ATQH1 0x00006400 /* Reset: EMPR */
 #define I40E_VF_ATQH1_ATQH_SHIFT 0
-#define I40E_VF_ATQH1_ATQH_MASK (0x3FF << I40E_VF_ATQH1_ATQH_SHIFT)
-#define I40E_VF_ATQLEN1 0x00006800
+#define I40E_VF_ATQH1_ATQH_MASK I40E_MASK(0x3FF, I40E_VF_ATQH1_ATQH_SHIFT)
+#define I40E_VF_ATQLEN1 0x00006800 /* Reset: EMPR */
 #define I40E_VF_ATQLEN1_ATQLEN_SHIFT 0
-#define I40E_VF_ATQLEN1_ATQLEN_MASK (0x3FF << I40E_VF_ATQLEN1_ATQLEN_SHIFT)
+#define I40E_VF_ATQLEN1_ATQLEN_MASK I40E_MASK(0x3FF, I40E_VF_ATQLEN1_ATQLEN_SHIFT)
 #define I40E_VF_ATQLEN1_ATQVFE_SHIFT 28
-#define I40E_VF_ATQLEN1_ATQVFE_MASK (0x1 << I40E_VF_ATQLEN1_ATQVFE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQVFE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQVFE_SHIFT)
 #define I40E_VF_ATQLEN1_ATQOVFL_SHIFT 29
-#define I40E_VF_ATQLEN1_ATQOVFL_MASK (0x1 << I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
+#define I40E_VF_ATQLEN1_ATQOVFL_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQOVFL_SHIFT)
 #define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
-#define I40E_VF_ATQLEN1_ATQCRIT_MASK (0x1 << I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
+#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
 #define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK (0x1 << I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
-#define I40E_VF_ATQT1 0x00008400
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
 #define I40E_VF_ATQT1_ATQT_SHIFT 0
-#define I40E_VF_ATQT1_ATQT_MASK (0x3FF << I40E_VF_ATQT1_ATQT_SHIFT)
-#define I40E_VFGEN_RSTAT 0x00008800
+#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
+#define I40E_VFGEN_RSTAT 0x00008800 /* Reset: VFR */
 #define I40E_VFGEN_RSTAT_VFR_STATE_SHIFT 0
-#define I40E_VFGEN_RSTAT_VFR_STATE_MASK (0x3 << I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
-#define I40E_VFINT_DYN_CTL01 0x00005C00
+#define I40E_VFGEN_RSTAT_VFR_STATE_MASK I40E_MASK(0x3, I40E_VFGEN_RSTAT_VFR_STATE_SHIFT)
+#define I40E_VFINT_DYN_CTL01 0x00005C00 /* Reset: VFR */
 #define I40E_VFINT_DYN_CTL01_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTL01_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTL01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTL01_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTL01_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTL01_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
-#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4))
+#define I40E_VFINT_DYN_CTL01_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_INTENA_MSK_SHIFT)
+#define I40E_VFINT_DYN_CTLN1(_INTVF) (0x00003800 + ((_INTVF) * 4)) /* _i=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_DYN_CTLN1_MAX_INDEX 15
 #define I40E_VFINT_DYN_CTLN1_INTENA_SHIFT 0
-#define I40E_VFINT_DYN_CTLN1_INTENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT 1
-#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_CLEARPBA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT 2
-#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SWINT_TRIG_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT 3
-#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT 5
-#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK (0xFFF << I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_DYN_CTLN1_INTERVAL_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT 24
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK (0x1 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_ENA_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT 25
-#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK (0x3 << I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_DYN_CTLN1_SW_ITR_INDX_SHIFT)
 #define I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT 31
-#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK (0x1 << I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
-#define I40E_VFINT_ICR0_ENA1 0x00005000
+#define I40E_VFINT_DYN_CTLN1_INTENA_MSK_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_INTENA_MSK_SHIFT)
+#define I40E_VFINT_ICR0_ENA1 0x00005000 /* Reset: CORER */
 #define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK (0x1 << I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR0_ENA1_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR0_ENA1_RSVD_SHIFT 31
-#define I40E_VFINT_ICR0_ENA1_RSVD_MASK (0x1 << I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
-#define I40E_VFINT_ICR01 0x00004800
+#define I40E_VFINT_ICR0_ENA1_RSVD_MASK I40E_MASK(0x1, I40E_VFINT_ICR0_ENA1_RSVD_SHIFT)
+#define I40E_VFINT_ICR01 0x00004800 /* Reset: CORER */
 #define I40E_VFINT_ICR01_INTEVENT_SHIFT 0
-#define I40E_VFINT_ICR01_INTEVENT_MASK (0x1 << I40E_VFINT_ICR01_INTEVENT_SHIFT)
+#define I40E_VFINT_ICR01_INTEVENT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_INTEVENT_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_0_SHIFT 1
-#define I40E_VFINT_ICR01_QUEUE_0_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_0_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_0_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_0_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_1_SHIFT 2
-#define I40E_VFINT_ICR01_QUEUE_1_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_1_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_1_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_1_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_2_SHIFT 3
-#define I40E_VFINT_ICR01_QUEUE_2_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_2_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_2_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_2_SHIFT)
 #define I40E_VFINT_ICR01_QUEUE_3_SHIFT 4
-#define I40E_VFINT_ICR01_QUEUE_3_MASK (0x1 << I40E_VFINT_ICR01_QUEUE_3_SHIFT)
+#define I40E_VFINT_ICR01_QUEUE_3_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_QUEUE_3_SHIFT)
 #define I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT 25
-#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK (0x1 << I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
+#define I40E_VFINT_ICR01_LINK_STAT_CHANGE_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_LINK_STAT_CHANGE_SHIFT)
 #define I40E_VFINT_ICR01_ADMINQ_SHIFT 30
-#define I40E_VFINT_ICR01_ADMINQ_MASK (0x1 << I40E_VFINT_ICR01_ADMINQ_SHIFT)
+#define I40E_VFINT_ICR01_ADMINQ_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_ADMINQ_SHIFT)
 #define I40E_VFINT_ICR01_SWINT_SHIFT 31
-#define I40E_VFINT_ICR01_SWINT_MASK (0x1 << I40E_VFINT_ICR01_SWINT_SHIFT)
-#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */
+#define I40E_VFINT_ICR01_SWINT_MASK I40E_MASK(0x1, I40E_VFINT_ICR01_SWINT_SHIFT)
+#define I40E_VFINT_ITR01(_i) (0x00004C00 + ((_i) * 4)) /* _i=0...2 */ /* Reset: VFR */
 #define I40E_VFINT_ITR01_MAX_INDEX 2
 #define I40E_VFINT_ITR01_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITR01_INTERVAL_MASK (0xFFF << I40E_VFINT_ITR01_INTERVAL_SHIFT)
-#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4))
+#define I40E_VFINT_ITR01_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITR01_INTERVAL_SHIFT)
+#define I40E_VFINT_ITRN1(_i, _INTVF) (0x00002800 + ((_i) * 64 + (_INTVF) * 4)) /* _i=0...2, _INTVF=0...15 */ /* Reset: VFR */
 #define I40E_VFINT_ITRN1_MAX_INDEX 2
 #define I40E_VFINT_ITRN1_INTERVAL_SHIFT 0
-#define I40E_VFINT_ITRN1_INTERVAL_MASK (0xFFF << I40E_VFINT_ITRN1_INTERVAL_SHIFT)
-#define I40E_VFINT_STAT_CTL01 0x00005400
+#define I40E_VFINT_ITRN1_INTERVAL_MASK I40E_MASK(0xFFF, I40E_VFINT_ITRN1_INTERVAL_SHIFT)
+#define I40E_VFINT_STAT_CTL01 0x00005400 /* Reset: VFR */
 #define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT 2
-#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK (0x3 << I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
-#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_MASK I40E_MASK(0x3, I40E_VFINT_STAT_CTL01_OTHER_ITR_INDX_SHIFT)
+#define I40E_QRX_TAIL1(_Q) (0x00002000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_QRX_TAIL1_MAX_INDEX 15
 #define I40E_QRX_TAIL1_TAIL_SHIFT 0
-#define I40E_QRX_TAIL1_TAIL_MASK (0x1FFF << I40E_QRX_TAIL1_TAIL_SHIFT)
-#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */
+#define I40E_QRX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QRX_TAIL1_TAIL_SHIFT)
+#define I40E_QTX_TAIL1(_Q) (0x00000000 + ((_Q) * 4)) /* _i=0...15 */ /* Reset: PFR */
 #define I40E_QTX_TAIL1_MAX_INDEX 15
 #define I40E_QTX_TAIL1_TAIL_SHIFT 0
-#define I40E_QTX_TAIL1_TAIL_MASK (0x1FFF << I40E_QTX_TAIL1_TAIL_SHIFT)
-#define I40E_VFMSIX_PBA 0x00002000
+#define I40E_QTX_TAIL1_TAIL_MASK I40E_MASK(0x1FFF, I40E_QTX_TAIL1_TAIL_SHIFT)
+#define I40E_VFMSIX_PBA 0x00002000 /* Reset: VFLR */
 #define I40E_VFMSIX_PBA_PENBIT_SHIFT 0
-#define I40E_VFMSIX_PBA_PENBIT_MASK (0xFFFFFFFF << I40E_VFMSIX_PBA_PENBIT_SHIFT)
-#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_PBA_PENBIT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_PBA_PENBIT_SHIFT)
+#define I40E_VFMSIX_TADD(_i) (0x00000000 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TADD_MAX_INDEX 16
 #define I40E_VFMSIX_TADD_MSIXTADD10_SHIFT 0
-#define I40E_VFMSIX_TADD_MSIXTADD10_MASK (0x3 << I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
+#define I40E_VFMSIX_TADD_MSIXTADD10_MASK I40E_MASK(0x3, I40E_VFMSIX_TADD_MSIXTADD10_SHIFT)
 #define I40E_VFMSIX_TADD_MSIXTADD_SHIFT 2
-#define I40E_VFMSIX_TADD_MSIXTADD_MASK (0x3FFFFFFF << I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
-#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TADD_MSIXTADD_MASK I40E_MASK(0x3FFFFFFF, I40E_VFMSIX_TADD_MSIXTADD_SHIFT)
+#define I40E_VFMSIX_TMSG(_i) (0x00000008 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TMSG_MAX_INDEX 16
 #define I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT 0
-#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK (0xFFFFFFFF << I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
-#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TMSG_MSIXTMSG_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TMSG_MSIXTMSG_SHIFT)
+#define I40E_VFMSIX_TUADD(_i) (0x00000004 + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TUADD_MAX_INDEX 16
 #define I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT 0
-#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK (0xFFFFFFFF << I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
-#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */
+#define I40E_VFMSIX_TUADD_MSIXTUADD_MASK I40E_MASK(0xFFFFFFFF, I40E_VFMSIX_TUADD_MSIXTUADD_SHIFT)
+#define I40E_VFMSIX_TVCTRL(_i) (0x0000000C + ((_i) * 16)) /* _i=0...16 */ /* Reset: VFLR */
 #define I40E_VFMSIX_TVCTRL_MAX_INDEX 16
 #define I40E_VFMSIX_TVCTRL_MASK_SHIFT 0
-#define I40E_VFMSIX_TVCTRL_MASK_MASK (0x1 << I40E_VFMSIX_TVCTRL_MASK_SHIFT)
-#define I40E_VFCM_PE_ERRDATA 0x0000DC00
+#define I40E_VFMSIX_TVCTRL_MASK_MASK I40E_MASK(0x1, I40E_VFMSIX_TVCTRL_MASK_SHIFT)
+#define I40E_VFCM_PE_ERRDATA 0x0000DC00 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
-#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK (0xF << I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_VFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
-#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK (0x7 << I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_VFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRDATA_Q_TYPE_SHIFT)
 #define I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT 8
-#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK (0x3FFFF << I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
-#define I40E_VFCM_PE_ERRINFO 0x0000D800
+#define I40E_VFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_VFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_VFCM_PE_ERRINFO 0x0000D800 /* Reset: VFR */
 #define I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
-#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK (0x1 << I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_VFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
-#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK (0x7 << I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_VFCM_PE_ERRINFO_ERROR_INST_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
-#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
-#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
 #define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
-#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK (0xFF << I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
-#define I40E_VFPE_AEQALLOC1 0x0000A400
-#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
-#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK (0xFFFFFFFF << I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
-#define I40E_VFPE_CCQPHIGH1 0x00009800
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
-#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK (0xFFFFFFFF << I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
-#define I40E_VFPE_CCQPLOW1 0x0000AC00
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
-#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK (0xFFFFFFFF << I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1 0x0000B800
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
-#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
-#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK (0x1 << I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
-#define I40E_VFPE_CQACK1 0x0000B000
-#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
-#define I40E_VFPE_CQACK1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQACK1_PECQID_SHIFT)
-#define I40E_VFPE_CQARM1 0x0000B400
-#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
-#define I40E_VFPE_CQARM1_PECQID_MASK (0x1FFFF << I40E_VFPE_CQARM1_PECQID_SHIFT)
-#define I40E_VFPE_CQPDB1 0x0000BC00
-#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
-#define I40E_VFPE_CQPDB1_WQHEAD_MASK (0x7FF << I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
-#define I40E_VFPE_CQPERRCODES1 0x00009C00
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
-#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
-#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK (0xFFFF << I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
-#define I40E_VFPE_CQPTAIL1 0x0000A000
-#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
-#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK (0x7FF << I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
-#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK (0x1 << I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
-#define I40E_VFPE_IPCONFIG01 0x00008C00
-#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
-#define I40E_VFPE_IPCONFIG01_PEIPID_MASK (0xFFFF << I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
-#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK (0x1 << I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
-#define I40E_VFPE_MRTEIDXMASK1 0x00009000
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
-#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK (0x1F << I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
-#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
-#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK (0xFFFFFF << I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
-#define I40E_VFPE_TCPNOWTIMER1 0x0000A800
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
-#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK (0xFFFFFFFF << I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
-#define I40E_VFPE_WQEALLOC1 0x0000C000
-#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
-#define I40E_VFPE_WQEALLOC1_PEQPID_MASK (0x3FFFF << I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
-#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK (0xFFF << I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
-#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */
+#define I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_VFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+#define I40E_VFQF_HENA(_i) (0x0000C400 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
 #define I40E_VFQF_HENA_MAX_INDEX 1
 #define I40E_VFQF_HENA_PTYPE_ENA_SHIFT 0
-#define I40E_VFQF_HENA_PTYPE_ENA_MASK (0xFFFFFFFF << I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
-#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */
+#define I40E_VFQF_HENA_PTYPE_ENA_MASK I40E_MASK(0xFFFFFFFF, I40E_VFQF_HENA_PTYPE_ENA_SHIFT)
+#define I40E_VFQF_HKEY(_i) (0x0000CC00 + ((_i) * 4)) /* _i=0...12 */ /* Reset: CORER */
 #define I40E_VFQF_HKEY_MAX_INDEX 12
 #define I40E_VFQF_HKEY_KEY_0_SHIFT 0
-#define I40E_VFQF_HKEY_KEY_0_MASK (0xFF << I40E_VFQF_HKEY_KEY_0_SHIFT)
+#define I40E_VFQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_0_SHIFT)
 #define I40E_VFQF_HKEY_KEY_1_SHIFT 8
-#define I40E_VFQF_HKEY_KEY_1_MASK (0xFF << I40E_VFQF_HKEY_KEY_1_SHIFT)
+#define I40E_VFQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_1_SHIFT)
 #define I40E_VFQF_HKEY_KEY_2_SHIFT 16
-#define I40E_VFQF_HKEY_KEY_2_MASK (0xFF << I40E_VFQF_HKEY_KEY_2_SHIFT)
+#define I40E_VFQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_2_SHIFT)
 #define I40E_VFQF_HKEY_KEY_3_SHIFT 24
-#define I40E_VFQF_HKEY_KEY_3_MASK (0xFF << I40E_VFQF_HKEY_KEY_3_SHIFT)
-#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */
+#define I40E_VFQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VFQF_HKEY_KEY_3_SHIFT)
+#define I40E_VFQF_HLUT(_i) (0x0000D000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
 #define I40E_VFQF_HLUT_MAX_INDEX 15
 #define I40E_VFQF_HLUT_LUT0_SHIFT 0
-#define I40E_VFQF_HLUT_LUT0_MASK (0xF << I40E_VFQF_HLUT_LUT0_SHIFT)
+#define I40E_VFQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT0_SHIFT)
 #define I40E_VFQF_HLUT_LUT1_SHIFT 8
-#define I40E_VFQF_HLUT_LUT1_MASK (0xF << I40E_VFQF_HLUT_LUT1_SHIFT)
+#define I40E_VFQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT1_SHIFT)
 #define I40E_VFQF_HLUT_LUT2_SHIFT 16
-#define I40E_VFQF_HLUT_LUT2_MASK (0xF << I40E_VFQF_HLUT_LUT2_SHIFT)
+#define I40E_VFQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT2_SHIFT)
 #define I40E_VFQF_HLUT_LUT3_SHIFT 24
-#define I40E_VFQF_HLUT_LUT3_MASK (0xF << I40E_VFQF_HLUT_LUT3_SHIFT)
-#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */
+#define I40E_VFQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VFQF_HLUT_LUT3_SHIFT)
+#define I40E_VFQF_HREGION(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...7 */ /* Reset: CORER */
 #define I40E_VFQF_HREGION_MAX_INDEX 7
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
 #define I40E_VFQF_HREGION_REGION_0_SHIFT 1
-#define I40E_VFQF_HREGION_REGION_0_MASK (0x7 << I40E_VFQF_HREGION_REGION_0_SHIFT)
+#define I40E_VFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_0_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
 #define I40E_VFQF_HREGION_REGION_1_SHIFT 5
-#define I40E_VFQF_HREGION_REGION_1_MASK (0x7 << I40E_VFQF_HREGION_REGION_1_SHIFT)
+#define I40E_VFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_1_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
 #define I40E_VFQF_HREGION_REGION_2_SHIFT 9
-#define I40E_VFQF_HREGION_REGION_2_MASK (0x7 << I40E_VFQF_HREGION_REGION_2_SHIFT)
+#define I40E_VFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_2_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
 #define I40E_VFQF_HREGION_REGION_3_SHIFT 13
-#define I40E_VFQF_HREGION_REGION_3_MASK (0x7 << I40E_VFQF_HREGION_REGION_3_SHIFT)
+#define I40E_VFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_3_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
 #define I40E_VFQF_HREGION_REGION_4_SHIFT 17
-#define I40E_VFQF_HREGION_REGION_4_MASK (0x7 << I40E_VFQF_HREGION_REGION_4_SHIFT)
+#define I40E_VFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_4_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
 #define I40E_VFQF_HREGION_REGION_5_SHIFT 21
-#define I40E_VFQF_HREGION_REGION_5_MASK (0x7 << I40E_VFQF_HREGION_REGION_5_SHIFT)
+#define I40E_VFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_5_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
 #define I40E_VFQF_HREGION_REGION_6_SHIFT 25
-#define I40E_VFQF_HREGION_REGION_6_MASK (0x7 << I40E_VFQF_HREGION_REGION_6_SHIFT)
+#define I40E_VFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_6_SHIFT)
 #define I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
-#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK (0x1 << I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
 #define I40E_VFQF_HREGION_REGION_7_SHIFT 29
-#define I40E_VFQF_HREGION_REGION_7_MASK (0x7 << I40E_VFQF_HREGION_REGION_7_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS 0x00270110
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT 0
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_WR_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT 8
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_RD_ACCESS_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT 16
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_MASK (0xFF << I40E_RCU_PST_FOC_ACCESS_STATUS_ERR_CNT_SHIFT)
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT 24
-#define I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_MASK (0x7 << I40E_RCU_PST_FOC_ACCESS_STATUS_LAST_ERR_CODE_SHIFT)
+#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
 #endif
index 48ebb6c..79bf96c 100644
@@ -50,7 +50,11 @@ static void i40e_unmap_and_free_tx_resource(struct i40e_ring *ring,
                                            struct i40e_tx_buffer *tx_buffer)
 {
        if (tx_buffer->skb) {
-               dev_kfree_skb_any(tx_buffer->skb);
+               if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
+                       kfree(tx_buffer->raw_buf);
+               else
+                       dev_kfree_skb_any(tx_buffer->skb);
+
                if (dma_unmap_len(tx_buffer, len))
                        dma_unmap_single(ring->dev,
                                         dma_unmap_addr(tx_buffer, dma),
@@ -769,8 +773,6 @@ static inline void i40e_rx_checksum(struct i40e_vsi *vsi,
 
        /* likely incorrect csum if alternate IP extension headers found */
        if (ipv6 &&
-           decoded.inner_prot == I40E_RX_PTYPE_INNER_PROT_TCP &&
-           rx_error & (1 << I40E_RX_DESC_ERROR_L4E_SHIFT) &&
            rx_status & (1 << I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT))
                /* don't increment checksum err here, non-fatal err */
                return;
@@ -1336,6 +1338,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
        /* cpu_to_le32 and assign to struct fields */
        context_desc->tunneling_params = cpu_to_le32(cd_tunneling);
        context_desc->l2tag2 = cpu_to_le16(cd_l2tag2);
+       context_desc->rsvd = cpu_to_le16(0);
        context_desc->type_cmd_tso_mss = cpu_to_le64(cd_type_cmd_tso_mss);
 }
 
index 30d248b..8bc6858 100644
@@ -75,7 +75,6 @@ enum i40e_dyn_idx_t {
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
        ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
-       ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
        ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
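For orientation, a hedged sketch of how a 64-bit PCTYPE hash-enable bitmap like the one being trimmed above would typically be programmed into the pair of 32-bit VFQF_HENA registers defined earlier in this patch (the helper name is illustrative, not from the patch):

	/* Illustrative sketch only: split a 64-bit RSS hash-enable bitmap
	 * across the two 32-bit VFQF_HENA registers (_i = 0 holds bits 31:0).
	 */
	static void i40evf_write_hena(struct i40e_hw *hw, u64 hena)
	{
		wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
		wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
	}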
@@ -131,6 +130,7 @@ enum i40e_dyn_idx_t {
 #define I40E_TX_FLAGS_IPV6             (u32)(1 << 5)
 #define I40E_TX_FLAGS_FCCRC            (u32)(1 << 6)
 #define I40E_TX_FLAGS_FSO              (u32)(1 << 7)
+#define I40E_TX_FLAGS_FD_SB            (u32)(1 << 9)
 #define I40E_TX_FLAGS_VLAN_MASK                0xffff0000
 #define I40E_TX_FLAGS_VLAN_PRIO_MASK   0xe0000000
 #define I40E_TX_FLAGS_VLAN_PRIO_SHIFT  29
@@ -139,7 +139,10 @@ enum i40e_dyn_idx_t {
 struct i40e_tx_buffer {
        struct i40e_tx_desc *next_to_watch;
        unsigned long time_stamp;
-       struct sk_buff *skb;
+       union {
+               struct sk_buff *skb;
+               void *raw_buf;
+       };
        unsigned int bytecount;
        unsigned short gso_segs;
        DEFINE_DMA_UNMAP_ADDR(dma);
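Taken together with the free path in the first hunk of this file pair, the new union relies on I40E_TX_FLAGS_FD_SB to record which member is live; a minimal sketch of that invariant, condensed from the hunks above:

	/* skb and raw_buf share storage, so tx_flags must say which is live. */
	if (tx_buffer->tx_flags & I40E_TX_FLAGS_FD_SB)
		kfree(tx_buffer->raw_buf);         /* raw Flow Director buffer */
	else
		dev_kfree_skb_any(tx_buffer->skb); /* normal transmit skb */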
index d3cf5a6..1537643 100644
@@ -50,6 +50,9 @@
                                         (d) == I40E_DEV_ID_QSFP_B  || \
                                         (d) == I40E_DEV_ID_QSFP_C)
 
+/* I40E_MASK is a macro used on 32 bit registers */
+#define I40E_MASK(mask, shift) (mask << shift)
+
 #define I40E_MAX_VSI_QP                        16
 #define I40E_MAX_VF_VSI                        3
 #define I40E_MAX_CHAINED_RX_BUFFERS    5
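A quick worked expansion of the new macro, assuming only the definition above: I40E_MASK() centralizes the (value << shift) pattern behind the thousands of register field masks converted in this patch.

	/* Expansion example (values from the defines earlier in this patch):
	 *   I40E_VF_ARQH1_ARQH_MASK
	 *     = I40E_MASK(0x3FF, I40E_VF_ARQH1_ARQH_SHIFT)
	 *     = (0x3FF << 0)
	 * Extracting a field then follows the usual read-mask-shift idiom:
	 */
	u32 head = (rd32(hw, I40E_VF_ARQH1) & I40E_VF_ARQH1_ARQH_MASK) >>
		   I40E_VF_ARQH1_ARQH_SHIFT;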
@@ -137,6 +140,14 @@ enum i40e_fc_mode {
        I40E_FC_DEFAULT
 };
 
+enum i40e_set_fc_aq_failures {
+       I40E_SET_FC_AQ_FAIL_NONE = 0,
+       I40E_SET_FC_AQ_FAIL_GET = 1,
+       I40E_SET_FC_AQ_FAIL_SET = 2,
+       I40E_SET_FC_AQ_FAIL_UPDATE = 4,
+       I40E_SET_FC_AQ_FAIL_SET_UPDATE = 6
+};
+
 enum i40e_vsi_type {
        I40E_VSI_MAIN = 0,
        I40E_VSI_VMDQ1,
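The failure codes added above read as a bitmask rather than a plain enumeration: I40E_SET_FC_AQ_FAIL_SET_UPDATE (6) is exactly FAIL_SET (2) | FAIL_UPDATE (4). A hedged sketch of how a caller might use that (the aq_failures variable and messages are illustrative):

	/* Hedged reading: the values combine as flags, so each stage of the
	 * set-flow-control sequence can be reported on its own. */
	if (aq_failures & I40E_SET_FC_AQ_FAIL_GET)
		dev_info(dev, "get PHY capabilities AQ command failed\n");
	if (aq_failures & I40E_SET_FC_AQ_FAIL_SET)
		dev_info(dev, "set PHY config AQ command failed\n");
	if (aq_failures & I40E_SET_FC_AQ_FAIL_UPDATE)
		dev_info(dev, "update link info AQ command failed\n");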
@@ -163,6 +174,7 @@ struct i40e_link_status {
        u8 an_info;
        u8 ext_info;
        u8 loopback;
+       bool an_enabled;
        /* is Link Status Event notification to SW enabled */
        bool lse_enable;
        u16 max_frame_size;
@@ -256,6 +268,61 @@ struct i40e_nvm_info {
        u32 eetrack;              /* NVM data version */
 };
 
+/* definitions used in NVM update support */
+
+enum i40e_nvmupd_cmd {
+       I40E_NVMUPD_INVALID,
+       I40E_NVMUPD_READ_CON,
+       I40E_NVMUPD_READ_SNT,
+       I40E_NVMUPD_READ_LCB,
+       I40E_NVMUPD_READ_SA,
+       I40E_NVMUPD_WRITE_ERA,
+       I40E_NVMUPD_WRITE_CON,
+       I40E_NVMUPD_WRITE_SNT,
+       I40E_NVMUPD_WRITE_LCB,
+       I40E_NVMUPD_WRITE_SA,
+       I40E_NVMUPD_CSUM_CON,
+       I40E_NVMUPD_CSUM_SA,
+       I40E_NVMUPD_CSUM_LCB,
+};
+
+enum i40e_nvmupd_state {
+       I40E_NVMUPD_STATE_INIT,
+       I40E_NVMUPD_STATE_READING,
+       I40E_NVMUPD_STATE_WRITING
+};
+
+/* nvm_access definition and its masks/shifts need to be accessible to
+ * application, core driver, and shared code.  Where is the right file?
+ */
+#define I40E_NVM_READ  0xB
+#define I40E_NVM_WRITE 0xC
+
+#define I40E_NVM_MOD_PNT_MASK 0xFF
+
+#define I40E_NVM_TRANS_SHIFT   8
+#define I40E_NVM_TRANS_MASK    (0xf << I40E_NVM_TRANS_SHIFT)
+#define I40E_NVM_CON           0x0
+#define I40E_NVM_SNT           0x1
+#define I40E_NVM_LCB           0x2
+#define I40E_NVM_SA            (I40E_NVM_SNT | I40E_NVM_LCB)
+#define I40E_NVM_ERA           0x4
+#define I40E_NVM_CSUM          0x8
+
+#define I40E_NVM_ADAPT_SHIFT   16
+#define I40E_NVM_ADAPT_MASK    (0xffff << I40E_NVM_ADAPT_SHIFT)
+
+#define I40E_NVMUPD_MAX_DATA   4096
+#define I40E_NVMUPD_IFACE_TIMEOUT 2 /* seconds */
+
+struct i40e_nvm_access {
+       u32 command;
+       u32 config;
+       u32 offset;     /* in bytes */
+       u32 data_size;  /* in bytes */
+       u8 data[1];
+};
+
 /* PCI bus types */
 enum i40e_bus_type {
        i40e_bus_type_unknown = 0,
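For the NVM update interface above, the config word appears to pack the module pointer in its low byte (I40E_NVM_MOD_PNT_MASK) and the transaction type at I40E_NVM_TRANS_SHIFT; a hedged sketch of building one request, with the field layout inferred from the masks rather than stated in the patch:

	/* Assumed layout, per the masks above:
	 *   config[7:0]  = module pointer (I40E_NVM_MOD_PNT_MASK)
	 *   config[11:8] = transaction type (I40E_NVM_TRANS_MASK)
	 */
	struct i40e_nvm_access cmd = {
		.command   = I40E_NVM_READ,
		.config    = 0x2 | (I40E_NVM_SNT << I40E_NVM_TRANS_SHIFT),
		.offset    = 0,                    /* bytes into the module */
		.data_size = I40E_NVMUPD_MAX_DATA,
	};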
@@ -391,6 +458,9 @@ struct i40e_hw {
        /* Admin Queue info */
        struct i40e_adminq_info aq;
 
+       /* state of nvm update process */
+       enum i40e_nvmupd_state nvmupd_state;
+
        /* HMC info */
        struct i40e_hmc_info hmc; /* HMC info struct */
 
@@ -875,7 +945,6 @@ enum i40e_filter_pctype {
        I40E_FILTER_PCTYPE_FRAG_IPV4                    = 36,
        /* Note: Values 37-40 are reserved for future use */
        I40E_FILTER_PCTYPE_NONF_IPV6_UDP                = 41,
-       I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN            = 42,
        I40E_FILTER_PCTYPE_NONF_IPV6_TCP                = 43,
        I40E_FILTER_PCTYPE_NONF_IPV6_SCTP               = 44,
        I40E_FILTER_PCTYPE_NONF_IPV6_OTHER              = 45,
@@ -1162,4 +1231,7 @@ enum i40e_reset_type {
        I40E_RESET_GLOBR        = 2,
        I40E_RESET_EMPR         = 3,
 };
+
+/* RSS Hash Table Size */
+#define I40E_PFQF_CTL_0_HASHLUTSIZE_512        0x00010000
 #endif /* _I40E_TYPE_H_ */
index 60407a9..efee6b2 100644
@@ -193,7 +193,7 @@ static void i40evf_set_msglevel(struct net_device *netdev, u32 data)
 }
 
 /**
- * i40evf_get_drvinto - Get driver info
+ * i40evf_get_drvinfo - Get driver info
  * @netdev: network interface device structure
  * @drvinfo: ethtool driver info structure
  *
@@ -632,7 +632,7 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key)
        u32 hlut_val;
        int i, j;
 
-       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX; i++) {
+       for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
                hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
                indir[j++] = hlut_val & 0xff;
                indir[j++] = (hlut_val >> 8) & 0xff;
@@ -659,7 +659,7 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
        u32 hlut_val;
        int i, j;
 
-       for (i = 0, j = 0; i < I40E_VFQF_HLUT_MAX_INDEX + 1; i++) {
+       for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
                hlut_val = indir[j++];
                hlut_val |= indir[j++] << 8;
                hlut_val |= indir[j++] << 16;
index 7fc5f3b..ab15f4d 100644
@@ -34,9 +34,9 @@ static int i40evf_close(struct net_device *netdev);
 
 char i40evf_driver_name[] = "i40evf";
 static const char i40evf_driver_string[] =
-       "Intel(R) XL710 X710 Virtual Function Network Driver";
+       "Intel(R) XL710/X710 Virtual Function Network Driver";
 
-#define DRV_VERSION "0.9.34"
+#define DRV_VERSION "0.9.40"
 const char i40evf_driver_version[] = DRV_VERSION;
 static const char i40evf_copyright[] =
        "Copyright (c) 2013 - 2014 Intel Corporation.";
@@ -260,6 +260,12 @@ static void i40evf_fire_sw_int(struct i40evf_adapter *adapter,
        int i;
        uint32_t dyn_ctl;
 
+       if (mask & 1) {
+               dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTL01);
+               dyn_ctl |= I40E_VFINT_DYN_CTLN_SWINT_TRIG_MASK |
+                          I40E_VFINT_DYN_CTLN_CLEARPBA_MASK;
+               wr32(hw, I40E_VFINT_DYN_CTL01, dyn_ctl);
+       }
        for (i = 1; i < adapter->num_msix_vectors; i++) {
                if (mask & (1 << i)) {
                        dyn_ctl = rd32(hw, I40E_VFINT_DYN_CTLN1(i - 1));
@@ -278,6 +284,7 @@ void i40evf_irq_enable(struct i40evf_adapter *adapter, bool flush)
 {
        struct i40e_hw *hw = &adapter->hw;
 
+       i40evf_misc_irq_enable(adapter);
        i40evf_irq_enable_queues(adapter, ~0);
 
        if (flush)
@@ -520,7 +527,8 @@ static int i40evf_request_misc_irq(struct i40evf_adapter *adapter)
        struct net_device *netdev = adapter->netdev;
        int err;
 
-       sprintf(adapter->misc_vector_name, "i40evf:mbx");
+       snprintf(adapter->misc_vector_name,
+                sizeof(adapter->misc_vector_name) - 1, "i40evf:mbx");
        err = request_irq(adapter->msix_entries[0].vector,
                          &i40evf_msix_aq, 0,
                          adapter->misc_vector_name, netdev);
@@ -761,7 +769,7 @@ i40evf_mac_filter *i40evf_add_filter(struct i40evf_adapter *adapter,
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
                                &adapter->crit_section))
-               mdelay(1);
+               udelay(1);
 
        f = i40evf_find_filter(adapter, macaddr);
        if (NULL == f) {
@@ -833,7 +841,7 @@ static void i40evf_set_rx_mode(struct net_device *netdev)
 
        while (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK,
                                &adapter->crit_section))
-               mdelay(1);
+               udelay(1);
        /* remove filter if not in netdev list */
        list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
                bool found = false;
@@ -1290,12 +1298,16 @@ static void i40evf_watchdog_task(struct work_struct *work)
                                          struct i40evf_adapter,
                                          watchdog_task);
        struct i40e_hw *hw = &adapter->hw;
+       uint32_t rstat_val;
 
        if (test_and_set_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section))
                goto restart_watchdog;
 
        if (adapter->flags & I40EVF_FLAG_PF_COMMS_FAILED) {
-               if ((rd32(hw, I40E_VFGEN_RSTAT) & 0x3) == I40E_VFR_VFACTIVE) {
+               rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+                           I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+               if ((rstat_val == I40E_VFR_VFACTIVE) ||
+                   (rstat_val == I40E_VFR_COMPLETED)) {
                        /* A chance for redemption! */
                        dev_err(&adapter->pdev->dev, "Hardware came out of reset. Attempting reinit.\n");
                        adapter->state = __I40EVF_STARTUP;
@@ -1321,8 +1333,11 @@ static void i40evf_watchdog_task(struct work_struct *work)
                goto watchdog_done;
 
        /* check for reset */
+       rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
+                           I40E_VFGEN_RSTAT_VFR_STATE_MASK;
        if (!(adapter->flags & I40EVF_FLAG_RESET_PENDING) &&
-           (rd32(hw, I40E_VFGEN_RSTAT) & 0x3) != I40E_VFR_VFACTIVE) {
+           (rstat_val != I40E_VFR_VFACTIVE) &&
+           (rstat_val != I40E_VFR_COMPLETED)) {
                adapter->state = __I40EVF_RESETTING;
                adapter->flags |= I40EVF_FLAG_RESET_PENDING;
                dev_err(&adapter->pdev->dev, "Hardware reset detected\n");
@@ -1388,6 +1403,8 @@ static void i40evf_watchdog_task(struct work_struct *work)
 watchdog_done:
        clear_bit(__I40EVF_IN_CRITICAL_TASK, &adapter->crit_section);
 restart_watchdog:
+       if (adapter->state == __I40EVF_REMOVE)
+               return;
        if (adapter->aq_required)
                mod_timer(&adapter->watchdog_timer,
                          jiffies + msecs_to_jiffies(20));
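
The restart_watchdog change above is a teardown guard: once the adapter has
reached __I40EVF_REMOVE, the handler returns before any mod_timer() call, so
the watchdog can no longer re-arm itself while i40evf_remove() is tearing the
device down. In outline:

        restart_watchdog:
                if (adapter->state == __I40EVF_REMOVE)
                        return;         /* never re-arm during removal */
                /* otherwise fall through to the mod_timer() calls */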
@@ -1488,7 +1505,8 @@ static void i40evf_reset_task(struct work_struct *work)
        for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                            I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-               if (rstat_val != I40E_VFR_VFACTIVE)
+               if ((rstat_val != I40E_VFR_VFACTIVE) &&
+                   (rstat_val != I40E_VFR_COMPLETED))
                        break;
                else
                        msleep(I40EVF_RESET_WAIT_MS);
@@ -1502,12 +1520,16 @@ static void i40evf_reset_task(struct work_struct *work)
        for (i = 0; i < I40EVF_RESET_WAIT_COUNT; i++) {
                rstat_val = rd32(hw, I40E_VFGEN_RSTAT) &
                            I40E_VFGEN_RSTAT_VFR_STATE_MASK;
-               if (rstat_val == I40E_VFR_VFACTIVE)
+               if ((rstat_val == I40E_VFR_VFACTIVE) ||
+                   (rstat_val == I40E_VFR_COMPLETED))
                        break;
                else
                        msleep(I40EVF_RESET_WAIT_MS);
        }
        if (i == I40EVF_RESET_WAIT_COUNT) {
+               struct i40evf_mac_filter *f, *ftmp;
+               struct i40evf_vlan_filter *fv, *fvtmp;
+
                /* reset never finished */
                dev_err(&adapter->pdev->dev, "Reset never finished (%x)\n",
                        rstat_val);
@@ -1520,9 +1542,23 @@ static void i40evf_reset_task(struct work_struct *work)
                        i40evf_free_all_tx_resources(adapter);
                        i40evf_free_all_rx_resources(adapter);
                }
+
+               /* Delete all of the filters, both MAC and VLAN. */
+               list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list,
+                                        list) {
+                       list_del(&f->list);
+                       kfree(f);
+               }
+               list_for_each_entry_safe(fv, fvtmp, &adapter->vlan_filter_list,
+                                        list) {
+                       list_del(&fv->list);
+                       kfree(fv);
+               }
+
                i40evf_free_misc_irq(adapter);
                i40evf_reset_interrupt_capability(adapter);
                i40evf_free_queues(adapter);
+               i40evf_free_q_vectors(adapter);
                kfree(adapter->vf_res);
                i40evf_shutdown_adminq(hw);
                adapter->netdev->flags &= ~IFF_UP;
@@ -1939,8 +1975,10 @@ static int i40evf_check_reset_complete(struct i40e_hw *hw)
        int i;
 
        for (i = 0; i < 100; i++) {
-               rstat = rd32(hw, I40E_VFGEN_RSTAT);
-               if (rstat == I40E_VFR_VFACTIVE)
+               rstat = rd32(hw, I40E_VFGEN_RSTAT) &
+                           I40E_VFGEN_RSTAT_VFR_STATE_MASK;
+               if ((rstat == I40E_VFR_VFACTIVE) ||
+                   (rstat == I40E_VFR_COMPLETED))
                        return 0;
                udelay(10);
        }
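
The reset-detection hunks above (watchdog task, reset task, and
i40evf_check_reset_complete()) all apply the same two fixes: the
I40E_VFGEN_RSTAT read is masked down to the VFR state field before any
comparison, and I40E_VFR_COMPLETED is now accepted alongside
I40E_VFR_VFACTIVE as an out-of-reset state. The driver open-codes the test
at each site; a helper capturing the idiom would look like this (the helper
itself is illustrative, not part of the patch):

        static bool i40evf_vf_out_of_reset(struct i40e_hw *hw)
        {
                u32 rstat = rd32(hw, I40E_VFGEN_RSTAT) &
                            I40E_VFGEN_RSTAT_VFR_STATE_MASK;

                return (rstat == I40E_VFR_VFACTIVE) ||
                       (rstat == I40E_VFR_COMPLETED);
        }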
@@ -2006,7 +2044,6 @@ static void i40evf_init_task(struct work_struct *work)
                }
                adapter->state = __I40EVF_INIT_VERSION_CHECK;
                goto restart;
-               break;
        case __I40EVF_INIT_VERSION_CHECK:
                if (!i40evf_asq_done(hw)) {
                        dev_err(&pdev->dev, "Admin queue command never completed\n");
@@ -2018,17 +2055,20 @@ static void i40evf_init_task(struct work_struct *work)
                if (err) {
                        dev_info(&pdev->dev, "Unable to verify API version (%d), retrying\n",
                                err);
+                       if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
+                               dev_info(&pdev->dev, "Resending request\n");
+                               err = i40evf_send_api_ver(adapter);
+                       }
                        goto err;
                }
                err = i40evf_send_vf_config_msg(adapter);
                if (err) {
-                       dev_err(&pdev->dev, "Unable send config request (%d)\n",
+                       dev_err(&pdev->dev, "Unable to send config request (%d)\n",
                                err);
                        goto err;
                }
                adapter->state = __I40EVF_INIT_GET_RESOURCES;
                goto restart;
-               break;
        case __I40EVF_INIT_GET_RESOURCES:
                /* aq msg sent, awaiting reply */
                if (!adapter->vf_res) {
@@ -2097,8 +2137,6 @@ static void i40evf_init_task(struct work_struct *work)
        ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
        ether_addr_copy(netdev->perm_addr, adapter->hw.mac.addr);
 
-       INIT_LIST_HEAD(&adapter->mac_filter_list);
-       INIT_LIST_HEAD(&adapter->vlan_filter_list);
        f = kzalloc(sizeof(*f), GFP_ATOMIC);
        if (NULL == f)
                goto err_sw_init;
@@ -2280,6 +2318,9 @@ static int i40evf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        hw->bus.device = PCI_SLOT(pdev->devfn);
        hw->bus.func = PCI_FUNC(pdev->devfn);
 
+       INIT_LIST_HEAD(&adapter->mac_filter_list);
+       INIT_LIST_HEAD(&adapter->vlan_filter_list);
+
        INIT_WORK(&adapter->reset_task, i40evf_reset_task);
        INIT_WORK(&adapter->adminq_task, i40evf_adminq_task);
        INIT_WORK(&adapter->watchdog_task, i40evf_watchdog_task);
@@ -2391,6 +2432,7 @@ static void i40evf_remove(struct pci_dev *pdev)
 {
        struct net_device *netdev = pci_get_drvdata(pdev);
        struct i40evf_adapter *adapter = netdev_priv(netdev);
+       struct i40evf_mac_filter *f, *ftmp;
        struct i40e_hw *hw = &adapter->hw;
 
        cancel_delayed_work_sync(&adapter->init_task);
@@ -2406,9 +2448,12 @@ static void i40evf_remove(struct pci_dev *pdev)
                i40evf_misc_irq_disable(adapter);
                i40evf_free_misc_irq(adapter);
                i40evf_reset_interrupt_capability(adapter);
+               i40evf_free_q_vectors(adapter);
        }
 
-       del_timer_sync(&adapter->watchdog_timer);
+       if (adapter->watchdog_timer.function)
+               del_timer_sync(&adapter->watchdog_timer);
+
        flush_scheduled_work();
 
        if (hw->aq.asq.count)
@@ -2419,6 +2464,13 @@ static void i40evf_remove(struct pci_dev *pdev)
 
        i40evf_free_queues(adapter);
        kfree(adapter->vf_res);
+       /* If we got removed before an up/down sequence, we've got a filter
+        * hanging out there that we need to get rid of.
+        */
+       list_for_each_entry_safe(f, ftmp, &adapter->mac_filter_list, list) {
+               list_del(&f->list);
+               kfree(f);
+       }
 
        free_netdev(netdev);
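
The filter-list teardown added to the reset task and to i40evf_remove() above
uses list_for_each_entry_safe(), whose second cursor caches the next node so
the current one can be list_del()'d and kfree()'d mid-walk. It also explains
the companion hunk that moves the INIT_LIST_HEAD() calls into i40evf_probe():
the list heads must already be valid when remove() runs, even if the init
task never completed. The same idiom in a self-contained userspace form
(types and names invented for illustration):

        #include <stdlib.h>

        struct filter { struct filter *next; };

        static void free_filters(struct filter **head)
        {
                struct filter *f = *head, *next;

                while (f) {
                        next = f->next; /* saved before free(), like the
                                         * _safe iterator's second cursor */
                        free(f);
                        f = next;
                }
                *head = NULL;           /* head stays valid, list empty */
        }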
 
index 2dc0bac..66d12f5 100644
@@ -80,8 +80,9 @@ int i40evf_send_api_ver(struct i40evf_adapter *adapter)
  * @adapter: adapter structure
  *
  * Compare API versions with the PF. Must be called after admin queue is
- * initialized. Returns 0 if API versions match, -EIO if
- * they do not, or I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty.
+ * initialized. Returns 0 if API versions match, -EIO if they do not,
+ * I40E_ERR_ADMIN_QUEUE_NO_WORK if the admin queue is empty; any error
+ * reported by the firmware is propagated unchanged.
  **/
 int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
 {
@@ -102,13 +103,13 @@ int i40evf_verify_api_ver(struct i40evf_adapter *adapter)
                goto out_alloc;
 
        err = (i40e_status)le32_to_cpu(event.desc.cookie_low);
-       if (err) {
-               err = -EIO;
+       if (err)
                goto out_alloc;
-       }
 
        if ((enum i40e_virtchnl_ops)le32_to_cpu(event.desc.cookie_high) !=
            I40E_VIRTCHNL_OP_VERSION) {
+               dev_info(&adapter->pdev->dev, "Invalid reply type %d from PF\n",
+                        le32_to_cpu(event.desc.cookie_high));
                err = -EIO;
                goto out_alloc;
        }
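
The two i40evf_verify_api_ver() changes above stop flattening every failure
into -EIO: a real firmware status carried in cookie_low now reaches the
caller, and an unexpected reply type is at least logged first. That is what
lets the init-task hunk earlier in this series resend the version request
when the status is I40E_ERR_ADMIN_QUEUE_NO_WORK (an empty admin queue -- the
reply simply has not arrived yet). Caller-side shape, condensed from
i40evf_init_task():

        err = i40evf_verify_api_ver(adapter);
        if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
                /* nothing in the ARQ yet -- ask the PF again */
                err = i40evf_send_api_ver(adapter);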
@@ -247,11 +248,11 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
                vqpi++;
        }
 
+       adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
                           (u8 *)vqci, len);
        kfree(vqci);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
 }
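
This hunk, and the matching ones below for enable/disable queues, vector
mapping, and the MAC/VLAN filter requests, all make the same reordering: the
aq_pending/aq_required bookkeeping is updated before the message is handed to
the PF. The apparent motivation is a race -- once i40evf_send_pf_msg() is
called the PF's reply can be processed at any moment, and a reply that lands
before the flags are set finds no request marked in flight. Schematically
(FLAG stands for the per-request I40EVF_FLAG_AQ_* bit):

        /* old order -- the reply can race the bookkeeping:
         *      i40evf_send_pf_msg(...);
         *      adapter->aq_pending |= FLAG;
         *
         * new order, as applied in these hunks:
         */
        adapter->aq_pending |= FLAG;            /* mark in flight first */
        adapter->aq_required &= ~FLAG;          /* request consumed     */
        i40evf_send_pf_msg(adapter, opcode, buf, len);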
 
 /**
@@ -274,10 +275,10 @@ void i40evf_enable_queues(struct i40evf_adapter *adapter)
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
        vqs.rx_queues = vqs.tx_queues;
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
-                          (u8 *)&vqs, sizeof(vqs));
        adapter->aq_pending |= I40EVF_FLAG_AQ_ENABLE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ENABLE_QUEUES;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+                          (u8 *)&vqs, sizeof(vqs));
 }
 
 /**
@@ -300,10 +301,10 @@ void i40evf_disable_queues(struct i40evf_adapter *adapter)
        vqs.vsi_id = adapter->vsi_res->vsi_id;
        vqs.tx_queues = (1 << adapter->vsi_res->num_queue_pairs) - 1;
        vqs.rx_queues = vqs.tx_queues;
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
-                          (u8 *)&vqs, sizeof(vqs));
        adapter->aq_pending |= I40EVF_FLAG_AQ_DISABLE_QUEUES;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DISABLE_QUEUES;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+                          (u8 *)&vqs, sizeof(vqs));
 }
 
 /**
@@ -351,11 +352,11 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
        vimi->vecmap[v_idx].txq_map = 0;
        vimi->vecmap[v_idx].rxq_map = 0;
 
+       adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
                           (u8 *)vimi, len);
        kfree(vimi);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_MAP_VECTORS;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_MAP_VECTORS;
 }
 
 /**
@@ -412,12 +413,11 @@ void i40evf_add_ether_addrs(struct i40evf_adapter *adapter)
                        f->add = false;
                }
        }
+       adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
                           (u8 *)veal, len);
        kfree(veal);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_MAC_FILTER;
-
 }
 
 /**
@@ -474,11 +474,11 @@ void i40evf_del_ether_addrs(struct i40evf_adapter *adapter)
                        kfree(f);
                }
        }
+       adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
+       adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
        i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
                           (u8 *)veal, len);
        kfree(veal);
-       adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_MAC_FILTER;
-       adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_MAC_FILTER;
 }
 
 /**
@@ -535,10 +535,10 @@ void i40evf_add_vlans(struct i40evf_adapter *adapter)
                        f->add = false;
                }
        }
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
-       kfree(vvfl);
        adapter->aq_pending |= I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_ADD_VLAN_FILTER;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)vvfl, len);
+       kfree(vvfl);
 }
 
 /**
@@ -596,10 +596,10 @@ void i40evf_del_vlans(struct i40evf_adapter *adapter)
                        kfree(f);
                }
        }
-       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
-       kfree(vvfl);
        adapter->aq_pending |= I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
        adapter->aq_required &= ~I40EVF_FLAG_AQ_DEL_VLAN_FILTER;
+       i40evf_send_pf_msg(adapter, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)vvfl, len);
+       kfree(vvfl);
 }
 
 /**
index a2db388..236a618 100644
@@ -579,7 +579,6 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
                break;
        default:
                return -E1000_ERR_MAC_INIT;
-               break;
        }
 
        /* Set media type */
@@ -837,7 +836,6 @@ static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
                default:
                        ret_val = -E1000_ERR_PHY;
                        goto out;
-                       break;
                }
                ret_val = igb_get_phy_id(hw);
                goto out;
@@ -1481,6 +1479,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
        s32 ret_val;
        u16 i, rar_count = mac->rar_entry_count;
 
+       if ((hw->mac.type >= e1000_i210) &&
+           !(igb_get_flash_presence_i210(hw))) {
+               ret_val = igb_pll_workaround_i210(hw);
+               if (ret_val)
+                       return ret_val;
+       }
+
        /* Initialize identification LED */
        ret_val = igb_id_led_init(hw);
        if (ret_val) {
index 2a8bb35..217f813 100644
 #define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* SDP3 Data direction */
 
 /* Physical Func Reset Done Indication */
-#define E1000_CTRL_EXT_PFRSTD    0x00004000
-#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
-#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX  0x00400000
-#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
-#define E1000_CTRL_EXT_LINK_MODE_GMII   0x00000000
-#define E1000_CTRL_EXT_EIAME          0x01000000
-#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_PFRSTD  0x00004000
+#define E1000_CTRL_EXT_SDLPE   0x00040000  /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES   0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX   0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII  0x00000000
+#define E1000_CTRL_EXT_EIAME   0x01000000
+#define E1000_CTRL_EXT_IRCA            0x00000001
 /* Interrupt delay cancellation */
 /* Driver loaded bit for FW */
 #define E1000_CTRL_EXT_DRV_LOAD       0x10000000
@@ -62,6 +63,7 @@
 /* packet buffer parity error detection enabled */
 /* descriptor FIFO parity error detection enable */
 #define E1000_CTRL_EXT_PBA_CLR         0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN         0x00100000
 #define E1000_I2CCMD_REG_ADDR_SHIFT    16
 #define E1000_I2CCMD_PHY_ADDR_SHIFT    24
 #define E1000_I2CCMD_OPCODE_READ       0x08000000
index 89925e4..ce55ea5 100644
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
 /* These functions must be implemented by drivers */
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
 s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
 #endif /* _E1000_HW_H_ */
index 337161f..65d9316 100644
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
        }
        return ret_val;
 }
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+       s32 ret_val;
+       u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+       u16 nvm_word, phy_word, pci_word, tmp_nvm;
+       int i;
+
+       /* Get and set needed register values */
+       wuc = rd32(E1000_WUC);
+       mdicnfg = rd32(E1000_MDICNFG);
+       reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+       wr32(E1000_MDICNFG, reg_val);
+
+       /* Get data from NVM, or set default */
+       ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+                                         &nvm_word);
+       if (ret_val)
+               nvm_word = E1000_INVM_DEFAULT_AL;
+       tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+       for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+               /* check current state directly from internal PHY */
+               igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+                                        E1000_PHY_PLL_FREQ_REG), &phy_word);
+               if ((phy_word & E1000_PHY_PLL_UNCONF)
+                   != E1000_PHY_PLL_UNCONF) {
+                       ret_val = 0;
+                       break;
+               } else {
+                       ret_val = -E1000_ERR_PHY;
+               }
+               /* directly reset the internal PHY */
+               ctrl = rd32(E1000_CTRL);
+               wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+
+               ctrl_ext = rd32(E1000_CTRL_EXT);
+               ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+               wr32(E1000_CTRL_EXT, ctrl_ext);
+
+               wr32(E1000_WUC, 0);
+               reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+               wr32(E1000_EEARBC_I210, reg_val);
+
+               igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               pci_word |= E1000_PCI_PMCSR_D3;
+               igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               usleep_range(1000, 2000);
+               pci_word &= ~E1000_PCI_PMCSR_D3;
+               igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+               reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+               wr32(E1000_EEARBC_I210, reg_val);
+
+               /* restore WUC register */
+               wr32(E1000_WUC, wuc);
+       }
+       /* restore MDICNFG setting */
+       wr32(E1000_MDICNFG, mdicnfg);
+       return ret_val;
+}
index 9f34976..3442b63 100644
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
 s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
 s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
 bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
 
 #define E1000_STM_OPCODE               0xDB00
 #define E1000_EEPROM_FLASH_SIZE_WORD   0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
 #define NVM_LED_1_CFG_DEFAULT_I211     0x0184
 #define NVM_LED_0_2_CFG_DEFAULT_I211   0x200C
 
+/* PLL Defines */
+#define E1000_PCI_PMCSR                        0x44
+#define E1000_PCI_PMCSR_D3             0x03
+#define E1000_MAX_PLL_TRIES            5
+#define E1000_PHY_PLL_UNCONF           0xFF
+#define E1000_PHY_PLL_FREQ_PAGE                0xFC0000
+#define E1000_PHY_PLL_FREQ_REG         0x000E
+#define E1000_INVM_DEFAULT_AL          0x202F
+#define E1000_INVM_AUTOLOAD            0x0A
+#define E1000_INVM_PLL_WO_VAL          0x0010
+
 #endif
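
The defines above parameterize igb_pll_workaround_i210(): E1000_PCI_PMCSR
(config-space offset 0x44 on this part) is the function's PCI power
management control/status register, and the D3 bit lets the workaround bounce
the device into D3hot and back to D0 so the PLL re-locks; up to
E1000_MAX_PLL_TRIES attempts are made, with E1000_PHY_PLL_UNCONF read back
from the PHY's frequency page to detect a PLL that is still unconfigured.
The power-cycle step at the heart of the retry loop (mirrors the function
added earlier, not new code):

        igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        pci_word |= E1000_PCI_PMCSR_D3;                 /* enter D3hot */
        igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
        usleep_range(1000, 2000);
        pci_word &= ~E1000_PCI_PMCSR_D3;                /* back to D0 */
        igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);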
index 1cc4b1a..6f0490d 100644
@@ -66,6 +66,7 @@
 #define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
 #define E1000_PBS      0x01008  /* Packet Buffer Size */
 #define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
 #define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
 #define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
 #define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
 #define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
 #define E1000_IOVTCL    0x05BBC /* IOV Control Register */
 #define E1000_TXSWC     0x05ACC /* Tx Switch Control */
+#define E1000_LVMMC    0x03548 /* Last VM Misbehavior cause */
 /* These act per VF so an array friendly macro is used */
 #define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
 #define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
index f145adb..cb14bbd 100644
@@ -57,8 +57,8 @@
 #include "igb.h"
 
 #define MAJ 5
-#define MIN 0
-#define BUILD 5
+#define MIN 2
+#define BUILD 13
 #define DRV_VERSION __stringify(MAJ) "." __stringify(MIN) "." \
 __stringify(BUILD) "-k"
 char igb_driver_name[] = "igb";
@@ -1630,6 +1630,8 @@ void igb_power_up_link(struct igb_adapter *adapter)
                igb_power_up_phy_copper(&adapter->hw);
        else
                igb_power_up_serdes_link_82575(&adapter->hw);
+
+       igb_setup_link(&adapter->hw);
 }
 
 /**
@@ -4164,6 +4166,26 @@ static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
        return ret;
 }
 
+/**
+ *  igb_check_lvmmc - check for malformed packets received and
+ *  indicated in the LVMMC register
+ *  @adapter: pointer to adapter
+ **/
+static void igb_check_lvmmc(struct igb_adapter *adapter)
+{
+       struct e1000_hw *hw = &adapter->hw;
+       u32 lvmmc;
+
+       lvmmc = rd32(E1000_LVMMC);
+       if (lvmmc) {
+               if (unlikely(net_ratelimit())) {
+                       netdev_warn(adapter->netdev,
+                                   "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
+                                   lvmmc);
+               }
+       }
+}
+
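
igb_check_lvmmc() above pairs the register read with net_ratelimit(), the
kernel's shared printk rate limiter, so a persistently misbehaving VM cannot
flood the log on every watchdog tick. The guard, isolated:

        lvmmc = rd32(E1000_LVMMC);
        if (lvmmc && net_ratelimit())
                netdev_warn(adapter->netdev,
                            "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
                            lvmmc);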
 /**
  *  igb_watchdog - Timer Call-back
  *  @data: pointer to adapter cast into an unsigned long
@@ -4359,6 +4381,11 @@ static void igb_watchdog_task(struct work_struct *work)
        igb_spoof_check(adapter);
        igb_ptp_rx_hang(adapter);
 
+       /* Check LVMMC register on i350/i354 only */
+       if ((adapter->hw.mac.type == e1000_i350) ||
+           (adapter->hw.mac.type == e1000_i354))
+               igb_check_lvmmc(adapter);
+
        /* Reset the timer */
        if (!test_bit(__IGB_DOWN, &adapter->state)) {
                if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
@@ -7215,6 +7242,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
        }
 }
 
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+
+       pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+       struct igb_adapter *adapter = hw->back;
+
+       pci_write_config_word(adapter->pdev, reg, *value);
+}
+
 s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
 {
        struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7619,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
 
        if (netif_running(netdev))
                igb_close(netdev);
+       else
+               igb_reset(adapter);
 
        igb_clear_interrupt_scheme(adapter);
 
index 1560933..c5c97b4 100644
@@ -122,7 +122,7 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
-       s32 ret_val = 0;
+       s32 ret_val;
        u16 list_offset, data_offset;
 
        /* Identify the PHY */
@@ -147,28 +147,23 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw)
 
                /* Call SFP+ identify routine to get the SFP+ module type */
                ret_val = phy->ops.identify_sfp(hw);
-               if (ret_val != 0)
-                       goto out;
-               else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) {
-                       ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                       goto out;
-               }
+               if (ret_val)
+                       return ret_val;
+               if (hw->phy.sfp_type == ixgbe_sfp_type_unknown)
+                       return IXGBE_ERR_SFP_NOT_SUPPORTED;
 
                /* Check to see if SFP+ module is supported */
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw,
                                                            &list_offset,
                                                            &data_offset);
-               if (ret_val != 0) {
-                       ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                       goto out;
-               }
+               if (ret_val)
+                       return IXGBE_ERR_SFP_NOT_SUPPORTED;
                break;
        default:
                break;
        }
 
-out:
-       return ret_val;
+       return 0;
 }
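
From this point on, most of the ixgbe hunks are one mechanical cleanup:
functions that funneled every failure through a status local and a single
"goto out" label are flattened into direct returns, "!= 0" tests on status
codes become plain truth tests, and switch arms that assigned a result now
return it. The shape of the transformation, condensed from no one function
in particular:

        /* Before: status local plus a single exit label. */
        s32 ret_val = 0;

        if (bad_thing) {
                ret_val = IXGBE_ERR_CONFIG;
                goto out;
        }
out:
        return ret_val;

        /* After: return directly at the point of failure. */
        if (bad_thing)
                return IXGBE_ERR_CONFIG;

        return 0;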
 
 /**
@@ -183,7 +178,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
 {
        u32 regval;
        u32 i;
-       s32 ret_val = 0;
+       s32 ret_val;
 
        ret_val = ixgbe_start_hw_generic(hw);
 
@@ -203,11 +198,13 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw)
                IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval);
        }
 
+       if (ret_val)
+               return ret_val;
+
        /* set the completion timeout for interface */
-       if (ret_val == 0)
-               ixgbe_set_pcie_completion_timeout(hw);
+       ixgbe_set_pcie_completion_timeout(hw);
 
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -222,7 +219,6 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg)
 {
-       s32 status = 0;
        u32 autoc = 0;
 
        /*
@@ -262,11 +258,10 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
                break;
 
        default:
-               status = IXGBE_ERR_LINK_SETUP;
-               break;
+               return IXGBE_ERR_LINK_SETUP;
        }
 
-       return status;
+       return 0;
 }
 
 /**
@@ -277,14 +272,12 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw,
  **/
 static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
 {
-       enum ixgbe_media_type media_type;
-
        /* Detect if there is a copper PHY attached. */
        switch (hw->phy.type) {
        case ixgbe_phy_cu_unknown:
        case ixgbe_phy_tn:
-               media_type = ixgbe_media_type_copper;
-               goto out;
+               return ixgbe_media_type_copper;
+
        default:
                break;
        }
@@ -294,30 +287,27 @@ static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82598:
        case IXGBE_DEV_ID_82598_BX:
                /* Default device ID is mezzanine card KX/KX4 */
-               media_type = ixgbe_media_type_backplane;
-               break;
+               return ixgbe_media_type_backplane;
+
        case IXGBE_DEV_ID_82598AF_DUAL_PORT:
        case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
        case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
        case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
        case IXGBE_DEV_ID_82598EB_XF_LR:
        case IXGBE_DEV_ID_82598EB_SFP_LOM:
-               media_type = ixgbe_media_type_fiber;
-               break;
+               return ixgbe_media_type_fiber;
+
        case IXGBE_DEV_ID_82598EB_CX4:
        case IXGBE_DEV_ID_82598_CX4_DUAL_PORT:
-               media_type = ixgbe_media_type_cx4;
-               break;
+               return ixgbe_media_type_cx4;
+
        case IXGBE_DEV_ID_82598AT:
        case IXGBE_DEV_ID_82598AT2:
-               media_type = ixgbe_media_type_copper;
-               break;
+               return ixgbe_media_type_copper;
+
        default:
-               media_type = ixgbe_media_type_unknown;
-               break;
+               return ixgbe_media_type_unknown;
        }
-out:
-       return media_type;
 }
 
 /**
@@ -328,7 +318,6 @@ out:
  **/
 static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
        u32 fctrl_reg;
        u32 rmcs_reg;
        u32 reg;
@@ -338,10 +327,8 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
        bool link_up;
 
        /* Validate the water mark configuration */
-       if (!hw->fc.pause_time) {
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
-       }
+       if (!hw->fc.pause_time)
+               return IXGBE_ERR_INVALID_LINK_SETTINGS;
 
        /* Low water mark of zero causes XOFF floods */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -350,8 +337,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
                        if (!hw->fc.low_water[i] ||
                            hw->fc.low_water[i] >= hw->fc.high_water[i]) {
                                hw_dbg(hw, "Invalid water mark configuration\n");
-                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-                               goto out;
+                               return IXGBE_ERR_INVALID_LINK_SETTINGS;
                        }
                }
        }
@@ -428,9 +414,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
                break;
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
-               break;
+               return IXGBE_ERR_CONFIG;
        }
 
        /* Set 802.3x based flow control settings. */
@@ -461,8 +445,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw)
        /* Configure flow control refresh threshold value */
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
-out:
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -598,7 +581,7 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
                }
 
                if (!*link_up)
-                       goto out;
+                       return 0;
        }
 
        links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS);
@@ -629,7 +612,6 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw,
            (ixgbe_validate_link_ready(hw) != 0))
                *link_up = false;
 
-out:
        return 0;
 }
 
@@ -646,7 +628,6 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
 {
        bool             autoneg           = false;
-       s32              status            = 0;
        ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN;
        u32              curr_autoc        = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32              autoc             = curr_autoc;
@@ -657,7 +638,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
        speed &= link_capabilities;
 
        if (speed == IXGBE_LINK_SPEED_UNKNOWN)
-               status = IXGBE_ERR_LINK_SETUP;
+               return IXGBE_ERR_LINK_SETUP;
 
        /* Set KX4/KX support according to speed requested */
        else if (link_mode == IXGBE_AUTOC_LMS_KX4_AN ||
@@ -671,17 +652,11 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw,
                        IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc);
        }
 
-       if (status == 0) {
-               /*
-                * Setup and restart the link based on the new values in
-                * ixgbe_hw This will write the AUTOC register based on the new
-                * stored values
-                */
-               status = ixgbe_start_mac_link_82598(hw,
-                                                   autoneg_wait_to_complete);
-       }
-
-       return status;
+       /* Setup and restart the link based on the new values in
+        * ixgbe_hw This will write the AUTOC register based on the new
+        * stored values
+        */
+       return ixgbe_start_mac_link_82598(hw, autoneg_wait_to_complete);
 }
 
 
@@ -718,7 +693,7 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw,
  **/
 static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
+       s32 status;
        s32 phy_status = 0;
        u32 ctrl;
        u32 gheccr;
@@ -728,8 +703,8 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
 
        /* Call adapter stop to disable tx/rx and clear interrupts */
        status = hw->mac.ops.stop_adapter(hw);
-       if (status != 0)
-               goto reset_hw_out;
+       if (status)
+               return status;
 
        /*
         * Power up the Atlas Tx lanes if they are currently powered down.
@@ -771,7 +746,7 @@ static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw)
                /* Init PHY and function pointers, perform SFP setup */
                phy_status = hw->phy.ops.init(hw);
                if (phy_status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-                       goto reset_hw_out;
+                       return phy_status;
                if (phy_status == IXGBE_ERR_SFP_NOT_PRESENT)
                        goto mac_reset_top;
 
@@ -837,7 +812,6 @@ mac_reset_top:
         */
        hw->mac.ops.init_rx_addrs(hw);
 
-reset_hw_out:
        if (phy_status)
                status = phy_status;
 
@@ -1109,106 +1083,6 @@ static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset,
                                        byte_offset, sff8472_data);
 }
 
-/**
- *  ixgbe_get_supported_physical_layer_82598 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw)
-{
-       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-       u16 ext_ability = 0;
-
-       hw->phy.ops.identify(hw);
-
-       /* Copper PHY must be checked before AUTOC LMS to determine correct
-        * physical layer because 10GBase-T PHYs use LMS = KX4/KX */
-       switch (hw->phy.type) {
-       case ixgbe_phy_tn:
-       case ixgbe_phy_cu_unknown:
-               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE,
-               MDIO_MMD_PMAPMD, &ext_ability);
-               if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-               goto out;
-       default:
-               break;
-       }
-
-       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-       case IXGBE_AUTOC_LMS_1G_AN:
-       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-               else
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-               break;
-       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-               if (pma_pmd_10g == IXGBE_AUTOC_10G_CX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-               else if (pma_pmd_10g == IXGBE_AUTOC_10G_KX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               else /* XAUI */
-                       physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-               break;
-       case IXGBE_AUTOC_LMS_KX4_AN:
-       case IXGBE_AUTOC_LMS_KX4_AN_1G_AN:
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               break;
-       default:
-               break;
-       }
-
-       if (hw->phy.type == ixgbe_phy_nl) {
-               hw->phy.ops.identify_sfp(hw);
-
-               switch (hw->phy.sfp_type) {
-               case ixgbe_sfp_type_da_cu:
-                       physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-                       break;
-               case ixgbe_sfp_type_sr:
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-                       break;
-               case ixgbe_sfp_type_lr:
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-                       break;
-               default:
-                       physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-                       break;
-               }
-       }
-
-       switch (hw->device_id) {
-       case IXGBE_DEV_ID_82598_DA_DUAL_PORT:
-               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-               break;
-       case IXGBE_DEV_ID_82598AF_DUAL_PORT:
-       case IXGBE_DEV_ID_82598AF_SINGLE_PORT:
-       case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM:
-               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-               break;
-       case IXGBE_DEV_ID_82598EB_XF_LR:
-               physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-               break;
-       default:
-               break;
-       }
-
-out:
-       return physical_layer;
-}
-
 /**
  *  ixgbe_set_lan_id_multi_port_pcie_82598 - Set LAN id for PCIe multiple
  *  port devices.
@@ -1286,7 +1160,6 @@ static struct ixgbe_mac_operations mac_ops_82598 = {
        .start_hw               = &ixgbe_start_hw_82598,
        .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
        .get_media_type         = &ixgbe_get_media_type_82598,
-       .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598,
        .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .stop_adapter           = &ixgbe_stop_adapter_generic,
index bc7c924..cf55a0d 100644
@@ -123,7 +123,7 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
 
 static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
+       s32 ret_val;
        u16 list_offset, data_offset, data_value;
 
        if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
@@ -133,16 +133,14 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 
                ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
                                                              &data_offset);
-               if (ret_val != 0)
-                       goto setup_sfp_out;
+               if (ret_val)
+                       return ret_val;
 
                /* PHY config will finish before releasing the semaphore */
                ret_val = hw->mac.ops.acquire_swfw_sync(hw,
                                                        IXGBE_GSSR_MAC_CSR_SM);
-               if (ret_val != 0) {
-                       ret_val = IXGBE_ERR_SWFW_SYNC;
-                       goto setup_sfp_out;
-               }
+               if (ret_val)
+                       return IXGBE_ERR_SWFW_SYNC;
 
                if (hw->eeprom.ops.read(hw, ++data_offset, &data_value))
                        goto setup_sfp_err;
@@ -169,13 +167,11 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw)
 
                if (ret_val) {
                        hw_dbg(hw, " sfp module setup not complete\n");
-                       ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
-                       goto setup_sfp_out;
+                       return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE;
                }
        }
 
-setup_sfp_out:
-       return ret_val;
+       return 0;
 
 setup_sfp_err:
        /* Release the semaphore */
@@ -294,7 +290,7 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw)
 {
        struct ixgbe_mac_info *mac = &hw->mac;
        struct ixgbe_phy_info *phy = &hw->phy;
-       s32 ret_val = 0;
+       s32 ret_val;
        u32 esdp;
 
        if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) {
@@ -355,7 +351,6 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                                             ixgbe_link_speed *speed,
                                             bool *autoneg)
 {
-       s32 status = 0;
        u32 autoc = 0;
 
        /* Determine 1G link capabilities off of SFP+ type */
@@ -367,7 +362,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
            hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1) {
                *speed = IXGBE_LINK_SPEED_1GB_FULL;
                *autoneg = true;
-               goto out;
+               return 0;
        }
 
        /*
@@ -430,9 +425,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                break;
 
        default:
-               status = IXGBE_ERR_LINK_SETUP;
-               goto out;
-               break;
+               return IXGBE_ERR_LINK_SETUP;
        }
 
        if (hw->phy.multispeed_fiber) {
@@ -446,8 +439,7 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw,
                        *autoneg = true;
        }
 
-out:
-       return status;
+       return 0;
 }
 
 /**
@@ -458,14 +450,12 @@ out:
  **/
 static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
 {
-       enum ixgbe_media_type media_type;
-
        /* Detect if there is a copper PHY attached. */
        switch (hw->phy.type) {
        case ixgbe_phy_cu_unknown:
        case ixgbe_phy_tn:
-               media_type = ixgbe_media_type_copper;
-               goto out;
+               return ixgbe_media_type_copper;
+
        default:
                break;
        }
@@ -478,34 +468,31 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
        case IXGBE_DEV_ID_82599_BACKPLANE_FCOE:
        case IXGBE_DEV_ID_82599_XAUI_LOM:
                /* Default device ID is mezzanine card KX/KX4 */
-               media_type = ixgbe_media_type_backplane;
-               break;
+               return ixgbe_media_type_backplane;
+
        case IXGBE_DEV_ID_82599_SFP:
        case IXGBE_DEV_ID_82599_SFP_FCOE:
        case IXGBE_DEV_ID_82599_SFP_EM:
        case IXGBE_DEV_ID_82599_SFP_SF2:
        case IXGBE_DEV_ID_82599_SFP_SF_QP:
        case IXGBE_DEV_ID_82599EN_SFP:
-               media_type = ixgbe_media_type_fiber;
-               break;
+               return ixgbe_media_type_fiber;
+
        case IXGBE_DEV_ID_82599_CX4:
-               media_type = ixgbe_media_type_cx4;
-               break;
+               return ixgbe_media_type_cx4;
+
        case IXGBE_DEV_ID_82599_T3_LOM:
-               media_type = ixgbe_media_type_copper;
-               break;
+               return ixgbe_media_type_copper;
+
        case IXGBE_DEV_ID_82599_LS:
-               media_type = ixgbe_media_type_fiber_lco;
-               break;
+               return ixgbe_media_type_fiber_lco;
+
        case IXGBE_DEV_ID_82599_QSFP_SF_QP:
-               media_type = ixgbe_media_type_fiber_qsfp;
-               break;
+               return ixgbe_media_type_fiber_qsfp;
+
        default:
-               media_type = ixgbe_media_type_unknown;
-               break;
+               return ixgbe_media_type_unknown;
        }
-out:
-       return media_type;
 }
 
 /**
@@ -555,7 +542,7 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
                status = hw->mac.ops.acquire_swfw_sync(hw,
                                                IXGBE_GSSR_MAC_CSR_SM);
                if (status)
-                       goto out;
+                       return status;
 
                got_lock = true;
        }
@@ -592,7 +579,6 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
        /* Add delay to filter out noises during initial link setup */
        msleep(50);
 
-out:
        return status;
 }
 
@@ -959,7 +945,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                                      bool autoneg_wait_to_complete)
 {
        bool autoneg = false;
-       s32 status = 0;
+       s32 status;
        u32 pma_pmd_1g, link_mode, links_reg, i;
        u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
        u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
@@ -975,15 +961,13 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
        /* Check to see if speed passed in is supported. */
        status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities,
                                                   &autoneg);
-       if (status != 0)
-               goto out;
+       if (status)
+               return status;
 
        speed &= link_capabilities;
 
-       if (speed == IXGBE_LINK_SPEED_UNKNOWN) {
-               status = IXGBE_ERR_LINK_SETUP;
-               goto out;
-       }
+       if (speed == IXGBE_LINK_SPEED_UNKNOWN)
+               return IXGBE_ERR_LINK_SETUP;
 
        /* Use stored value (EEPROM defaults) of AUTOC to find KR/KX4 support*/
        if (hw->mac.orig_link_settings_stored)
@@ -1034,7 +1018,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                /* Restart link */
                status = hw->mac.ops.prot_autoc_write(hw, autoc, false);
                if (status)
-                       goto out;
+                       return status;
 
                /* Only poll for autoneg to complete if specified to do so */
                if (autoneg_wait_to_complete) {
@@ -1061,7 +1045,6 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
                msleep(50);
        }
 
-out:
        return status;
 }
 
@@ -1106,8 +1089,8 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
 
        /* Call adapter stop to disable tx/rx and clear interrupts */
        status = hw->mac.ops.stop_adapter(hw);
-       if (status != 0)
-               goto reset_hw_out;
+       if (status)
+               return status;
 
        /* flush pending Tx transactions */
        ixgbe_clear_tx_pending(hw);
@@ -1118,7 +1101,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
        status = hw->phy.ops.init(hw);
 
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               goto reset_hw_out;
+               return status;
 
        /* Setup SFP module if there is one present. */
        if (hw->phy.sfp_setup_needed) {
@@ -1127,7 +1110,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
        }
 
        if (status == IXGBE_ERR_SFP_NOT_SUPPORTED)
-               goto reset_hw_out;
+               return status;
 
        /* Reset PHY */
        if (hw->phy.reset_disable == false && hw->phy.ops.reset != NULL)
@@ -1217,7 +1200,7 @@ mac_reset_top:
                                                        hw->mac.orig_autoc,
                                                        false);
                        if (status)
-                               goto reset_hw_out;
+                               return status;
                }
 
                if ((autoc2 & IXGBE_AUTOC2_UPPER_MASK) !=
@@ -1259,7 +1242,6 @@ mac_reset_top:
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
                                       &hw->mac.wwpn_prefix);
 
-reset_hw_out:
        return status;
 }
 
@@ -1928,20 +1910,20 @@ static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw)
        s32 ret_val = 0;
 
        ret_val = ixgbe_start_hw_generic(hw);
-       if (ret_val != 0)
-               goto out;
+       if (ret_val)
+               return ret_val;
 
        ret_val = ixgbe_start_hw_gen2(hw);
-       if (ret_val != 0)
-               goto out;
+       if (ret_val)
+               return ret_val;
 
        /* We need to run link autotry after the driver loads */
        hw->mac.autotry_restart = true;
 
-       if (ret_val == 0)
-               ret_val = ixgbe_verify_fw_version_82599(hw);
-out:
-       return ret_val;
+       if (ret_val)
+               return ret_val;
+
+       return ixgbe_verify_fw_version_82599(hw);
 }
 
 /**
@@ -1954,16 +1936,15 @@ out:
  **/
 static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 {
-       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       s32 status;
 
        /* Detect PHY if not unknown - returns success if already detected. */
        status = ixgbe_identify_phy_generic(hw);
-       if (status != 0) {
+       if (status) {
                /* 82599 10GBASE-T requires an external PHY */
                if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper)
-                       goto out;
-               else
-                       status = ixgbe_identify_module_generic(hw);
+                       return status;
+               status = ixgbe_identify_module_generic(hw);
        }
 
        /* Set PHY type none if no PHY detected */
@@ -1974,141 +1955,11 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw)
 
        /* Return error if SFP module has been detected but is not supported */
        if (hw->phy.type == ixgbe_phy_sfp_unsupported)
-               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
 
-out:
        return status;
 }
 
-/**
- *  ixgbe_get_supported_physical_layer_82599 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw)
-{
-       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-       u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC);
-       u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
-       u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK;
-       u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK;
-       u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK;
-       u16 ext_ability = 0;
-       u8 comp_codes_10g = 0;
-       u8 comp_codes_1g = 0;
-
-       hw->phy.ops.identify(hw);
-
-       switch (hw->phy.type) {
-       case ixgbe_phy_tn:
-       case ixgbe_phy_cu_unknown:
-               hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                                                        &ext_ability);
-               if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-               goto out;
-       default:
-               break;
-       }
-
-       switch (autoc & IXGBE_AUTOC_LMS_MASK) {
-       case IXGBE_AUTOC_LMS_1G_AN:
-       case IXGBE_AUTOC_LMS_1G_LINK_NO_AN:
-               if (pma_pmd_1g == IXGBE_AUTOC_1G_KX_BX) {
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX |
-                           IXGBE_PHYSICAL_LAYER_1000BASE_BX;
-                       goto out;
-               } else
-                       /* SFI mode so read SFP module */
-                       goto sfp_check;
-               break;
-       case IXGBE_AUTOC_LMS_10G_LINK_NO_AN:
-               if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_CX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_CX4;
-               else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_KX4)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               else if (pma_pmd_10g_parallel == IXGBE_AUTOC_10G_XAUI)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_XAUI;
-               goto out;
-               break;
-       case IXGBE_AUTOC_LMS_10G_SERIAL:
-               if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_KR) {
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-                       goto out;
-               } else if (pma_pmd_10g_serial == IXGBE_AUTOC2_10G_SFI)
-                       goto sfp_check;
-               break;
-       case IXGBE_AUTOC_LMS_KX4_KX_KR:
-       case IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN:
-               if (autoc & IXGBE_AUTOC_KX_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_KX;
-               if (autoc & IXGBE_AUTOC_KX4_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KX4;
-               if (autoc & IXGBE_AUTOC_KR_SUPP)
-                       physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_KR;
-               goto out;
-               break;
-       default:
-               goto out;
-               break;
-       }
-
-sfp_check:
-       /* SFP check must be done last since DA modules are sometimes used to
-        * test KR mode -  we need to id KR mode correctly before SFP module.
-        * Call identify_sfp because the pluggable module may have changed */
-       hw->phy.ops.identify_sfp(hw);
-       if (hw->phy.sfp_type == ixgbe_sfp_type_not_present)
-               goto out;
-
-       switch (hw->phy.type) {
-       case ixgbe_phy_sfp_passive_tyco:
-       case ixgbe_phy_sfp_passive_unknown:
-       case ixgbe_phy_qsfp_passive_unknown:
-               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU;
-               break;
-       case ixgbe_phy_sfp_ftl_active:
-       case ixgbe_phy_sfp_active_unknown:
-       case ixgbe_phy_qsfp_active_unknown:
-               physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA;
-               break;
-       case ixgbe_phy_sfp_avago:
-       case ixgbe_phy_sfp_ftl:
-       case ixgbe_phy_sfp_intel:
-       case ixgbe_phy_sfp_unknown:
-               hw->phy.ops.read_i2c_eeprom(hw,
-                     IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g);
-               hw->phy.ops.read_i2c_eeprom(hw,
-                     IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g);
-               if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-               else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T;
-               break;
-       case ixgbe_phy_qsfp_intel:
-       case ixgbe_phy_qsfp_unknown:
-               hw->phy.ops.read_i2c_eeprom(hw,
-                       IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g);
-               if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR;
-               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                       physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR;
-               break;
-       default:
-               break;
-       }
-
-out:
-       return physical_layer;
-}
-
 /**
  *  ixgbe_enable_rx_dma_82599 - Enable the Rx DMA unit on 82599
  *  @hw: pointer to hardware structure
@@ -2151,26 +2002,24 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
        u16 fw_version = 0;
 
        /* firmware check is only necessary for SFI devices */
-       if (hw->phy.media_type != ixgbe_media_type_fiber) {
-               status = 0;
-               goto fw_version_out;
-       }
+       if (hw->phy.media_type != ixgbe_media_type_fiber)
+               return 0;
 
        /* get the offset to the Firmware Module block */
        offset = IXGBE_FW_PTR;
        if (hw->eeprom.ops.read(hw, offset, &fw_offset))
                goto fw_version_err;
 
-       if ((fw_offset == 0) || (fw_offset == 0xFFFF))
-               goto fw_version_out;
+       if (fw_offset == 0 || fw_offset == 0xFFFF)
+               return IXGBE_ERR_EEPROM_VERSION;
 
        /* get the offset to the Pass Through Patch Configuration block */
        offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR;
        if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset))
                goto fw_version_err;
 
-       if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF))
-               goto fw_version_out;
+       if (fw_ptp_cfg_offset == 0 || fw_ptp_cfg_offset == 0xFFFF)
+               return IXGBE_ERR_EEPROM_VERSION;
 
        /* get the firmware version */
        offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4;
@@ -2180,7 +2029,6 @@ static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw)
        if (fw_version > 0x5)
                status = 0;
 
-fw_version_out:
        return status;
 
 fw_version_err:
@@ -2197,37 +2045,33 @@ fw_version_err:
  **/
 static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw)
 {
-       bool lesm_enabled = false;
        u16 fw_offset, fw_lesm_param_offset, fw_lesm_state;
        s32 status;
 
        /* get the offset to the Firmware Module block */
        status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset);
 
-       if ((status != 0) ||
-           (fw_offset == 0) || (fw_offset == 0xFFFF))
-               goto out;
+       if (status || fw_offset == 0 || fw_offset == 0xFFFF)
+               return false;
 
        /* get the offset to the LESM Parameters block */
        status = hw->eeprom.ops.read(hw, (fw_offset +
                                     IXGBE_FW_LESM_PARAMETERS_PTR),
                                     &fw_lesm_param_offset);
 
-       if ((status != 0) ||
-           (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF))
-               goto out;
+       if (status ||
+           fw_lesm_param_offset == 0 || fw_lesm_param_offset == 0xFFFF)
+               return false;
 
        /* get the lesm state word */
        status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset +
                                     IXGBE_FW_LESM_STATE_1),
                                     &fw_lesm_state);
 
-       if ((status == 0) &&
-           (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
-               lesm_enabled = true;
+       if (!status && (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED))
+               return true;
 
-out:
-       return lesm_enabled;
+       return false;
 }
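This hunk is representative of the whole series: a boolean accumulator plus a goto label collapses into direct returns. A schematic before/after of the pattern, using plain int in place of the driver's s32 so it builds standalone (an assumption, not the driver's types):

#include <assert.h>
#include <stdbool.h>

/* Before: single exit via an accumulator and a label. */
static bool lesm_enabled_before(int status, unsigned int state)
{
	bool enabled = false;

	if (status != 0)
		goto out;
	if (state & 0x1)
		enabled = true;
out:
	return enabled;
}

/* After: the accumulator and the label disappear; each condition
 * returns its answer where it is decided. */
static bool lesm_enabled_after(int status, unsigned int state)
{
	if (status)
		return false;
	return state & 0x1;
}

int main(void)
{
	assert(lesm_enabled_before(0, 1) == lesm_enabled_after(0, 1));
	assert(lesm_enabled_before(-1, 1) == lesm_enabled_after(-1, 1));
	return 0;
}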
 
 /**
@@ -2245,22 +2089,16 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset,
                                          u16 words, u16 *data)
 {
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-       s32 ret_val = IXGBE_ERR_CONFIG;
 
-       /*
-        * If EEPROM is detected and can be addressed using 14 bits,
+       /* If EEPROM is detected and can be addressed using 14 bits,
         * use EERD otherwise use bit bang
         */
-       if ((eeprom->type == ixgbe_eeprom_spi) &&
-           (offset + (words - 1) <= IXGBE_EERD_MAX_ADDR))
-               ret_val = ixgbe_read_eerd_buffer_generic(hw, offset, words,
-                                                        data);
-       else
-               ret_val = ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset,
-                                                                   words,
-                                                                   data);
+       if (eeprom->type == ixgbe_eeprom_spi &&
+           offset + (words - 1) <= IXGBE_EERD_MAX_ADDR)
+               return ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
 
-       return ret_val;
+       return ixgbe_read_eeprom_buffer_bit_bang_generic(hw, offset, words,
+                                                        data);
 }
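The EERD path is only taken when the last word of the transfer is still addressable; the (words - 1) keeps a transfer that ends exactly at the limit valid. A small sketch of the predicate, with the limit passed as a parameter standing in for IXGBE_EERD_MAX_ADDR (0x3fff below is an illustrative 14-bit limit, not a quoted driver value):

#include <assert.h>
#include <stdbool.h>

/* True when words >= 1 and the final word offset fits under max_addr. */
static bool fits_eerd(unsigned int offset, unsigned int words,
		      unsigned int max_addr)
{
	return words != 0 && offset + (words - 1) <= max_addr;
}

int main(void)
{
	/* a transfer whose last word sits exactly on the limit is allowed */
	assert(fits_eerd(0x3ff0, 0x10, 0x3fff));
	/* one word more and the bit-bang path must be used instead */
	assert(!fits_eerd(0x3ff0, 0x11, 0x3fff));
	return 0;
}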
 
 /**
@@ -2277,19 +2115,15 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw,
                                   u16 offset, u16 *data)
 {
        struct ixgbe_eeprom_info *eeprom = &hw->eeprom;
-       s32 ret_val = IXGBE_ERR_CONFIG;
 
        /*
         * If EEPROM is detected and can be addressed using 14 bits,
         * use EERD otherwise use bit bang
         */
-       if ((eeprom->type == ixgbe_eeprom_spi) &&
-           (offset <= IXGBE_EERD_MAX_ADDR))
-               ret_val = ixgbe_read_eerd_generic(hw, offset, data);
-       else
-               ret_val = ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
+       if (eeprom->type == ixgbe_eeprom_spi && offset <= IXGBE_EERD_MAX_ADDR)
+               return ixgbe_read_eerd_generic(hw, offset, data);
 
-       return ret_val;
+       return ixgbe_read_eeprom_bit_bang_generic(hw, offset, data);
 }
 
 /**
@@ -2458,7 +2292,6 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
        .start_hw               = &ixgbe_start_hw_82599,
        .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
        .get_media_type         = &ixgbe_get_media_type_82599,
-       .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599,
        .enable_rx_dma          = &ixgbe_enable_rx_dma_82599,
        .disable_rx_buff        = &ixgbe_disable_rx_buff_generic,
        .enable_rx_buff         = &ixgbe_enable_rx_buff_generic,
index 4e5385a..b5f484b 100644
@@ -122,8 +122,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
         */
        if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) {
                hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n");
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
+               return IXGBE_ERR_INVALID_LINK_SETTINGS;
        }
 
        /*
@@ -143,7 +142,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                /* some MAC's need RMW protection on AUTOC */
                ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &reg_bp);
                if (ret_val)
-                       goto out;
+                       return ret_val;
 
                /* only backplane uses autoc so fall through */
        case ixgbe_media_type_fiber:
@@ -214,9 +213,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                break;
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
-               break;
+               return IXGBE_ERR_CONFIG;
        }
 
        if (hw->mac.type != ixgbe_mac_X540) {
@@ -247,7 +244,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
                 */
                ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked);
                if (ret_val)
-                       goto out;
+                       return ret_val;
 
        } else if ((hw->phy.media_type == ixgbe_media_type_copper) &&
                   ixgbe_device_supports_autoneg_fc(hw)) {
@@ -256,7 +253,6 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw)
        }
 
        hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg);
-out:
        return ret_val;
 }
 
@@ -295,12 +291,11 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw)
        /* Setup flow control */
        ret_val = ixgbe_setup_fc(hw);
        if (!ret_val)
-               goto out;
+               return 0;
 
        /* Clear adapter stopped flag */
        hw->adapter_stopped = false;
 
-out:
        return ret_val;
 }
 
@@ -837,20 +832,16 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw)
 s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                               u16 words, u16 *data)
 {
-       s32 status = 0;
+       s32 status;
        u16 i, count;
 
        hw->eeprom.ops.init_params(hw);
 
-       if (words == 0) {
-               status = IXGBE_ERR_INVALID_ARGUMENT;
-               goto out;
-       }
+       if (words == 0)
+               return IXGBE_ERR_INVALID_ARGUMENT;
 
-       if (offset + words > hw->eeprom.word_size) {
-               status = IXGBE_ERR_EEPROM;
-               goto out;
-       }
+       if (offset + words > hw->eeprom.word_size)
+               return IXGBE_ERR_EEPROM;
 
        /*
         * The EEPROM page size cannot be queried from the chip. We do lazy
@@ -875,7 +866,6 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                        break;
        }
 
-out:
        return status;
 }
 
@@ -900,64 +890,61 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 
        /* Prepare the EEPROM for writing  */
        status = ixgbe_acquire_eeprom(hw);
+       if (status)
+               return status;
 
-       if (status == 0) {
-               if (ixgbe_ready_eeprom(hw) != 0) {
-                       ixgbe_release_eeprom(hw);
-                       status = IXGBE_ERR_EEPROM;
-               }
+       if (ixgbe_ready_eeprom(hw) != 0) {
+               ixgbe_release_eeprom(hw);
+               return IXGBE_ERR_EEPROM;
        }
 
-       if (status == 0) {
-               for (i = 0; i < words; i++) {
-                       ixgbe_standby_eeprom(hw);
+       for (i = 0; i < words; i++) {
+               ixgbe_standby_eeprom(hw);
+
+               /* Send the WRITE ENABLE command (8 bit opcode) */
+               ixgbe_shift_out_eeprom_bits(hw,
+                                           IXGBE_EEPROM_WREN_OPCODE_SPI,
+                                           IXGBE_EEPROM_OPCODE_BITS);
 
-                       /*  Send the WRITE ENABLE command (8 bit opcode )  */
-                       ixgbe_shift_out_eeprom_bits(hw,
-                                                 IXGBE_EEPROM_WREN_OPCODE_SPI,
-                                                 IXGBE_EEPROM_OPCODE_BITS);
+               ixgbe_standby_eeprom(hw);
 
-                       ixgbe_standby_eeprom(hw);
+               /* Some SPI eeproms use the 8th address bit embedded
+                * in the opcode
+                */
+               if ((hw->eeprom.address_bits == 8) &&
+                   ((offset + i) >= 128))
+                       write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
 
-                       /*
-                        * Some SPI eeproms use the 8th address bit embedded
-                        * in the opcode
-                        */
-                       if ((hw->eeprom.address_bits == 8) &&
-                           ((offset + i) >= 128))
-                               write_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
-                       /* Send the Write command (8-bit opcode + addr) */
-                       ixgbe_shift_out_eeprom_bits(hw, write_opcode,
-                                                   IXGBE_EEPROM_OPCODE_BITS);
-                       ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
-                                                   hw->eeprom.address_bits);
-
-                       page_size = hw->eeprom.word_page_size;
-
-                       /* Send the data in burst via SPI*/
-                       do {
-                               word = data[i];
-                               word = (word >> 8) | (word << 8);
-                               ixgbe_shift_out_eeprom_bits(hw, word, 16);
-
-                               if (page_size == 0)
-                                       break;
-
-                               /* do not wrap around page */
-                               if (((offset + i) & (page_size - 1)) ==
-                                   (page_size - 1))
-                                       break;
-                       } while (++i < words);
-
-                       ixgbe_standby_eeprom(hw);
-                       usleep_range(10000, 20000);
-               }
-               /* Done with writing - release the EEPROM */
-               ixgbe_release_eeprom(hw);
+               /* Send the Write command (8-bit opcode + addr) */
+               ixgbe_shift_out_eeprom_bits(hw, write_opcode,
+                                           IXGBE_EEPROM_OPCODE_BITS);
+               ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+                                           hw->eeprom.address_bits);
+
+               page_size = hw->eeprom.word_page_size;
+
+               /* Send the data in burst via SPI */
+               do {
+                       word = data[i];
+                       word = (word >> 8) | (word << 8);
+                       ixgbe_shift_out_eeprom_bits(hw, word, 16);
+
+                       if (page_size == 0)
+                               break;
+
+                       /* do not wrap around page */
+                       if (((offset + i) & (page_size - 1)) ==
+                           (page_size - 1))
+                               break;
+               } while (++i < words);
+
+               ixgbe_standby_eeprom(hw);
+               usleep_range(10000, 20000);
        }
+       /* Done with writing - release the EEPROM */
+       ixgbe_release_eeprom(hw);
 
-       return status;
+       return 0;
 }
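Two idioms in the reflowed write loop are worth unpacking: the (word >> 8) | (word << 8) swap that puts each 16-bit word into the byte order the SPI part expects, and the (addr & (page_size - 1)) == (page_size - 1) test that stops a burst on the last word of a page (valid because the page size is a power of two). A standalone check of both, a sketch rather than driver code:

#include <assert.h>
#include <stdint.h>

static uint16_t swap16(uint16_t w)
{
	return (uint16_t)((w >> 8) | (w << 8));
}

/* With a power-of-two page size, masking by page - 1 yields the offset
 * within the page; it equals page - 1 only on the page's final word. */
static int last_word_of_page(unsigned int addr, unsigned int page)
{
	return (addr & (page - 1)) == (page - 1);
}

int main(void)
{
	assert(swap16(0x12a4) == 0xa412);
	assert(last_word_of_page(127, 128));
	assert(!last_word_of_page(128, 128));
	return 0;
}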
 
 /**
@@ -971,19 +958,12 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
  **/
 s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
-       s32 status;
-
        hw->eeprom.ops.init_params(hw);
 
-       if (offset >= hw->eeprom.word_size) {
-               status = IXGBE_ERR_EEPROM;
-               goto out;
-       }
-
-       status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
+       if (offset >= hw->eeprom.word_size)
+               return IXGBE_ERR_EEPROM;
 
-out:
-       return status;
+       return ixgbe_write_eeprom_buffer_bit_bang(hw, offset, 1, &data);
 }
 
 /**
@@ -998,20 +978,16 @@ out:
 s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                              u16 words, u16 *data)
 {
-       s32 status = 0;
+       s32 status;
        u16 i, count;
 
        hw->eeprom.ops.init_params(hw);
 
-       if (words == 0) {
-               status = IXGBE_ERR_INVALID_ARGUMENT;
-               goto out;
-       }
+       if (words == 0)
+               return IXGBE_ERR_INVALID_ARGUMENT;
 
-       if (offset + words > hw->eeprom.word_size) {
-               status = IXGBE_ERR_EEPROM;
-               goto out;
-       }
+       if (offset + words > hw->eeprom.word_size)
+               return IXGBE_ERR_EEPROM;
 
        /*
         * We cannot hold synchronization semaphores for too long
@@ -1025,12 +1001,11 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i,
                                                           count, &data[i]);
 
-               if (status != 0)
-                       break;
+               if (status)
+                       return status;
        }
 
-out:
-       return status;
+       return 0;
 }
 
 /**
@@ -1052,41 +1027,38 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 
        /* Prepare the EEPROM for reading  */
        status = ixgbe_acquire_eeprom(hw);
+       if (status)
+               return status;
 
-       if (status == 0) {
-               if (ixgbe_ready_eeprom(hw) != 0) {
-                       ixgbe_release_eeprom(hw);
-                       status = IXGBE_ERR_EEPROM;
-               }
+       if (ixgbe_ready_eeprom(hw) != 0) {
+               ixgbe_release_eeprom(hw);
+               return IXGBE_ERR_EEPROM;
        }
 
-       if (status == 0) {
-               for (i = 0; i < words; i++) {
-                       ixgbe_standby_eeprom(hw);
-                       /*
-                        * Some SPI eeproms use the 8th address bit embedded
-                        * in the opcode
-                        */
-                       if ((hw->eeprom.address_bits == 8) &&
-                           ((offset + i) >= 128))
-                               read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
-
-                       /* Send the READ command (opcode + addr) */
-                       ixgbe_shift_out_eeprom_bits(hw, read_opcode,
-                                                   IXGBE_EEPROM_OPCODE_BITS);
-                       ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
-                                                   hw->eeprom.address_bits);
-
-                       /* Read the data. */
-                       word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
-                       data[i] = (word_in >> 8) | (word_in << 8);
-               }
+       for (i = 0; i < words; i++) {
+               ixgbe_standby_eeprom(hw);
+               /* Some SPI eeproms use the 8th address bit embedded
+                * in the opcode
+                */
+               if ((hw->eeprom.address_bits == 8) &&
+                   ((offset + i) >= 128))
+                       read_opcode |= IXGBE_EEPROM_A8_OPCODE_SPI;
 
-               /* End this read operation */
-               ixgbe_release_eeprom(hw);
+               /* Send the READ command (opcode + addr) */
+               ixgbe_shift_out_eeprom_bits(hw, read_opcode,
+                                           IXGBE_EEPROM_OPCODE_BITS);
+               ixgbe_shift_out_eeprom_bits(hw, (u16)((offset + i) * 2),
+                                           hw->eeprom.address_bits);
+
+               /* Read the data. */
+               word_in = ixgbe_shift_in_eeprom_bits(hw, 16);
+               data[i] = (word_in >> 8) | (word_in << 8);
        }
 
-       return status;
+       /* End this read operation */
+       ixgbe_release_eeprom(hw);
+
+       return 0;
 }
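The A8 twist in the read loop: parts with only 8 address bits carry the ninth byte-address bit inside the opcode itself. Since each word is two bytes, word offset 128 is byte address 256, which is exactly where that bit first matters. A sketch with placeholder opcode values (the real IXGBE_EEPROM_*_SPI constants are in the driver headers):

#include <assert.h>
#include <stdint.h>

#define READ_OP	0x03	/* placeholder SPI READ opcode */
#define A8_BIT	0x08	/* placeholder for the embedded address bit */

static uint8_t read_opcode_for(unsigned int word_offset,
			       unsigned int address_bits)
{
	uint8_t op = READ_OP;

	/* byte address = word_offset * 2, so bit 8 of the byte address
	 * is set from word offset 128 onward */
	if (address_bits == 8 && word_offset >= 128)
		op |= A8_BIT;
	return op;
}

int main(void)
{
	assert(read_opcode_for(127, 8) == READ_OP);
	assert(read_opcode_for(128, 8) == (READ_OP | A8_BIT));
	assert(read_opcode_for(128, 16) == READ_OP);
	return 0;
}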
 
 /**
@@ -1100,19 +1072,12 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset,
 s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset,
                                       u16 *data)
 {
-       s32 status;
-
        hw->eeprom.ops.init_params(hw);
 
-       if (offset >= hw->eeprom.word_size) {
-               status = IXGBE_ERR_EEPROM;
-               goto out;
-       }
-
-       status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
+       if (offset >= hw->eeprom.word_size)
+               return IXGBE_ERR_EEPROM;
 
-out:
-       return status;
+       return ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
 }
 
 /**
@@ -1128,20 +1093,16 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                   u16 words, u16 *data)
 {
        u32 eerd;
-       s32 status = 0;
+       s32 status;
        u32 i;
 
        hw->eeprom.ops.init_params(hw);
 
-       if (words == 0) {
-               status = IXGBE_ERR_INVALID_ARGUMENT;
-               goto out;
-       }
+       if (words == 0)
+               return IXGBE_ERR_INVALID_ARGUMENT;
 
-       if (offset >= hw->eeprom.word_size) {
-               status = IXGBE_ERR_EEPROM;
-               goto out;
-       }
+       if (offset >= hw->eeprom.word_size)
+               return IXGBE_ERR_EEPROM;
 
        for (i = 0; i < words; i++) {
                eerd = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1155,11 +1116,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                   IXGBE_EEPROM_RW_REG_DATA);
                } else {
                        hw_dbg(hw, "Eeprom read timed out\n");
-                       goto out;
+                       return status;
                }
        }
-out:
-       return status;
+
+       return 0;
 }
 
 /**
@@ -1175,7 +1136,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
                                                 u16 offset)
 {
        u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX];
-       s32 status = 0;
+       s32 status;
        u16 i;
 
        for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++)
@@ -1185,12 +1146,12 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
        status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset,
                                             IXGBE_EEPROM_PAGE_SIZE_MAX, data);
        hw->eeprom.word_page_size = 0;
-       if (status != 0)
-               goto out;
+       if (status)
+               return status;
 
        status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data);
-       if (status != 0)
-               goto out;
+       if (status)
+               return status;
 
        /*
         * When writing in burst more than the actual page size
@@ -1200,8 +1161,7 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw,
 
        hw_dbg(hw, "Detected EEPROM page size = %d words.\n",
               hw->eeprom.word_page_size);
-out:
-       return status;
+       return 0;
 }
 
 /**
@@ -1230,20 +1190,16 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                                    u16 words, u16 *data)
 {
        u32 eewr;
-       s32 status = 0;
+       s32 status;
        u16 i;
 
        hw->eeprom.ops.init_params(hw);
 
-       if (words == 0) {
-               status = IXGBE_ERR_INVALID_ARGUMENT;
-               goto out;
-       }
+       if (words == 0)
+               return IXGBE_ERR_INVALID_ARGUMENT;
 
-       if (offset >= hw->eeprom.word_size) {
-               status = IXGBE_ERR_EEPROM;
-               goto out;
-       }
+       if (offset >= hw->eeprom.word_size)
+               return IXGBE_ERR_EEPROM;
 
        for (i = 0; i < words; i++) {
                eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) |
@@ -1251,22 +1207,21 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset,
                       IXGBE_EEPROM_RW_REG_START;
 
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
-               if (status != 0) {
+               if (status) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
-                       goto out;
+                       return status;
                }
 
                IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr);
 
                status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE);
-               if (status != 0) {
+               if (status) {
                        hw_dbg(hw, "Eeprom write EEWR timed out\n");
-                       goto out;
+                       return status;
                }
        }
 
-out:
-       return status;
+       return 0;
 }
 
 /**
@@ -1294,7 +1249,6 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
 {
        u32 i;
        u32 reg;
-       s32 status = IXGBE_ERR_EEPROM;
 
        for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) {
                if (ee_reg == IXGBE_NVM_POLL_READ)
@@ -1303,12 +1257,11 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
                        reg = IXGBE_READ_REG(hw, IXGBE_EEWR);
 
                if (reg & IXGBE_EEPROM_RW_REG_DONE) {
-                       status = 0;
-                       break;
+                       return 0;
                }
                udelay(5);
        }
-       return status;
+       return IXGBE_ERR_EEPROM;
 }
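After the cleanup the poll helper has the canonical shape: success returns from inside the loop, and the timeout error is simply what falls out the bottom. Schematically, with plain int error codes standing in for s32 and IXGBE_ERR_EEPROM:

#include <assert.h>

static int attempts_left;

static int fake_done(void)
{
	return --attempts_left == 0; /* "done" fires on the last read */
}

/* Success exits the loop directly; running out of attempts is the
 * only way to reach the error return. */
static int poll_done(int (*done)(void), unsigned int max_attempts)
{
	unsigned int i;

	for (i = 0; i < max_attempts; i++)
		if (done())
			return 0;
	return -1;
}

int main(void)
{
	attempts_left = 3;
	assert(poll_done(fake_done, 10) == 0);
	attempts_left = 3;
	assert(poll_done(fake_done, 2) == -1);
	return 0;
}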
 
 /**
@@ -1320,47 +1273,42 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg)
  **/
 static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
        u32 eec;
        u32 i;
 
        if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0)
-               status = IXGBE_ERR_SWFW_SYNC;
-
-       if (status == 0) {
-               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+               return IXGBE_ERR_SWFW_SYNC;
 
-               /* Request EEPROM Access */
-               eec |= IXGBE_EEC_REQ;
-               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
 
-               for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
-                       eec = IXGBE_READ_REG(hw, IXGBE_EEC);
-                       if (eec & IXGBE_EEC_GNT)
-                               break;
-                       udelay(5);
-               }
+       /* Request EEPROM Access */
+       eec |= IXGBE_EEC_REQ;
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
 
-               /* Release if grant not acquired */
-               if (!(eec & IXGBE_EEC_GNT)) {
-                       eec &= ~IXGBE_EEC_REQ;
-                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-                       hw_dbg(hw, "Could not acquire EEPROM grant\n");
+       for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) {
+               eec = IXGBE_READ_REG(hw, IXGBE_EEC);
+               if (eec & IXGBE_EEC_GNT)
+                       break;
+               udelay(5);
+       }
 
-                       hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-                       status = IXGBE_ERR_EEPROM;
-               }
+       /* Release if grant not acquired */
+       if (!(eec & IXGBE_EEC_GNT)) {
+               eec &= ~IXGBE_EEC_REQ;
+               IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+               hw_dbg(hw, "Could not acquire EEPROM grant\n");
 
-               /* Setup EEPROM for Read/Write */
-               if (status == 0) {
-                       /* Clear CS and SK */
-                       eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
-                       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
-                       IXGBE_WRITE_FLUSH(hw);
-                       udelay(1);
-               }
+               hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
+               return IXGBE_ERR_EEPROM;
        }
-       return status;
+
+       /* Setup EEPROM for Read/Write */
+       /* Clear CS and SK */
+       eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK);
+       IXGBE_WRITE_REG(hw, IXGBE_EEC, eec);
+       IXGBE_WRITE_FLUSH(hw);
+       udelay(1);
+       return 0;
 }
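The structural hazard with early returns is unwinding: the flattened ixgbe_acquire_eeprom still releases the SW/FW semaphore on the grant-timeout path before returning its error. A minimal sketch of that shape (the lock functions are stand-ins, not driver API):

#include <assert.h>

static int lock_held;

static int take_lock(void)	{ lock_held = 1; return 0; }
static void drop_lock(void)	{ lock_held = 0; }
static int grant_ready(void)	{ return 0; } /* grant never arrives */

static int acquire_sketch(void)
{
	unsigned int i;

	if (take_lock())
		return -1;	/* nothing acquired yet, nothing to undo */

	for (i = 0; i < 1000; i++)
		if (grant_ready())
			return 0; /* caller releases later, as in the driver */

	drop_lock();		/* unwind before the error return */
	return -2;
}

int main(void)
{
	assert(acquire_sketch() == -2);
	assert(lock_held == 0);	/* the failure path released the lock */
	return 0;
}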
 
 /**
@@ -1371,7 +1319,6 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
 {
-       s32 status = IXGBE_ERR_EEPROM;
        u32 timeout = 2000;
        u32 i;
        u32 swsm;
@@ -1383,68 +1330,60 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw)
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-               if (!(swsm & IXGBE_SWSM_SMBI)) {
-                       status = 0;
+               if (!(swsm & IXGBE_SWSM_SMBI))
                        break;
-               }
-               udelay(50);
+               usleep_range(50, 100);
        }
 
        if (i == timeout) {
                hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n");
-               /*
-                * this release is particularly important because our attempts
+               /* this release is particularly important because our attempts
                 * above to get the semaphore may have succeeded, and if there
                 * was a timeout, we should unconditionally clear the semaphore
                 * bits to free the driver to make progress
                 */
                ixgbe_release_eeprom_semaphore(hw);
 
-               udelay(50);
-               /*
-                * one last try
+               usleep_range(50, 100);
+               /* one last try
                 * If the SMBI bit is 0 when we read it, then the bit will be
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-               if (!(swsm & IXGBE_SWSM_SMBI))
-                       status = 0;
+               if (swsm & IXGBE_SWSM_SMBI) {
+                       hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+                       return IXGBE_ERR_EEPROM;
+               }
        }
 
        /* Now get the semaphore between SW/FW through the SWESMBI bit */
-       if (status == 0) {
-               for (i = 0; i < timeout; i++) {
-                       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+       for (i = 0; i < timeout; i++) {
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
 
-                       /* Set the SW EEPROM semaphore bit to request access */
-                       swsm |= IXGBE_SWSM_SWESMBI;
-                       IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
+               /* Set the SW EEPROM semaphore bit to request access */
+               swsm |= IXGBE_SWSM_SWESMBI;
+               IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm);
 
-                       /*
-                        * If we set the bit successfully then we got the
-                        * semaphore.
-                        */
-                       swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-                       if (swsm & IXGBE_SWSM_SWESMBI)
-                               break;
+               /* If we set the bit successfully then we got the
+                * semaphore.
+                */
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
+               if (swsm & IXGBE_SWSM_SWESMBI)
+                       break;
 
-                       udelay(50);
-               }
+               usleep_range(50, 100);
+       }
 
-               /*
-                * Release semaphores and return error if SW EEPROM semaphore
-                * was not granted because we don't have access to the EEPROM
-                */
-               if (i >= timeout) {
-                       hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
-                       ixgbe_release_eeprom_semaphore(hw);
-                       status = IXGBE_ERR_EEPROM;
-               }
-       } else {
-               hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+       /* Release semaphores and return error if SW EEPROM semaphore
+        * was not granted because we don't have access to the EEPROM
+        */
+       if (i >= timeout) {
+               hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n");
+               ixgbe_release_eeprom_semaphore(hw);
+               return IXGBE_ERR_EEPROM;
        }
 
-       return status;
+       return 0;
 }
 
 /**
@@ -1471,7 +1410,6 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
        u16 i;
        u8 spi_stat_reg;
 
@@ -1498,10 +1436,10 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw)
         */
        if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) {
                hw_dbg(hw, "SPI EEPROM Status error\n");
-               status = IXGBE_ERR_EEPROM;
+               return IXGBE_ERR_EEPROM;
        }
 
-       return status;
+       return 0;
 }
 
 /**
@@ -2100,17 +2038,14 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
        u32 mflcn_reg, fccfg_reg;
        u32 reg;
        u32 fcrtl, fcrth;
        int i;
 
        /* Validate the water mark configuration. */
-       if (!hw->fc.pause_time) {
-               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-               goto out;
-       }
+       if (!hw->fc.pause_time)
+               return IXGBE_ERR_INVALID_LINK_SETTINGS;
 
        /* Low water mark of zero causes XOFF floods */
        for (i = 0; i < MAX_TRAFFIC_CLASS; i++) {
@@ -2119,8 +2054,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
                        if (!hw->fc.low_water[i] ||
                            hw->fc.low_water[i] >= hw->fc.high_water[i]) {
                                hw_dbg(hw, "Invalid water mark configuration\n");
-                               ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS;
-                               goto out;
+                               return IXGBE_ERR_INVALID_LINK_SETTINGS;
                        }
                }
        }
@@ -2177,9 +2111,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
                break;
        default:
                hw_dbg(hw, "Flow control param set incorrectly\n");
-               ret_val = IXGBE_ERR_CONFIG;
-               goto out;
-               break;
+               return IXGBE_ERR_CONFIG;
        }
 
        /* Set 802.3x based flow control settings. */
@@ -2215,8 +2147,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw)
 
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);
 
-out:
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -2277,7 +2208,7 @@ static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg,
 static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
 {
        u32 pcs_anadv_reg, pcs_lpab_reg, linkstat;
-       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+       s32 ret_val;
 
        /*
         * On multispeed fiber at 1g, bail out if
@@ -2288,7 +2219,7 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
        linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
        if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) ||
            (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1))
-               goto out;
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
 
        pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
        pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
@@ -2299,7 +2230,6 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw)
                               IXGBE_PCS1GANA_SYM_PAUSE,
                               IXGBE_PCS1GANA_ASM_PAUSE);
 
-out:
        return ret_val;
 }
 
@@ -2312,7 +2242,7 @@ out:
 static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
 {
        u32 links2, anlp1_reg, autoc_reg, links;
-       s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED;
+       s32 ret_val;
 
        /*
         * On backplane, bail out if
@@ -2321,12 +2251,12 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
         */
        links = IXGBE_READ_REG(hw, IXGBE_LINKS);
        if ((links & IXGBE_LINKS_KX_AN_COMP) == 0)
-               goto out;
+               return IXGBE_ERR_FC_NOT_NEGOTIATED;
 
        if (hw->mac.type == ixgbe_mac_82599EB) {
                links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2);
                if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0)
-                       goto out;
+                       return IXGBE_ERR_FC_NOT_NEGOTIATED;
        }
        /*
         * Read the 10g AN autoc and LP ability registers and resolve
@@ -2339,7 +2269,6 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw)
                anlp1_reg, IXGBE_AUTOC_SYM_PAUSE, IXGBE_AUTOC_ASM_PAUSE,
                IXGBE_ANLP1_SYM_PAUSE, IXGBE_ANLP1_ASM_PAUSE);
 
-out:
        return ret_val;
 }
 
@@ -2485,7 +2414,6 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
        u32 i, poll;
        u16 value;
 
@@ -2495,13 +2423,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
        /* Exit if master requests are blocked */
        if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) ||
            ixgbe_removed(hw->hw_addr))
-               goto out;
+               return 0;
 
        /* Poll for master request bit to clear */
        for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) {
                udelay(100);
                if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO))
-                       goto out;
+                       return 0;
        }
 
        /*
@@ -2524,16 +2452,13 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw)
                udelay(100);
                value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS);
                if (ixgbe_removed(hw->hw_addr))
-                       goto out;
+                       return 0;
                if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING))
-                       goto out;
+                       return 0;
        }
 
        hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n");
-       status = IXGBE_ERR_MASTER_REQUESTS_PENDING;
-
-out:
-       return status;
+       return IXGBE_ERR_MASTER_REQUESTS_PENDING;
 }
 
 /**
@@ -2708,8 +2633,8 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        bool link_up = false;
        u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-       s32 ret_val = 0;
        bool locked = false;
+       s32 ret_val;
 
        /*
         * Link must be up to auto-blink the LEDs;
@@ -2720,14 +2645,14 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        if (!link_up) {
                ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
                if (ret_val)
-                       goto out;
+                       return ret_val;
 
                autoc_reg |= IXGBE_AUTOC_AN_RESTART;
                autoc_reg |= IXGBE_AUTOC_FLU;
 
                ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
                if (ret_val)
-                       goto out;
+                       return ret_val;
 
                IXGBE_WRITE_FLUSH(hw);
 
@@ -2739,8 +2664,7 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index)
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);
 
-out:
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -2752,19 +2676,19 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
 {
        u32 autoc_reg = 0;
        u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
-       s32 ret_val = 0;
        bool locked = false;
+       s32 ret_val;
 
        ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg);
        if (ret_val)
-               goto out;
+               return ret_val;
 
        autoc_reg &= ~IXGBE_AUTOC_FLU;
        autoc_reg |= IXGBE_AUTOC_AN_RESTART;
 
        ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked);
        if (ret_val)
-               goto out;
+               return ret_val;
 
        led_reg &= ~IXGBE_LED_MODE_MASK(index);
        led_reg &= ~IXGBE_LED_BLINK(index);
@@ -2772,8 +2696,7 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index)
        IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg);
        IXGBE_WRITE_FLUSH(hw);
 
-out:
-       return ret_val;
+       return 0;
 }
 
 /**
@@ -2865,7 +2788,7 @@ san_mac_addr_clr:
  **/
 u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
 {
-       u16 msix_count = 1;
+       u16 msix_count;
        u16 max_msix_count;
        u16 pcie_offset;
 
@@ -2880,7 +2803,7 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw)
                max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599;
                break;
        default:
-               return msix_count;
+               return 1;
        }
 
        msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset);
@@ -2918,10 +2841,10 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        mpsar_hi = IXGBE_READ_REG(hw, IXGBE_MPSAR_HI(rar));
 
        if (ixgbe_removed(hw->hw_addr))
-               goto done;
+               return 0;
 
        if (!mpsar_lo && !mpsar_hi)
-               goto done;
+               return 0;
 
        if (vmdq == IXGBE_CLEAR_VMDQ_ALL) {
                if (mpsar_lo) {
@@ -2943,7 +2866,6 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq)
        /* was that the last pool using this rar? */
        if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0)
                hw->mac.ops.clear_rar(hw, rar);
-done:
        return 0;
 }
 
@@ -3312,14 +3234,14 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
 
        if ((alt_san_mac_blk_offset == 0) ||
            (alt_san_mac_blk_offset == 0xFFFF))
-               goto wwn_prefix_out;
+               return 0;
 
        /* check capability in alternative san mac address block */
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
        if (hw->eeprom.ops.read(hw, offset, &caps))
                goto wwn_prefix_err;
        if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
-               goto wwn_prefix_out;
+               return 0;
 
        /* get the corresponding prefix for WWNN/WWPN */
        offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
@@ -3330,7 +3252,6 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix,
        if (hw->eeprom.ops.read(hw, offset, wwpn_prefix))
                goto wwn_prefix_err;
 
-wwn_prefix_out:
        return 0;
 
 wwn_prefix_err:
@@ -3524,21 +3445,17 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        u32 hdr_size = sizeof(struct ixgbe_hic_hdr);
        u8 buf_len, dword_len;
 
-       s32 ret_val = 0;
-
        if (length == 0 || length & 0x3 ||
            length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) {
                hw_dbg(hw, "Buffer length failure.\n");
-               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
        /* Check that the host interface is enabled. */
        hicr = IXGBE_READ_REG(hw, IXGBE_HICR);
        if ((hicr & IXGBE_HICR_EN) == 0) {
                hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n");
-               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
        /* Calculate length in DWORDs */
@@ -3566,8 +3483,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        if (i == IXGBE_HI_COMMAND_TIMEOUT ||
            (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) {
                hw_dbg(hw, "Command has failed with no status valid.\n");
-               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
        /* Calculate length in DWORDs */
@@ -3582,12 +3498,11 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
        /* If there is anything in the data position, pull it in */
        buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len;
        if (buf_len == 0)
-               goto out;
+               return 0;
 
        if (length < (buf_len + hdr_size)) {
                hw_dbg(hw, "Buffer not large enough for reply message.\n");
-               ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND;
-               goto out;
+               return IXGBE_ERR_HOST_INTERFACE_COMMAND;
        }
 
        /* Calculate length in DWORDs, add 3 for odd lengths */
@@ -3599,8 +3514,7 @@ static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer,
                le32_to_cpus(&buffer[bi]);
        }
 
-out:
-       return ret_val;
+       return 0;
 }
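The "add 3 for odd lengths" comment is ceiling division in disguise: rounding a byte count up to whole DWORDs. Also visible above, length & 0x3 rejects requests that are not already DWORD multiples on the way in. A quick check of the rounding (the driver expresses the same thing with a shift):

#include <assert.h>

/* (bytes + 3) / 4 rounds up to the next whole 32-bit word. */
static unsigned int dwords_for(unsigned int bytes)
{
	return (bytes + 3) / 4;
}

int main(void)
{
	assert(dwords_for(4) == 1);
	assert(dwords_for(5) == 2);
	assert(dwords_for(8) == 2);
	return 0;
}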
 
 /**
@@ -3621,12 +3535,10 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
 {
        struct ixgbe_hic_drv_info fw_cmd;
        int i;
-       s32 ret_val = 0;
+       s32 ret_val;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) {
-               ret_val = IXGBE_ERR_SWFW_SYNC;
-               goto out;
-       }
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM))
+               return IXGBE_ERR_SWFW_SYNC;
 
        fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO;
        fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN;
@@ -3658,7 +3570,6 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min,
        }
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM);
-out:
        return ret_val;
 }
 
@@ -3727,28 +3638,23 @@ static const u8 ixgbe_emc_therm_limit[4] = {
 static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg,
                              u16 *ets_offset)
 {
-       s32 status = 0;
+       s32 status;
 
        status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset);
        if (status)
-               goto out;
+               return status;
 
-       if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) {
-               status = IXGBE_NOT_IMPLEMENTED;
-               goto out;
-       }
+       if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF))
+               return IXGBE_NOT_IMPLEMENTED;
 
        status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg);
        if (status)
-               goto out;
+               return status;
 
-       if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) {
-               status = IXGBE_NOT_IMPLEMENTED;
-               goto out;
-       }
+       if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED)
+               return IXGBE_NOT_IMPLEMENTED;
 
-out:
-       return status;
+       return 0;
 }
 
 /**
@@ -3759,7 +3665,7 @@ out:
  **/
 s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
+       s32 status;
        u16 ets_offset;
        u16 ets_cfg;
        u16 ets_sensor;
@@ -3768,14 +3674,12 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
        struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
 
        /* Only support thermal sensors attached to physical port 0 */
-       if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
-               status = IXGBE_NOT_IMPLEMENTED;
-               goto out;
-       }
+       if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+               return IXGBE_NOT_IMPLEMENTED;
 
        status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
        if (status)
-               goto out;
+               return status;
 
        num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK);
        if (num_sensors > IXGBE_MAX_SENSORS)
@@ -3788,7 +3692,7 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
                status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i),
                                             &ets_sensor);
                if (status)
-                       goto out;
+                       return status;
 
                sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >>
                                IXGBE_ETS_DATA_INDEX_SHIFT);
@@ -3801,11 +3705,11 @@ s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw)
                                        IXGBE_I2C_THERMAL_SENSOR_ADDR,
                                        &data->sensor[i].temp);
                        if (status)
-                               goto out;
+                               return status;
                }
        }
-out:
-       return status;
+
+       return 0;
 }
 
 /**
@@ -3817,7 +3721,7 @@ out:
  **/
 s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
+       s32 status;
        u16 ets_offset;
        u16 ets_cfg;
        u16 ets_sensor;
@@ -3830,14 +3734,12 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
        memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data));
 
        /* Only support thermal sensors attached to physical port 0 */
-       if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) {
-               status = IXGBE_NOT_IMPLEMENTED;
-               goto out;
-       }
+       if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1))
+               return IXGBE_NOT_IMPLEMENTED;
 
        status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset);
        if (status)
-               goto out;
+               return status;
 
        low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >>
                             IXGBE_ETS_LTHRES_DELTA_SHIFT);
@@ -3871,7 +3773,7 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw)
                data->sensor[i].caution_thresh = therm_limit;
                data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta;
        }
-out:
-       return status;
+
+       return 0;
 }
 
index a689ee0..48f35fc 100644
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -87,7 +87,6 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
        int min_credit;
        int min_multiplier;
        int min_percent = 100;
-       s32 ret_val = 0;
        /* Initialization values default for Tx settings */
        u32 credit_refill       = 0;
        u32 credit_max          = 0;
@@ -95,10 +94,8 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
        u8  bw_percent          = 0;
        u8  i;
 
-       if (dcb_config == NULL) {
-               ret_val = DCB_ERR_CONFIG;
-               goto out;
-       }
+       if (!dcb_config)
+               return DCB_ERR_CONFIG;
 
        min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) /
                        DCB_CREDIT_QUANTUM;
@@ -174,8 +171,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
                p->data_credits_max = (u16)credit_max;
        }
 
-out:
-       return ret_val;
+       return 0;
 }
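min_credit above is the same ceiling-division idiom applied to DCB credits: half the max frame, rounded up to whole credit quanta. A sketch, with 64 assumed as an illustrative quantum (check DCB_CREDIT_QUANTUM for the real value):

#include <assert.h>

static unsigned int min_credit(unsigned int max_frame, unsigned int quantum)
{
	/* (n + q - 1) / q == ceil(n / q) for positive integers */
	return ((max_frame / 2) + quantum - 1) / quantum;
}

int main(void)
{
	/* 1518 / 2 = 759 bytes -> 12 quanta of 64 (768 bytes covers it) */
	assert(min_credit(1518, 64) == 12);
	return 0;
}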
 
 void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en)
@@ -236,7 +232,7 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
 
        /* If tc is 0 then DCB is likely not enabled or supported */
        if (!tc)
-               goto out;
+               return 0;
 
        /*
         * Test from maximum TC to 1 and report the first match we find.  If
@@ -247,7 +243,7 @@ u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up)
                if (prio_mask & tc_config[tc].path[direction].up_to_tc_bitmap)
                        break;
        }
-out:
+
        return tc;
 }
 
@@ -269,7 +265,6 @@ void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map)
 s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
                        struct ixgbe_dcb_config *dcb_config)
 {
-       s32 ret = 0;
        u8 pfc_en;
        u8 ptype[MAX_TRAFFIC_CLASS];
        u8 bwgid[MAX_TRAFFIC_CLASS];
@@ -287,37 +282,31 @@ s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw,
 
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
-                                               bwgid, ptype);
-               break;
+               return ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max,
+                                                bwgid, ptype);
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
-                                               bwgid, ptype, prio_tc);
-               break;
+               return ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max,
+                                                bwgid, ptype, prio_tc);
        default:
                break;
        }
-       return ret;
+       return 0;
 }
 
 /* Helper routines to abstract HW specifics from DCB netlink ops */
 s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc)
 {
-       int ret = -EINVAL;
-
        switch (hw->mac.type) {
        case ixgbe_mac_82598EB:
-               ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en);
-               break;
+               return ixgbe_dcb_config_pfc_82598(hw, pfc_en);
        case ixgbe_mac_82599EB:
        case ixgbe_mac_X540:
-               ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
-               break;
+               return ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc);
        default:
                break;
        }
-       return ret;
+       return -EINVAL;
 }
 
 s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame)
index 5172b6b..58a7f53 100644
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -153,7 +153,6 @@ static u8 ixgbe_dcbnl_get_state(struct net_device *netdev)
 static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
-       int err = 0;
 
        /* Fail command if not in CEE mode */
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
@@ -161,12 +160,10 @@ static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state)
 
        /* verify there is something to do, if not then exit */
        if (!state == !(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
-               goto out;
+               return 0;
 
-       err = ixgbe_setup_tc(netdev,
-                            state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
-out:
-       return !!err;
+       return !!ixgbe_setup_tc(netdev,
+                               state ? adapter->dcb_cfg.num_tcs.pg_tcs : 0);
 }
 
 static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev,
@@ -331,12 +328,12 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev)
 
        /* Fail command if not in CEE mode */
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return ret;
+               return DCB_NO_HW_CHG;
 
        adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter,
                                                      MAX_TRAFFIC_CLASS);
        if (!adapter->dcb_set_bitmap)
-               return ret;
+               return DCB_NO_HW_CHG;
 
        if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) {
                u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS];
@@ -460,7 +457,6 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num)
                        break;
                default:
                        return -EINVAL;
-                       break;
                }
        } else {
                return -EINVAL;
@@ -495,10 +491,10 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state)
  * @id: id is either ether type or TCP/UDP port number
  *
  * Returns : on success, returns a non-zero 802.1p user priority bitmap
- * otherwise returns 0 as the invalid user priority bitmap to indicate an
+ * otherwise returns -EINVAL as the invalid user priority bitmap to indicate an
  * error.
  */
-static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
+static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct dcb_app app = {
@@ -507,7 +503,7 @@ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id)
                             };
 
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE))
-               return 0;
+               return -EINVAL;
 
        return dcb_getapp(netdev, &app);
 }
@@ -537,7 +533,7 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       int i, err = 0;
+       int i, err;
        __u8 max_tc = 0;
        __u8 map_chg = 0;
 
@@ -574,17 +570,15 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev,
        if (max_tc > adapter->dcb_cfg.num_tcs.pg_tcs)
                return -EINVAL;
 
-       if (max_tc != netdev_get_num_tc(dev))
+       if (max_tc != netdev_get_num_tc(dev)) {
                err = ixgbe_setup_tc(dev, max_tc);
-       else if (map_chg)
+               if (err)
+                       return err;
+       } else if (map_chg) {
                ixgbe_dcbnl_devreset(dev);
+       }
 
-       if (err)
-               goto err_out;
-
-       err = ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
-err_out:
-       return err;
+       return ixgbe_dcb_hw_ets(&adapter->hw, ets, max_frame);
 }
 
 static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev,
@@ -648,10 +642,10 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
                                   struct dcb_app *app)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
-       int err = -EINVAL;
+       int err;
 
        if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE))
-               return err;
+               return -EINVAL;
 
        err = dcb_ieee_setapp(dev, app);
        if (err)
@@ -663,7 +657,7 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev,
                u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
                if (app_mask & (1 << adapter->fcoe.up))
-                       return err;
+                       return 0;
 
                adapter->fcoe.up = app->priority;
                ixgbe_dcbnl_devreset(dev);
@@ -706,7 +700,7 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev,
                u8 app_mask = dcb_ieee_getapp_mask(dev, app);
 
                if (app_mask & (1 << adapter->fcoe.up))
-                       return err;
+                       return 0;
 
                adapter->fcoe.up = app_mask ?
                                   ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC;
index a452730..94a1c07 100644
@@ -1408,7 +1408,6 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data)
        default:
                *data = 1;
                return 1;
-               break;
        }
 
        /*
@@ -2866,7 +2865,6 @@ static int ixgbe_get_ts_info(struct net_device *dev,
                break;
        default:
                return ethtool_op_get_ts_info(dev, info);
-               break;
        }
        return 0;
 }
index 25a3dfe..2ad91cb 100644
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -67,23 +67,23 @@ static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
  */
 int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
 {
-       int len = 0;
+       int len;
        struct ixgbe_fcoe *fcoe;
        struct ixgbe_adapter *adapter;
        struct ixgbe_fcoe_ddp *ddp;
        u32 fcbuff;
 
        if (!netdev)
-               goto out_ddp_put;
+               return 0;
 
        if (xid >= IXGBE_FCOE_DDP_MAX)
-               goto out_ddp_put;
+               return 0;
 
        adapter = netdev_priv(netdev);
        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
-               goto out_ddp_put;
+               return 0;
 
        len = ddp->len;
        /* if there is an error, force invalidation of the ddp context */
@@ -114,7 +114,6 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
 
        ixgbe_fcoe_clear_ddp(ddp);
 
-out_ddp_put:
        return len;
 }
 
@@ -394,17 +393,17 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                xid =  be16_to_cpu(fh->fh_rx_id);
 
        if (xid >= IXGBE_FCOE_DDP_MAX)
-               goto ddp_out;
+               return -EINVAL;
 
        fcoe = &adapter->fcoe;
        ddp = &fcoe->ddp[xid];
        if (!ddp->udl)
-               goto ddp_out;
+               return -EINVAL;
 
        ddp_err = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCEOFE |
                                              IXGBE_RXDADV_ERR_FCERR);
        if (ddp_err)
-               goto ddp_out;
+               return -EINVAL;
 
        switch (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_FCSTAT)) {
        /* return 0 to bypass going to ULD for DDPed data */
@@ -447,7 +446,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
                crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
                crc->fcoe_eof = FC_EOF_T;
        }
-ddp_out:
+
        return rc;
 }
 
@@ -878,7 +877,6 @@ int ixgbe_fcoe_disable(struct net_device *netdev)
  */
 int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
 {
-       int rc = -EINVAL;
        u16 prefix = 0xffff;
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
        struct ixgbe_mac_info *mac = &adapter->hw.mac;
@@ -903,9 +901,9 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
                       ((u64) mac->san_addr[3] << 16) |
                       ((u64) mac->san_addr[4] << 8)  |
                       ((u64) mac->san_addr[5]);
-               rc = 0;
+               return 0;
        }
-       return rc;
+       return -EINVAL;
 }
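
ixgbe_fcoe_get_wwn now returns 0 as soon as the WWN is assembled. The visible lines pack the low three SAN MAC bytes; a self-contained sketch assuming the common layout (a 16-bit prefix in the top bits followed by all six MAC bytes, with made-up values):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        const uint8_t san[6] = { 0x00, 0x1b, 0x21, 0xaa, 0xbb, 0xcc };
        const uint16_t prefix = 0x2000;     /* hypothetical prefix value */

        uint64_t wwn = ((uint64_t)prefix << 48) |
                       ((uint64_t)san[0] << 40) | ((uint64_t)san[1] << 32) |
                       ((uint64_t)san[2] << 24) | ((uint64_t)san[3] << 16) |
                       ((uint64_t)san[4] << 8)  |  (uint64_t)san[5];

        printf("wwn = 0x%016llx\n", (unsigned long long)wwn);
        return 0;
    }
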
 
 /**
index f5aa331..5384ed3 100644
@@ -570,7 +570,7 @@ static void ixgbe_dump(struct ixgbe_adapter *adapter)
 
        /* Print TX Ring Summary */
        if (!netdev || !netif_running(netdev))
-               goto exit;
+               return;
 
        dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
        pr_info(" %s     %s              %s        %s\n",
@@ -685,7 +685,7 @@ rx_ring_summary:
 
        /* Print RX Rings */
        if (!netif_msg_rx_status(adapter))
-               goto exit;
+               return;
 
        dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
 
@@ -787,9 +787,6 @@ rx_ring_summary:
 
                }
        }
-
-exit:
-       return;
 }
 
 static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter)
@@ -1011,7 +1008,6 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
        u32 tx_done = ixgbe_get_tx_completed(tx_ring);
        u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
        u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
-       bool ret = false;
 
        clear_check_for_tx_hang(tx_ring);
 
@@ -1027,18 +1023,16 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
         * run the check_tx_hang logic with a transmit completion
         * pending but without time to complete it yet.
         */
-       if ((tx_done_old == tx_done) && tx_pending) {
+       if (tx_done_old == tx_done && tx_pending)
                /* make sure it is true for two checks in a row */
-               ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
-                                      &tx_ring->state);
-       } else {
-               /* update completed stats and continue */
-               tx_ring->tx_stats.tx_done_old = tx_done;
-               /* reset the countdown */
-               clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
-       }
+               return test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+                                       &tx_ring->state);
+       /* update completed stats and continue */
+       tx_ring->tx_stats.tx_done_old = tx_done;
+       /* reset the countdown */
+       clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
 
-       return ret;
+       return false;
 }
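
The rewritten ixgbe_check_tx_hang keeps the two-strike semantics: a stalled ring arms a flag on the first check and only reports a hang when a second consecutive check still sees no progress. A userspace model where plain fields stand in for the atomic bitops (struct ring_model is invented for the sketch):

    #include <stdbool.h>

    struct ring_model { unsigned done_old; bool armed; };

    bool check_hang(struct ring_model *r, unsigned done_now, bool pending)
    {
        if (r->done_old == done_now && pending) {
            bool was_armed = r->armed;  /* test_and_set_bit() analogue */
            r->armed = true;
            return was_armed;           /* true only on the second strike */
        }
        r->done_old = done_now;         /* progress: update completed stats */
        r->armed = false;               /* and reset the countdown */
        return false;
    }
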
 
 /**
@@ -4701,18 +4695,18 @@ static int ixgbe_non_sfp_link_config(struct ixgbe_hw *hw)
                ret = hw->mac.ops.check_link(hw, &speed, &link_up, false);
 
        if (ret)
-               goto link_cfg_out;
+               return ret;
 
        speed = hw->phy.autoneg_advertised;
        if ((!speed) && (hw->mac.ops.get_link_capabilities))
                ret = hw->mac.ops.get_link_capabilities(hw, &speed,
                                                        &autoneg);
        if (ret)
-               goto link_cfg_out;
+               return ret;
 
        if (hw->mac.ops.setup_link)
                ret = hw->mac.ops.setup_link(hw, speed, link_up);
-link_cfg_out:
+
        return ret;
 }
 
@@ -7973,23 +7967,32 @@ static const struct net_device_ops ixgbe_netdev_ops = {
  **/
 static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter)
 {
-       struct list_head *entry;
+       struct pci_dev *entry, *pdev = adapter->pdev;
        int physfns = 0;
 
        /* Some cards can not use the generic count PCIe functions method,
         * because they are behind a parent switch, so we hardcode these with
         * the correct number of functions.
         */
-       if (ixgbe_pcie_from_parent(&adapter->hw)) {
+       if (ixgbe_pcie_from_parent(&adapter->hw))
                physfns = 4;
-       } else {
-               list_for_each(entry, &adapter->pdev->bus_list) {
-                       struct pci_dev *pdev =
-                               list_entry(entry, struct pci_dev, bus_list);
-                       /* don't count virtual functions */
-                       if (!pdev->is_virtfn)
-                               physfns++;
-               }
+
+       list_for_each_entry(entry, &adapter->pdev->bus->devices, bus_list) {
+               /* don't count virtual functions */
+               if (entry->is_virtfn)
+                       continue;
+
+               /* When the devices on the bus don't all match our device ID,
+                * we can't reliably determine the correct number of
+                * functions. This can occur if a function has been direct
+                * attached to a virtual machine using VT-d, for example. In
+                * this case, simply return -1 to indicate this.
+                */
+               if ((entry->vendor != pdev->vendor) ||
+                   (entry->device != pdev->device))
+                       return -1;
+
+               physfns++;
        }
 
        return physfns;
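
The enumeration rule introduced above: walk the bus, skip virtual functions, and give up with -1 the moment a device with a foreign vendor/device ID appears, since the count is then meaningless. A standalone model (struct dev_id is invented for the sketch):

    struct dev_id { unsigned short vendor, device; int is_virtfn; };

    int count_physfns(const struct dev_id *bus, int n, struct dev_id self)
    {
        int physfns = 0;

        for (int i = 0; i < n; i++) {
            if (bus[i].is_virtfn)
                continue;               /* don't count virtual functions */
            if (bus[i].vendor != self.vendor || bus[i].device != self.device)
                return -1;              /* mixed bus: count is unreliable */
            physfns++;
        }
        return physfns;
    }
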
@@ -8161,7 +8164,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
        netdev->netdev_ops = &ixgbe_netdev_ops;
        ixgbe_set_ethtool_ops(netdev);
        netdev->watchdog_timeo = 5 * HZ;
-       strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+       strlcpy(netdev->name, pci_name(pdev), sizeof(netdev->name));
 
        adapter->bd_number = cards_found;
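
The strncpy-to-strlcpy conversions matter because strncpy leaves the destination unterminated when the source is at least as long as the buffer, while strlcpy always writes a NUL. A portable sketch with a local strlcpy, since not every libc provides one:

    #include <stdio.h>
    #include <string.h>

    static size_t my_strlcpy(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(src);

        if (size) {
            size_t n = len < size - 1 ? len : size - 1;
            memcpy(dst, src, n);
            dst[n] = '\0';              /* termination is unconditional */
        }
        return len;                     /* callers can detect truncation */
    }

    int main(void)
    {
        char a[8], b[8];

        strncpy(a, "0000:01:00.0", sizeof(a));      /* 8 bytes copied, no NUL */
        my_strlcpy(b, "0000:01:00.0", sizeof(b));   /* b is "0000:01" + NUL   */
        printf("%.8s / %s\n", a, b);
        return 0;
    }

Returning the full source length also lets a caller compare against the buffer size to detect truncation, which strncpy cannot report.
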
 
@@ -8384,11 +8387,14 @@ skip_sriov:
                expected_gts = ixgbe_enumerate_functions(adapter) * 10;
                break;
        }
-       ixgbe_check_minimum_link(adapter, expected_gts);
 
-       err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH);
+       /* don't check link if we failed to enumerate functions */
+       if (expected_gts > 0)
+               ixgbe_check_minimum_link(adapter, expected_gts);
+
+       err = ixgbe_read_pba_string_generic(hw, part_str, sizeof(part_str));
        if (err)
-               strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH);
+               strlcpy(part_str, "Unknown", sizeof(part_str));
        if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present)
                e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n",
                           hw->mac.type, hw->phy.type, hw->phy.sfp_type,
@@ -8477,7 +8483,7 @@ err_alloc_etherdev:
                                     pci_select_bars(pdev, IORESOURCE_MEM));
 err_pci_reg:
 err_dma:
-       if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
+       if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state))
                pci_disable_device(pdev);
        return err;
 }
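
The `!adapter ||` guard added to the error path above exists presumably because that label is reachable both before and after the private struct is set up, so the cleanup must not dereference a pointer that was never assigned. The shape, reduced to a userspace sketch (struct priv is invented):

    #include <stdio.h>
    #include <stdlib.h>

    struct priv { int disabled; };

    void err_path(struct priv *p)
    {
        /* p may still be NULL if probing failed before the allocation,
         * so test the pointer before reading p->disabled */
        if (!p || !p->disabled)
            puts("disabling device");
        free(p);    /* free(NULL) is defined to be a no-op */
    }
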
index 1918e0a..cc8f012 100644
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
 s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
 
        /* limit read to size of mailbox */
        if (size > mbx->size)
                size = mbx->size;
 
-       if (mbx->ops.read)
-               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+       if (!mbx->ops.read)
+               return IXGBE_ERR_MBX;
 
-       return ret_val;
+       return mbx->ops.read(hw, msg, size, mbx_id);
 }
 
 /**
@@ -67,15 +66,14 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = 0;
 
        if (size > mbx->size)
-               ret_val = IXGBE_ERR_MBX;
+               return IXGBE_ERR_MBX;
 
-       else if (mbx->ops.write)
-               ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+       if (!mbx->ops.write)
+               return IXGBE_ERR_MBX;
 
-       return ret_val;
+       return mbx->ops.write(hw, msg, size, mbx_id);
 }
 
 /**
@@ -88,12 +86,11 @@ s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id)
 s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
 
-       if (mbx->ops.check_for_msg)
-               ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+       if (!mbx->ops.check_for_msg)
+               return IXGBE_ERR_MBX;
 
-       return ret_val;
+       return mbx->ops.check_for_msg(hw, mbx_id);
 }
 
 /**
@@ -106,12 +103,11 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
 s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
 
-       if (mbx->ops.check_for_ack)
-               ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+       if (!mbx->ops.check_for_ack)
+               return IXGBE_ERR_MBX;
 
-       return ret_val;
+       return mbx->ops.check_for_ack(hw, mbx_id);
 }
 
 /**
@@ -124,12 +120,11 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
 s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
 
-       if (mbx->ops.check_for_rst)
-               ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+       if (!mbx->ops.check_for_rst)
+               return IXGBE_ERR_MBX;
 
-       return ret_val;
+       return mbx->ops.check_for_rst(hw, mbx_id);
 }
 
 /**
@@ -145,17 +140,16 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id)
        int countdown = mbx->timeout;
 
        if (!countdown || !mbx->ops.check_for_msg)
-               goto out;
+               return IXGBE_ERR_MBX;
 
-       while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+       while (mbx->ops.check_for_msg(hw, mbx_id)) {
                countdown--;
                if (!countdown)
-                       break;
+                       return IXGBE_ERR_MBX;
                udelay(mbx->usec_delay);
        }
 
-out:
-       return countdown ? 0 : IXGBE_ERR_MBX;
+       return 0;
 }
 
 /**
@@ -171,17 +165,16 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id)
        int countdown = mbx->timeout;
 
        if (!countdown || !mbx->ops.check_for_ack)
-               goto out;
+               return IXGBE_ERR_MBX;
 
-       while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+       while (mbx->ops.check_for_ack(hw, mbx_id)) {
                countdown--;
                if (!countdown)
-                       break;
+                       return IXGBE_ERR_MBX;
                udelay(mbx->usec_delay);
        }
 
-out:
-       return countdown ? 0 : IXGBE_ERR_MBX;
+       return 0;
 }
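
Both polling loops now fold the timeout into the loop itself: bail out immediately if polling is impossible, then retry with a delay until the countdown runs dry. The generic shape, where busy and pause are caller-supplied stand-ins for the mailbox check and udelay:

    #define ERR_TIMEOUT (-1)

    int poll_until_clear(int (*busy)(void), void (*pause)(void), int countdown)
    {
        if (!countdown || !busy)
            return ERR_TIMEOUT;     /* cannot poll at all */

        while (busy()) {
            if (!--countdown)
                return ERR_TIMEOUT; /* budget exhausted */
            pause();                /* e.g. a short delay */
        }
        return 0;                   /* condition cleared in time */
    }
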
 
 /**
@@ -198,18 +191,17 @@ static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
                                 u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
+       s32 ret_val;
 
        if (!mbx->ops.read)
-               goto out;
+               return IXGBE_ERR_MBX;
 
        ret_val = ixgbe_poll_for_msg(hw, mbx_id);
+       if (ret_val)
+               return ret_val;
 
-       /* if ack received read message, otherwise we timed out */
-       if (!ret_val)
-               ret_val = mbx->ops.read(hw, msg, size, mbx_id);
-out:
-       return ret_val;
+       /* if ack received read message */
+       return mbx->ops.read(hw, msg, size, mbx_id);
 }
 
 /**
@@ -226,33 +218,31 @@ static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size,
                           u16 mbx_id)
 {
        struct ixgbe_mbx_info *mbx = &hw->mbx;
-       s32 ret_val = IXGBE_ERR_MBX;
+       s32 ret_val;
 
        /* exit if either we can't write or there isn't a defined timeout */
        if (!mbx->ops.write || !mbx->timeout)
-               goto out;
+               return IXGBE_ERR_MBX;
 
        /* send msg */
        ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+       if (ret_val)
+               return ret_val;
 
        /* if msg sent wait until we receive an ack */
-       if (!ret_val)
-               ret_val = ixgbe_poll_for_ack(hw, mbx_id);
-out:
-       return ret_val;
+       return ixgbe_poll_for_ack(hw, mbx_id);
 }
 
 static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
 {
        u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index));
-       s32 ret_val = IXGBE_ERR_MBX;
 
        if (mbvficr & mask) {
-               ret_val = 0;
                IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask);
+               return 0;
        }
 
-       return ret_val;
+       return IXGBE_ERR_MBX;
 }
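
ixgbe_check_for_bit_pf reads the cause register and, when the interesting bit is set, writes the mask back (the write-back acts as a clear on this hardware) and reports success. A userspace model where the clear is simulated explicitly; ERR_MBX is a stand-in value:

    #include <stdint.h>

    #define ERR_MBX (-100)      /* stand-in for IXGBE_ERR_MBX */

    int check_and_clear(uint32_t *reg, uint32_t mask)
    {
        if (*reg & mask) {
            *reg &= ~mask;      /* models the hardware's write-to-clear */
            return 0;           /* event was pending */
        }
        return ERR_MBX;         /* nothing to report */
    }
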
 
 /**
@@ -264,17 +254,16 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index)
  **/
 static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
 {
-       s32 ret_val = IXGBE_ERR_MBX;
        s32 index = IXGBE_MBVFICR_INDEX(vf_number);
        u32 vf_bit = vf_number % 16;
 
        if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit,
                                    index)) {
-               ret_val = 0;
                hw->mbx.stats.reqs++;
+               return 0;
        }
 
-       return ret_val;
+       return IXGBE_ERR_MBX;
 }
 
 /**
@@ -286,17 +275,16 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number)
  **/
 static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number)
 {
-       s32 ret_val = IXGBE_ERR_MBX;
        s32 index = IXGBE_MBVFICR_INDEX(vf_number);
        u32 vf_bit = vf_number % 16;
 
        if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit,
                                    index)) {
-               ret_val = 0;
                hw->mbx.stats.acks++;
+               return 0;
        }
 
-       return ret_val;
+       return IXGBE_ERR_MBX;
 }
 
 /**
@@ -311,7 +299,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
        u32 reg_offset = (vf_number < 32) ? 0 : 1;
        u32 vf_shift = vf_number % 32;
        u32 vflre = 0;
-       s32 ret_val = IXGBE_ERR_MBX;
 
        switch (hw->mac.type) {
        case ixgbe_mac_82599EB:
@@ -325,12 +312,12 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
        }
 
        if (vflre & (1 << vf_shift)) {
-               ret_val = 0;
                IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift));
                hw->mbx.stats.rsts++;
+               return 0;
        }
 
-       return ret_val;
+       return IXGBE_ERR_MBX;
 }
 
 /**
@@ -342,7 +329,6 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number)
  **/
 static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
 {
-       s32 ret_val = IXGBE_ERR_MBX;
        u32 p2v_mailbox;
 
        /* Take ownership of the buffer */
@@ -351,9 +337,9 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number)
        /* reserve mailbox for vf use */
        p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number));
        if (p2v_mailbox & IXGBE_PFMAILBOX_PFU)
-               ret_val = 0;
+               return 0;
 
-       return ret_val;
+       return IXGBE_ERR_MBX;
 }
 
 /**
@@ -374,7 +360,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
        /* lock the mailbox to prevent pf/vf race condition */
        ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
        if (ret_val)
-               goto out_no_write;
+               return ret_val;
 
        /* flush msg and acks as we are overwriting the message buffer */
        ixgbe_check_for_msg_pf(hw, vf_number);
@@ -390,9 +376,7 @@ static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
        /* update stats */
        hw->mbx.stats.msgs_tx++;
 
-out_no_write:
-       return ret_val;
-
+       return 0;
 }
 
 /**
@@ -415,7 +399,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
        /* lock the mailbox to prevent pf/vf race condition */
        ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number);
        if (ret_val)
-               goto out_no_read;
+               return ret_val;
 
        /* copy the message to the mailbox memory buffer */
        for (i = 0; i < size; i++)
@@ -427,8 +411,7 @@ static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size,
        /* update stats */
        hw->mbx.stats.msgs_rx++;
 
-out_no_read:
-       return ret_val;
+       return 0;
 }
 
 #ifdef CONFIG_PCI_IOV
index ff68b7a..11f02ea 100644
@@ -57,7 +57,6 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw);
  **/
 s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
 {
-       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
        u32 phy_addr;
        u16 ext_ability = 0;
 
@@ -84,18 +83,14 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw)
                                                         ixgbe_phy_generic;
                                }
 
-                               status = 0;
-                               break;
+                               return 0;
                        }
                }
                /* clear value if nothing found */
-               if (status != 0)
-                       hw->phy.mdio.prtad = 0;
-       } else {
-               status = 0;
+               hw->phy.mdio.prtad = 0;
+               return IXGBE_ERR_PHY_ADDR_INVALID;
        }
-
-       return status;
+       return 0;
 }
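
The identify-PHY rewrite makes the scan read naturally: return 0 from inside the loop on the first responding address, and only reach the invalid-address path once every address has been probed. The shape, with a hypothetical probe callback:

    int find_phy(int (*probe)(unsigned addr), unsigned naddrs, unsigned *prtad)
    {
        for (unsigned addr = 0; addr < naddrs; addr++) {
            if (probe(addr)) {
                *prtad = addr;      /* remember where we found it */
                return 0;
            }
        }
        *prtad = 0;                 /* clear value if nothing found */
        return -1;                  /* no PHY answered */
    }
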
 
 /**
@@ -192,16 +187,16 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
                status = ixgbe_identify_phy_generic(hw);
 
        if (status != 0 || hw->phy.type == ixgbe_phy_none)
-               goto out;
+               return status;
 
        /* Don't reset PHY if it's shut down due to overtemp. */
        if (!hw->phy.reset_if_overtemp &&
            (IXGBE_ERR_OVERTEMP == hw->phy.ops.check_overtemp(hw)))
-               goto out;
+               return 0;
 
        /* Blocked by MNG FW so bail */
        if (ixgbe_check_reset_blocked(hw))
-               goto out;
+               return 0;
 
        /*
         * Perform soft PHY reset to the PHY_XS.
@@ -227,12 +222,11 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw)
        }
 
        if (ctrl & MDIO_CTRL1_RESET) {
-               status = IXGBE_ERR_RESET_FAILED;
                hw_dbg(hw, "PHY reset polling failed to complete.\n");
+               return IXGBE_ERR_RESET_FAILED;
        }
 
-out:
-       return status;
+       return 0;
 }
 
 /**
@@ -333,7 +327,7 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                                phy_data);
                hw->mac.ops.release_swfw_sync(hw, gssr);
        } else {
-               status = IXGBE_ERR_SWFW_SYNC;
+               return IXGBE_ERR_SWFW_SYNC;
        }
 
        return status;
@@ -436,7 +430,7 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr,
                                                 phy_data);
                hw->mac.ops.release_swfw_sync(hw, gssr);
        } else {
-               status = IXGBE_ERR_SWFW_SYNC;
+               return IXGBE_ERR_SWFW_SYNC;
        }
 
        return status;
@@ -509,7 +503,7 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
 
        /* Blocked by MNG FW so don't reset PHY */
        if (ixgbe_check_reset_blocked(hw))
-               return status;
+               return 0;
 
        /* Restart PHY autonegotiation and wait for completion */
        hw->phy.ops.read_reg(hw, MDIO_CTRL1,
@@ -535,8 +529,8 @@ s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw)
        }
 
        if (time_out == max_time_out) {
-               status = IXGBE_ERR_LINK_SETUP;
                hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n");
+               return IXGBE_ERR_LINK_SETUP;
        }
 
        return status;
@@ -585,7 +579,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
                                               ixgbe_link_speed *speed,
                                               bool *autoneg)
 {
-       s32 status = IXGBE_ERR_LINK_SETUP;
+       s32 status;
        u16 speed_ability;
 
        *speed = 0;
@@ -616,7 +610,7 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw,
 s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
                             bool *link_up)
 {
-       s32 status = 0;
+       s32 status;
        u32 time_out;
        u32 max_time_out = 10;
        u16 phy_link = 0;
@@ -662,7 +656,7 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
  **/
 s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
+       s32 status;
        u32 time_out;
        u32 max_time_out = 10;
        u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
@@ -719,7 +713,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 
        /* Blocked by MNG FW so don't reset PHY */
        if (ixgbe_check_reset_blocked(hw))
-               return status;
+               return 0;
 
        /* Restart PHY autonegotiation and wait for completion */
        hw->phy.ops.read_reg(hw, MDIO_CTRL1,
@@ -744,8 +738,8 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
        }
 
        if (time_out == max_time_out) {
-               status = IXGBE_ERR_LINK_SETUP;
                hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n");
+               return IXGBE_ERR_LINK_SETUP;
        }
 
        return status;
@@ -759,7 +753,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
 s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
                                       u16 *firmware_version)
 {
-       s32 status = 0;
+       s32 status;
 
        status = hw->phy.ops.read_reg(hw, TNX_FW_REV,
                                      MDIO_MMD_VEND1,
@@ -776,7 +770,7 @@ s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw,
 s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw,
                                           u16 *firmware_version)
 {
-       s32 status = 0;
+       s32 status;
 
        status = hw->phy.ops.read_reg(hw, AQ_FW_REV,
                                      MDIO_MMD_VEND1,
@@ -795,12 +789,12 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
        bool end_data = false;
        u16 list_offset, data_offset;
        u16 phy_data = 0;
-       s32 ret_val = 0;
+       s32 ret_val;
        u32 i;
 
        /* Blocked by MNG FW so bail */
        if (ixgbe_check_reset_blocked(hw))
-               goto out;
+               return 0;
 
        hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data);
 
@@ -818,15 +812,14 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
 
        if ((phy_data & MDIO_CTRL1_RESET) != 0) {
                hw_dbg(hw, "PHY reset did not complete.\n");
-               ret_val = IXGBE_ERR_PHY;
-               goto out;
+               return IXGBE_ERR_PHY;
        }
 
        /* Get init offsets */
        ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
                                                      &data_offset);
-       if (ret_val != 0)
-               goto out;
+       if (ret_val)
+               return ret_val;
 
        ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc);
        data_offset++;
@@ -876,18 +869,15 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw)
                                hw_dbg(hw, "SOL\n");
                        } else {
                                hw_dbg(hw, "Bad control value\n");
-                               ret_val = IXGBE_ERR_PHY;
-                               goto out;
+                               return IXGBE_ERR_PHY;
                        }
                        break;
                default:
                        hw_dbg(hw, "Bad control type\n");
-                       ret_val = IXGBE_ERR_PHY;
-                       goto out;
+                       return IXGBE_ERR_PHY;
                }
        }
 
-out:
        return ret_val;
 
 err_eeprom:
@@ -903,34 +893,29 @@ err_eeprom:
  **/
 s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw)
 {
-       s32 status = IXGBE_ERR_SFP_NOT_PRESENT;
-
        switch (hw->mac.ops.get_media_type(hw)) {
        case ixgbe_media_type_fiber:
-               status = ixgbe_identify_sfp_module_generic(hw);
-               break;
+               return ixgbe_identify_sfp_module_generic(hw);
        case ixgbe_media_type_fiber_qsfp:
-               status = ixgbe_identify_qsfp_module_generic(hw);
-               break;
+               return ixgbe_identify_qsfp_module_generic(hw);
        default:
                hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
-               break;
+               return IXGBE_ERR_SFP_NOT_PRESENT;
        }
 
-       return status;
+       return IXGBE_ERR_SFP_NOT_PRESENT;
 }
 
 /**
  *  ixgbe_identify_sfp_module_generic - Identifies SFP modules
  *  @hw: pointer to hardware structure
-*
+ *
  *  Searches for and identifies the SFP module and assigns appropriate PHY type.
  **/
 s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 {
        struct ixgbe_adapter *adapter = hw->back;
-       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       s32 status;
        u32 vendor_oui = 0;
        enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
        u8 identifier = 0;
@@ -943,15 +928,14 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
        if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) {
                hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
-               goto out;
+               return IXGBE_ERR_SFP_NOT_PRESENT;
        }
 
        status = hw->phy.ops.read_i2c_eeprom(hw,
                                             IXGBE_SFF_IDENTIFIER,
                                             &identifier);
 
-       if (status != 0)
+       if (status)
                goto err_read_i2c_eeprom;
 
        /* LAN ID is needed for sfp_type determination */
@@ -959,239 +943,224 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw)
 
        if (identifier != IXGBE_SFF_IDENTIFIER_SFP) {
                hw->phy.type = ixgbe_phy_sfp_unsupported;
-               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-       } else {
-               status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                    IXGBE_SFF_1GBE_COMP_CODES,
-                                                    &comp_codes_1g);
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+       }
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_1GBE_COMP_CODES,
+                                            &comp_codes_1g);
 
-               if (status != 0)
-                       goto err_read_i2c_eeprom;
+       if (status)
+               goto err_read_i2c_eeprom;
 
-               status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                    IXGBE_SFF_10GBE_COMP_CODES,
-                                                    &comp_codes_10g);
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_10GBE_COMP_CODES,
+                                            &comp_codes_10g);
 
-               if (status != 0)
-                       goto err_read_i2c_eeprom;
-               status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                    IXGBE_SFF_CABLE_TECHNOLOGY,
-                                                    &cable_tech);
+       if (status)
+               goto err_read_i2c_eeprom;
+       status = hw->phy.ops.read_i2c_eeprom(hw,
+                                            IXGBE_SFF_CABLE_TECHNOLOGY,
+                                            &cable_tech);
 
-               if (status != 0)
-                       goto err_read_i2c_eeprom;
+       if (status)
+               goto err_read_i2c_eeprom;
 
-                /* ID Module
-                 * =========
-                 * 0   SFP_DA_CU
-                 * 1   SFP_SR
-                 * 2   SFP_LR
-                 * 3   SFP_DA_CORE0 - 82599-specific
-                 * 4   SFP_DA_CORE1 - 82599-specific
-                 * 5   SFP_SR/LR_CORE0 - 82599-specific
-                 * 6   SFP_SR/LR_CORE1 - 82599-specific
-                 * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
-                 * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
-                 * 9   SFP_1g_cu_CORE0 - 82599-specific
-                 * 10  SFP_1g_cu_CORE1 - 82599-specific
-                 * 11  SFP_1g_sx_CORE0 - 82599-specific
-                 * 12  SFP_1g_sx_CORE1 - 82599-specific
-                 */
-               if (hw->mac.type == ixgbe_mac_82598EB) {
-                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-                               hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
-                               hw->phy.sfp_type = ixgbe_sfp_type_sr;
-                       else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
-                               hw->phy.sfp_type = ixgbe_sfp_type_lr;
+        /* ID Module
+         * =========
+         * 0   SFP_DA_CU
+         * 1   SFP_SR
+         * 2   SFP_LR
+         * 3   SFP_DA_CORE0 - 82599-specific
+         * 4   SFP_DA_CORE1 - 82599-specific
+         * 5   SFP_SR/LR_CORE0 - 82599-specific
+         * 6   SFP_SR/LR_CORE1 - 82599-specific
+         * 7   SFP_act_lmt_DA_CORE0 - 82599-specific
+         * 8   SFP_act_lmt_DA_CORE1 - 82599-specific
+         * 9   SFP_1g_cu_CORE0 - 82599-specific
+         * 10  SFP_1g_cu_CORE1 - 82599-specific
+         * 11  SFP_1g_sx_CORE0 - 82599-specific
+         * 12  SFP_1g_sx_CORE1 - 82599-specific
+         */
+       if (hw->mac.type == ixgbe_mac_82598EB) {
+               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                       hw->phy.sfp_type = ixgbe_sfp_type_da_cu;
+               else if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)
+                       hw->phy.sfp_type = ixgbe_sfp_type_sr;
+               else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)
+                       hw->phy.sfp_type = ixgbe_sfp_type_lr;
+               else
+                       hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+       } else if (hw->mac.type == ixgbe_mac_82599EB) {
+               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
+                       if (hw->bus.lan_id == 0)
+                               hw->phy.sfp_type =
+                                            ixgbe_sfp_type_da_cu_core0;
                        else
-                               hw->phy.sfp_type = ixgbe_sfp_type_unknown;
-               } else if (hw->mac.type == ixgbe_mac_82599EB) {
-                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) {
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                    ixgbe_sfp_type_da_cu_core1;
-                       } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
-                               hw->phy.ops.read_i2c_eeprom(
-                                               hw, IXGBE_SFF_CABLE_SPEC_COMP,
-                                               &cable_spec);
-                               if (cable_spec &
-                                   IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
-                                       if (hw->bus.lan_id == 0)
-                                               hw->phy.sfp_type =
-                                               ixgbe_sfp_type_da_act_lmt_core0;
-                                       else
-                                               hw->phy.sfp_type =
-                                               ixgbe_sfp_type_da_act_lmt_core1;
-                               } else {
-                                       hw->phy.sfp_type =
-                                                       ixgbe_sfp_type_unknown;
-                               }
-                       } else if (comp_codes_10g &
-                                  (IXGBE_SFF_10GBASESR_CAPABLE |
-                                   IXGBE_SFF_10GBASELR_CAPABLE)) {
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                                     ixgbe_sfp_type_srlr_core1;
-                       } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                               ixgbe_sfp_type_1g_cu_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                               ixgbe_sfp_type_1g_cu_core1;
-                       } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
-                               if (hw->bus.lan_id == 0)
-                                       hw->phy.sfp_type =
-                                               ixgbe_sfp_type_1g_sx_core0;
-                               else
-                                       hw->phy.sfp_type =
-                                               ixgbe_sfp_type_1g_sx_core1;
-                       } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+                               hw->phy.sfp_type =
+                                            ixgbe_sfp_type_da_cu_core1;
+               } else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE) {
+                       hw->phy.ops.read_i2c_eeprom(
+                                       hw, IXGBE_SFF_CABLE_SPEC_COMP,
+                                       &cable_spec);
+                       if (cable_spec &
+                           IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING) {
                                if (hw->bus.lan_id == 0)
                                        hw->phy.sfp_type =
-                                               ixgbe_sfp_type_1g_lx_core0;
+                                       ixgbe_sfp_type_da_act_lmt_core0;
                                else
                                        hw->phy.sfp_type =
-                                               ixgbe_sfp_type_1g_lx_core1;
+                                       ixgbe_sfp_type_da_act_lmt_core1;
                        } else {
-                               hw->phy.sfp_type = ixgbe_sfp_type_unknown;
+                               hw->phy.sfp_type =
+                                               ixgbe_sfp_type_unknown;
                        }
+               } else if (comp_codes_10g &
+                          (IXGBE_SFF_10GBASESR_CAPABLE |
+                           IXGBE_SFF_10GBASELR_CAPABLE)) {
+                       if (hw->bus.lan_id == 0)
+                               hw->phy.sfp_type =
+                                             ixgbe_sfp_type_srlr_core0;
+                       else
+                               hw->phy.sfp_type =
+                                             ixgbe_sfp_type_srlr_core1;
+               } else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) {
+                       if (hw->bus.lan_id == 0)
+                               hw->phy.sfp_type =
+                                       ixgbe_sfp_type_1g_cu_core0;
+                       else
+                               hw->phy.sfp_type =
+                                       ixgbe_sfp_type_1g_cu_core1;
+               } else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) {
+                       if (hw->bus.lan_id == 0)
+                               hw->phy.sfp_type =
+                                       ixgbe_sfp_type_1g_sx_core0;
+                       else
+                               hw->phy.sfp_type =
+                                       ixgbe_sfp_type_1g_sx_core1;
+               } else if (comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) {
+                       if (hw->bus.lan_id == 0)
+                               hw->phy.sfp_type =
+                                       ixgbe_sfp_type_1g_lx_core0;
+                       else
+                               hw->phy.sfp_type =
+                                       ixgbe_sfp_type_1g_lx_core1;
+               } else {
+                       hw->phy.sfp_type = ixgbe_sfp_type_unknown;
                }
+       }
 
-               if (hw->phy.sfp_type != stored_sfp_type)
-                       hw->phy.sfp_setup_needed = true;
-
-               /* Determine if the SFP+ PHY is dual speed or not. */
-               hw->phy.multispeed_fiber = false;
-               if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
-                  (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
-                  ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
-                  (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
-                       hw->phy.multispeed_fiber = true;
-
-               /* Determine PHY vendor */
-               if (hw->phy.type != ixgbe_phy_nl) {
-                       hw->phy.id = identifier;
-                       status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE0,
-                                                   &oui_bytes[0]);
-
-                       if (status != 0)
-                               goto err_read_i2c_eeprom;
-
-                       status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE1,
-                                                   &oui_bytes[1]);
-
-                       if (status != 0)
-                               goto err_read_i2c_eeprom;
-
-                       status = hw->phy.ops.read_i2c_eeprom(hw,
-                                                   IXGBE_SFF_VENDOR_OUI_BYTE2,
-                                                   &oui_bytes[2]);
-
-                       if (status != 0)
-                               goto err_read_i2c_eeprom;
-
-                       vendor_oui =
-                         ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
-                          (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
-                          (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
-
-                       switch (vendor_oui) {
-                       case IXGBE_SFF_VENDOR_OUI_TYCO:
-                               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-                                       hw->phy.type =
-                                                   ixgbe_phy_sfp_passive_tyco;
-                               break;
-                       case IXGBE_SFF_VENDOR_OUI_FTL:
-                               if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
-                                       hw->phy.type = ixgbe_phy_sfp_ftl_active;
-                               else
-                                       hw->phy.type = ixgbe_phy_sfp_ftl;
-                               break;
-                       case IXGBE_SFF_VENDOR_OUI_AVAGO:
-                               hw->phy.type = ixgbe_phy_sfp_avago;
-                               break;
-                       case IXGBE_SFF_VENDOR_OUI_INTEL:
-                               hw->phy.type = ixgbe_phy_sfp_intel;
-                               break;
-                       default:
-                               if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
-                                       hw->phy.type =
-                                                ixgbe_phy_sfp_passive_unknown;
-                               else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
-                                       hw->phy.type =
-                                               ixgbe_phy_sfp_active_unknown;
-                               else
-                                       hw->phy.type = ixgbe_phy_sfp_unknown;
-                               break;
-                       }
-               }
+       if (hw->phy.sfp_type != stored_sfp_type)
+               hw->phy.sfp_setup_needed = true;
 
-               /* Allow any DA cable vendor */
-               if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
-                   IXGBE_SFF_DA_ACTIVE_CABLE)) {
-                       status = 0;
-                       goto out;
-               }
+       /* Determine if the SFP+ PHY is dual speed or not. */
+       hw->phy.multispeed_fiber = false;
+       if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) &&
+            (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) ||
+           ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) &&
+            (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE)))
+               hw->phy.multispeed_fiber = true;
 
-               /* Verify supported 1G SFP modules */
-               if (comp_codes_10g == 0 &&
-                   !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
-                       hw->phy.type = ixgbe_phy_sfp_unsupported;
-                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                       goto out;
-               }
+       /* Determine PHY vendor */
+       if (hw->phy.type != ixgbe_phy_nl) {
+               hw->phy.id = identifier;
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                           IXGBE_SFF_VENDOR_OUI_BYTE0,
+                                           &oui_bytes[0]);
 
-               /* Anything else 82598-based is supported */
-               if (hw->mac.type == ixgbe_mac_82598EB) {
-                       status = 0;
-                       goto out;
-               }
+               if (status != 0)
+                       goto err_read_i2c_eeprom;
 
-               hw->mac.ops.get_device_caps(hw, &enforce_sfp);
-               if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
-                   !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
-                     hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
-                       /* Make sure we're a supported PHY type */
-                       if (hw->phy.type == ixgbe_phy_sfp_intel) {
-                               status = 0;
-                       } else {
-                               if (hw->allow_unsupported_sfp) {
-                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.\n");
-                                       status = 0;
-                               } else {
-                                       hw_dbg(hw,
-                                              "SFP+ module not supported\n");
-                                       hw->phy.type =
-                                               ixgbe_phy_sfp_unsupported;
-                                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                               }
-                       }
-               } else {
-                       status = 0;
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                           IXGBE_SFF_VENDOR_OUI_BYTE1,
+                                           &oui_bytes[1]);
+
+               if (status != 0)
+                       goto err_read_i2c_eeprom;
+
+               status = hw->phy.ops.read_i2c_eeprom(hw,
+                                           IXGBE_SFF_VENDOR_OUI_BYTE2,
+                                           &oui_bytes[2]);
+
+               if (status != 0)
+                       goto err_read_i2c_eeprom;
+
+               vendor_oui =
+                 ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) |
+                  (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) |
+                  (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT));
+
+               switch (vendor_oui) {
+               case IXGBE_SFF_VENDOR_OUI_TYCO:
+                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                               hw->phy.type =
+                                           ixgbe_phy_sfp_passive_tyco;
+                       break;
+               case IXGBE_SFF_VENDOR_OUI_FTL:
+                       if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+                               hw->phy.type = ixgbe_phy_sfp_ftl_active;
+                       else
+                               hw->phy.type = ixgbe_phy_sfp_ftl;
+                       break;
+               case IXGBE_SFF_VENDOR_OUI_AVAGO:
+                       hw->phy.type = ixgbe_phy_sfp_avago;
+                       break;
+               case IXGBE_SFF_VENDOR_OUI_INTEL:
+                       hw->phy.type = ixgbe_phy_sfp_intel;
+                       break;
+               default:
+                       if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE)
+                               hw->phy.type =
+                                        ixgbe_phy_sfp_passive_unknown;
+                       else if (cable_tech & IXGBE_SFF_DA_ACTIVE_CABLE)
+                               hw->phy.type =
+                                       ixgbe_phy_sfp_active_unknown;
+                       else
+                               hw->phy.type = ixgbe_phy_sfp_unknown;
+                       break;
                }
        }
 
-out:
-       return status;
+       /* Allow any DA cable vendor */
+       if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE |
+           IXGBE_SFF_DA_ACTIVE_CABLE))
+               return 0;
+
+       /* Verify supported 1G SFP modules */
+       if (comp_codes_10g == 0 &&
+           !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+       }
+
+       /* Anything else 82598-based is supported */
+       if (hw->mac.type == ixgbe_mac_82598EB)
+               return 0;
+
+       hw->mac.ops.get_device_caps(hw, &enforce_sfp);
+       if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) &&
+           !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 ||
+             hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) {
+               /* Make sure we're a supported PHY type */
+               if (hw->phy.type == ixgbe_phy_sfp_intel)
+                       return 0;
+               if (hw->allow_unsupported_sfp) {
+                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics.  Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter.  Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+                       return 0;
+               }
+               hw_dbg(hw, "SFP+ module not supported\n");
+               hw->phy.type = ixgbe_phy_sfp_unsupported;
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
+       }
+       return 0;
 
 err_read_i2c_eeprom:
        hw->phy.sfp_type = ixgbe_sfp_type_not_present;
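
The de-nested identify function classifies the module from EEPROM bytes: direct-attach copper bits take priority, then the 10G compliance codes. A compressed sketch of the 82598 branch above; the bit masks are illustrative, not the real SFF-8472 values:

    enum sfp_type { SFP_DA_CU, SFP_SR, SFP_LR, SFP_UNKNOWN };

    #define DA_PASSIVE_CABLE 0x04   /* illustrative masks */
    #define SR_CAPABLE       0x10
    #define LR_CAPABLE       0x20

    enum sfp_type classify_sfp(unsigned cable_tech, unsigned comp_codes_10g)
    {
        if (cable_tech & DA_PASSIVE_CABLE)
            return SFP_DA_CU;       /* direct-attach copper wins */
        if (comp_codes_10g & SR_CAPABLE)
            return SFP_SR;
        if (comp_codes_10g & LR_CAPABLE)
            return SFP_LR;
        return SFP_UNKNOWN;
    }
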
@@ -1211,7 +1180,7 @@ err_read_i2c_eeprom:
 static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 {
        struct ixgbe_adapter *adapter = hw->back;
-       s32 status = IXGBE_ERR_PHY_ADDR_INVALID;
+       s32 status;
        u32 vendor_oui = 0;
        enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type;
        u8 identifier = 0;
@@ -1226,8 +1195,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 
        if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) {
                hw->phy.sfp_type = ixgbe_sfp_type_not_present;
-               status = IXGBE_ERR_SFP_NOT_PRESENT;
-               goto out;
+               return IXGBE_ERR_SFP_NOT_PRESENT;
        }
 
        status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER,
@@ -1238,8 +1206,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
 
        if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) {
                hw->phy.type = ixgbe_phy_sfp_unsupported;
-               status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-               goto out;
+               return IXGBE_ERR_SFP_NOT_SUPPORTED;
        }
 
        hw->phy.id = identifier;
@@ -1310,8 +1277,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
                } else {
                        /* unsupported module type */
                        hw->phy.type = ixgbe_phy_sfp_unsupported;
-                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                       goto out;
+                       return IXGBE_ERR_SFP_NOT_SUPPORTED;
                }
        }
 
@@ -1363,27 +1329,19 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw)
                hw->mac.ops.get_device_caps(hw, &enforce_sfp);
                if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) {
                        /* Make sure we're a supported PHY type */
-                       if (hw->phy.type == ixgbe_phy_qsfp_intel) {
-                               status = 0;
-                       } else {
-                               if (hw->allow_unsupported_sfp == true) {
-                                       e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
-                                       status = 0;
-                               } else {
-                                       hw_dbg(hw,
-                                              "QSFP module not supported\n");
-                                       hw->phy.type =
-                                               ixgbe_phy_sfp_unsupported;
-                                       status = IXGBE_ERR_SFP_NOT_SUPPORTED;
-                               }
+                       if (hw->phy.type == ixgbe_phy_qsfp_intel)
+                               return 0;
+                       if (hw->allow_unsupported_sfp) {
+                               e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n");
+                               return 0;
                        }
-               } else {
-                       status = 0;
+                       hw_dbg(hw, "QSFP module not supported\n");
+                       hw->phy.type = ixgbe_phy_sfp_unsupported;
+                       return IXGBE_ERR_SFP_NOT_SUPPORTED;
                }
+               return 0;
        }
-
-out:
-       return status;
+       return 0;
 
 err_read_i2c_eeprom:
        hw->phy.sfp_type = ixgbe_sfp_type_not_present;
@@ -1544,7 +1502,7 @@ s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset,
 s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                u8 dev_addr, u8 *data)
 {
-       s32 status = 0;
+       s32 status;
        u32 max_retry = 10;
        u32 retry = 0;
        u16 swfw_mask = 0;
@@ -1557,10 +1515,8 @@ s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                swfw_mask = IXGBE_GSSR_PHY0_SM;
 
        do {
-               if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
-                       status = IXGBE_ERR_SWFW_SYNC;
-                       goto read_byte_out;
-               }
+               if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+                       return IXGBE_ERR_SWFW_SYNC;
 
                ixgbe_i2c_start(hw);
 
@@ -1617,7 +1573,6 @@ fail:
 
        hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 
-read_byte_out:
        return status;
 }
 
@@ -1633,7 +1588,7 @@ read_byte_out:
 s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
                                 u8 dev_addr, u8 data)
 {
-       s32 status = 0;
+       s32 status;
        u32 max_retry = 1;
        u32 retry = 0;
        u16 swfw_mask = 0;
@@ -1643,10 +1598,8 @@ s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset,
        else
                swfw_mask = IXGBE_GSSR_PHY0_SM;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) {
-               status = IXGBE_ERR_SWFW_SYNC;
-               goto write_byte_out;
-       }
+       if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+               return IXGBE_ERR_SWFW_SYNC;
 
        do {
                ixgbe_i2c_start(hw);
@@ -1689,7 +1642,6 @@ fail:
 
        hw->mac.ops.release_swfw_sync(hw, swfw_mask);
 
-write_byte_out:
        return status;
 }
 
@@ -1774,7 +1726,7 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data)
  **/
 static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data)
 {
-       s32 status = 0;
+       s32 status;
        s32 i;
        u32 i2cctl;
        bool bit = false;
@@ -1893,11 +1845,11 @@ static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data)
                 */
                udelay(IXGBE_I2C_T_LOW);
        } else {
-               status = IXGBE_ERR_I2C;
                hw_dbg(hw, "I2C data was not set to %X\n", data);
+               return IXGBE_ERR_I2C;
        }
 
-       return status;
+       return 0;
 }
 /**
  *  ixgbe_raise_i2c_clk - Raises the I2C SCL clock
@@ -1954,8 +1906,6 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl)
  **/
 static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
 {
-       s32 status = 0;
-
        if (data)
                *i2cctl |= IXGBE_I2C_DATA_OUT;
        else
@@ -1970,11 +1920,11 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
        /* Verify data was set correctly */
        *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL);
        if (data != ixgbe_get_i2c_data(i2cctl)) {
-               status = IXGBE_ERR_I2C;
                hw_dbg(hw, "Error - I2C data was not set to %X.\n", data);
+               return IXGBE_ERR_I2C;
        }
 
-       return status;
+       return 0;
 }
 
 /**
@@ -1986,14 +1936,9 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data)
  **/
 static bool ixgbe_get_i2c_data(u32 *i2cctl)
 {
-       bool data;
-
        if (*i2cctl & IXGBE_I2C_DATA_IN)
-               data = true;
-       else
-               data = false;
-
-       return data;
+               return true;
+       return false;
 }
 
 /**
@@ -2038,20 +1983,17 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw)
  **/
 s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw)
 {
-       s32 status = 0;
        u16 phy_data = 0;
 
        if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM)
-               goto out;
+               return 0;
 
        /* Check that the LASI temp alarm status was triggered */
        hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG,
                             MDIO_MMD_PMAPMD, &phy_data);
 
        if (!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM))
-               goto out;
+               return 0;
 
-       status = IXGBE_ERR_OVERTEMP;
-out:
-       return status;
+       return IXGBE_ERR_OVERTEMP;
 }
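
A minimal before/after sketch of the pattern applied throughout these ixgbe
hunks -- the pre-initialized status variable and the goto/label exit replaced
by direct early returns (the function name and EEPROM calls are illustrative,
condensed from the X540 hunks further down):

    /* Before: single exit point through a label */
    static s32 example_read(struct ixgbe_hw *hw, u16 *data)
    {
            s32 status = 0;

            if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) {
                    status = IXGBE_ERR_SWFW_SYNC;
                    goto out;
            }
            status = ixgbe_read_eerd_generic(hw, 0, data);
            hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
    out:
            return status;
    }

    /* After: early return, no label, status left uninitialized */
    static s32 example_read(struct ixgbe_hw *hw, u16 *data)
    {
            s32 status;

            if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
                    return IXGBE_ERR_SWFW_SYNC;

            status = ixgbe_read_eerd_generic(hw, 0, data);
            hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
            return status;
    }
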
index 68f87ec..5fd4b52 100644 (file)
 #define IXGBE_OVERFLOW_PERIOD    (HZ * 30)
 #define IXGBE_PTP_TX_TIMEOUT     (HZ * 15)
 
-#ifndef NSECS_PER_SEC
-#define NSECS_PER_SEC 1000000000ULL
-#endif
+/* Half of a one-second clock period, for use with the PPS signal. We have
+ * to define this as a 64-bit (ULL) constant rather than a plain integer
+ * literal in order to force at least 64 bits of precision for shifting.
+ */
+#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL
 
 /**
  * ixgbe_ptp_setup_sdp
@@ -146,8 +148,8 @@ static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
                          IXGBE_TSAUXC_SDP0_INT);
 
                /* clock period (or pulse length) */
-               clktiml = (u32)(NSECS_PER_SEC << shift);
-               clktimh = (u32)((NSECS_PER_SEC << shift) >> 32);
+               clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift);
+               clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32);
 
                /*
                 * Account for the cyclecounter wrap-around value by
@@ -158,8 +160,8 @@ static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
                clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
                ns = timecounter_cyc2time(&adapter->tc, clock_edge);
 
-               div_u64_rem(ns, NSECS_PER_SEC, &rem);
-               clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift);
+               div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem);
+               clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift);
 
                /* specify the initial clock start time */
                trgttiml = (u32)clock_edge;
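
The ULL constant matters because the value is left-shifted by the
cyclecounter shift before being split across the two 32-bit CLKTIM
registers; a plain int literal would make the shift overflow in 32-bit
arithmetic. A self-contained sketch of the split:

    #include <stdint.h>

    #define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL /* ULL forces 64-bit math */

    /* Split the shifted 64-bit period into the two 32-bit CLKTIM halves. */
    static void example_clktim(uint32_t shift,
                               uint32_t *clktiml, uint32_t *clktimh)
    {
            uint64_t period = IXGBE_PTP_PPS_HALF_SECOND << shift;

            *clktiml = (uint32_t)period;           /* low 32 bits */
            *clktimh = (uint32_t)(period >> 32);   /* high 32 bits */
    }
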
index 16b3a1c..c14d4d8 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -245,10 +245,10 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
        if (pre_existing_vfs && pre_existing_vfs != num_vfs)
                err = ixgbe_disable_sriov(adapter);
        else if (pre_existing_vfs && pre_existing_vfs == num_vfs)
-               goto out;
+               return num_vfs;
 
        if (err)
-               goto err_out;
+               return err;
 
        /* While the SR-IOV capability structure reports the total number
         * of VFs as 64, we limit the number that can actually be allocated
         * to 63 so that some transmit/receive resources can be reserved for
         * the PF.  The PCI bus driver already checks for other values out
         * of range.
         */
-       if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) {
-               err = -EPERM;
-               goto err_out;
-       }
+       if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT)
+               return -EPERM;
 
        adapter->num_vfs = num_vfs;
 
        err = __ixgbe_enable_sriov(adapter);
        if (err)
-               goto err_out;
+               return err;
 
        for (i = 0; i < adapter->num_vfs; i++)
                ixgbe_vf_configuration(dev, (i | 0x10000000));
@@ -273,17 +271,14 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs)
        err = pci_enable_sriov(dev, num_vfs);
        if (err) {
                e_dev_warn("Failed to enable PCI sriov: %d\n", err);
-               goto err_out;
+               return err;
        }
        ixgbe_sriov_reinit(adapter);
 
-out:
        return num_vfs;
-
-err_out:
-       return err;
-#endif
+#else
        return 0;
+#endif
 }
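
Dropping the out:/err_out: labels also lets the !CONFIG_PCI_IOV stub share
the function's closing brace through a plain #else. The resulting shape,
reduced to a skeleton (a sketch, not the full function):

    static int example_sriov_enable(struct pci_dev *dev, int num_vfs)
    {
    #ifdef CONFIG_PCI_IOV
            /* ... full SR-IOV setup, each failure returning directly ... */
            return num_vfs;
    #else
            /* SR-IOV support not compiled in: no VFs can be enabled */
            return 0;
    #endif
    }
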
 
 static int ixgbe_pci_sriov_disable(struct pci_dev *dev)
@@ -807,7 +802,7 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
        if (!add && adapter->netdev->flags & IFF_PROMISC) {
                reg_ndx = ixgbe_find_vlvf_entry(hw, vid);
                if (reg_ndx < 0)
-                       goto out;
+                       return err;
                vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(reg_ndx));
                /* See if any other pools are set for this VLAN filter
                 * entry other than the PF.
@@ -833,8 +828,6 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter,
                        ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0));
        }
 
-out:
-
        return err;
 }
 
@@ -951,7 +944,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
 
        /* this is a message we already processed, do nothing */
        if (msgbuf[0] & (IXGBE_VT_MSGTYPE_ACK | IXGBE_VT_MSGTYPE_NACK))
-               return retval;
+               return 0;
 
        /* flush the ack before we write any messages back */
        IXGBE_WRITE_FLUSH(hw);
@@ -966,7 +959,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf)
        if (!adapter->vfinfo[vf].clear_to_send) {
                msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK;
                ixgbe_write_mbx(hw, msgbuf, 1, vf);
-               return retval;
+               return 0;
        }
 
        switch ((msgbuf[0] & 0xFFFF)) {
index 9a89f98..e6b07c2 100644 (file)
@@ -2440,25 +2440,6 @@ typedef u32 ixgbe_link_speed;
                                        IXGBE_LINK_SPEED_1GB_FULL | \
                                        IXGBE_LINK_SPEED_10GB_FULL)
 
-
-/* Physical layer type */
-typedef u32 ixgbe_physical_layer;
-#define IXGBE_PHYSICAL_LAYER_UNKNOWN      0
-#define IXGBE_PHYSICAL_LAYER_10GBASE_T    0x0001
-#define IXGBE_PHYSICAL_LAYER_1000BASE_T   0x0002
-#define IXGBE_PHYSICAL_LAYER_100BASE_TX   0x0004
-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU  0x0008
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LR   0x0010
-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM  0x0020
-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR   0x0040
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4  0x0080
-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4  0x0100
-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX  0x0200
-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX  0x0400
-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR   0x0800
-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000
-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000
-
 /* Flow Control Data Sheet defined values
  * Calculation and defines taken from 802.1bb Annex O
  */
@@ -2860,7 +2841,6 @@ struct ixgbe_mac_operations {
        s32 (*start_hw)(struct ixgbe_hw *);
        s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
        enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
-       u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
        s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
        s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
        s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
index 40dd798..e88305d 100644 (file)
@@ -1,7 +1,7 @@
 /*******************************************************************************
 
   Intel 10 Gigabit PCI Express Linux driver
-  Copyright(c) 1999 - 2013 Intel Corporation.
+  Copyright(c) 1999 - 2014 Intel Corporation.
 
   This program is free software; you can redistribute it and/or modify it
   under the terms and conditions of the GNU General Public License,
@@ -99,8 +99,8 @@ static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw)
 
        /* Call adapter stop to disable tx/rx and clear interrupts */
        status = hw->mac.ops.stop_adapter(hw);
-       if (status != 0)
-               goto reset_hw_out;
+       if (status)
+               return status;
 
        /* flush pending Tx transactions */
        ixgbe_clear_tx_pending(hw);
@@ -168,7 +168,6 @@ mac_reset_top:
        hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
                                   &hw->mac.wwpn_prefix);
 
-reset_hw_out:
        return status;
 }
 
@@ -182,40 +181,13 @@ reset_hw_out:
  **/
 static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw)
 {
-       s32 ret_val = 0;
+       s32 ret_val;
 
        ret_val = ixgbe_start_hw_generic(hw);
-       if (ret_val != 0)
-               goto out;
+       if (ret_val)
+               return ret_val;
 
-       ret_val = ixgbe_start_hw_gen2(hw);
-out:
-       return ret_val;
-}
-
-/**
- *  ixgbe_get_supported_physical_layer_X540 - Returns physical layer type
- *  @hw: pointer to hardware structure
- *
- *  Determines physical layer capabilities of the current configuration.
- **/
-static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw)
-{
-       u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN;
-       u16 ext_ability = 0;
-
-       hw->phy.ops.identify(hw);
-
-       hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD,
-                            &ext_ability);
-       if (ext_ability & MDIO_PMA_EXTABLE_10GBT)
-               physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T;
-       if (ext_ability & MDIO_PMA_EXTABLE_1000BT)
-               physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T;
-       if (ext_ability & MDIO_PMA_EXTABLE_100BTX)
-               physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX;
-
-       return physical_layer;
+       return ixgbe_start_hw_gen2(hw);
 }
 
 /**
@@ -258,13 +230,12 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw)
  **/
 static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 {
-       s32 status = 0;
+       s32 status;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-           0)
-               status = ixgbe_read_eerd_generic(hw, offset, data);
-       else
-               status = IXGBE_ERR_SWFW_SYNC;
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+               return IXGBE_ERR_SWFW_SYNC;
+
+       status = ixgbe_read_eerd_generic(hw, offset, data);
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
        return status;
@@ -282,14 +253,12 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data)
 static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
                                       u16 offset, u16 words, u16 *data)
 {
-       s32 status = 0;
+       s32 status;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-           0)
-               status = ixgbe_read_eerd_buffer_generic(hw, offset,
-                                                       words, data);
-       else
-               status = IXGBE_ERR_SWFW_SYNC;
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+               return IXGBE_ERR_SWFW_SYNC;
+
+       status = ixgbe_read_eerd_buffer_generic(hw, offset, words, data);
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
        return status;
@@ -305,12 +274,12 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw,
  **/
 static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
 {
-       s32 status = 0;
+       s32 status;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0)
-               status = ixgbe_write_eewr_generic(hw, offset, data);
-       else
-               status = IXGBE_ERR_SWFW_SYNC;
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+               return IXGBE_ERR_SWFW_SYNC;
+
+       status = ixgbe_write_eewr_generic(hw, offset, data);
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
        return status;
@@ -328,14 +297,12 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data)
 static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw,
                                        u16 offset, u16 words, u16 *data)
 {
-       s32 status = 0;
+       s32 status;
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ==
-           0)
-               status = ixgbe_write_eewr_buffer_generic(hw, offset,
-                                                        words, data);
-       else
-               status = IXGBE_ERR_SWFW_SYNC;
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+               return IXGBE_ERR_SWFW_SYNC;
+
+       status = ixgbe_write_eewr_buffer_generic(hw, offset, words, data);
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
        return status;
@@ -430,44 +397,37 @@ static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw,
        u16 checksum;
        u16 read_checksum = 0;
 
-       /*
-        * Read the first word from the EEPROM. If this times out or fails, do
+       /* Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
-
-       if (status != 0) {
+       if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
-               goto out;
+               return status;
        }
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
-               checksum = hw->eeprom.ops.calc_checksum(hw);
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+               return IXGBE_ERR_SWFW_SYNC;
 
-               /*
-                * Do not use hw->eeprom.ops.read because we do not want to take
-                * the synchronization semaphores twice here.
-                */
-               ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
-                                       &read_checksum);
+       checksum = hw->eeprom.ops.calc_checksum(hw);
 
-               /*
-                * Verify read checksum from EEPROM is the same as
-                * calculated checksum
-                */
-               if (read_checksum != checksum)
-                       status = IXGBE_ERR_EEPROM_CHECKSUM;
-
-               /* If the user cares, return the calculated checksum */
-               if (checksum_val)
-                       *checksum_val = checksum;
-       } else {
-               status = IXGBE_ERR_SWFW_SYNC;
-       }
+       /* Do not use hw->eeprom.ops.read because we do not want to take
+        * the synchronization semaphores twice here.
+        */
+       status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
+                                        &read_checksum);
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-out:
+
+       /* If the user cares, return the calculated checksum */
+       if (checksum_val)
+               *checksum_val = checksum;
+
+       /* Verify read and calculated checksums are the same */
+       if (read_checksum != checksum)
+               return IXGBE_ERR_EEPROM_CHECKSUM;
+
        return status;
 }
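
The rewritten validation takes the EEPROM semaphore exactly once, releases
it before any error return, and reports the calculated checksum even when
verification fails. Condensed control flow (a sketch of the hunk above, not
the literal driver code):

    static s32 example_validate(struct ixgbe_hw *hw, u16 *checksum_val)
    {
            u16 checksum, read_checksum = 0;
            s32 status;

            if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
                    return IXGBE_ERR_SWFW_SYNC;

            checksum = hw->eeprom.ops.calc_checksum(hw);
            /* read directly -- the semaphore is already held */
            status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM,
                                             &read_checksum);
            hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);

            if (checksum_val)
                    *checksum_val = checksum;  /* reported even on mismatch */
            if (read_checksum != checksum)
                    return IXGBE_ERR_EEPROM_CHECKSUM;
            return status;
    }
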
 
@@ -484,34 +444,29 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
        s32 status;
        u16 checksum;
 
-       /*
-        * Read the first word from the EEPROM. If this times out or fails, do
+       /* Read the first word from the EEPROM. If this times out or fails, do
         * not continue or we could be in for a very long wait while every
         * EEPROM read fails
         */
        status = hw->eeprom.ops.read(hw, 0, &checksum);
-
-       if (status != 0)
+       if (status) {
                hw_dbg(hw, "EEPROM read failed\n");
+               return status;
+       }
 
-       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) {
-               checksum = hw->eeprom.ops.calc_checksum(hw);
+       if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM))
+               return IXGBE_ERR_SWFW_SYNC;
 
-               /*
-                * Do not use hw->eeprom.ops.write because we do not want to
-                * take the synchronization semaphores twice here.
-                */
-               status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM,
-                                                 checksum);
+       checksum = hw->eeprom.ops.calc_checksum(hw);
 
-       if (status == 0)
+       /* Do not use hw->eeprom.ops.write because we do not want to
+        * take the synchronization semaphores twice here.
+        */
+       status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum);
+       if (!status)
                status = ixgbe_update_flash_X540(hw);
-       else
-               status = IXGBE_ERR_SWFW_SYNC;
-       }
 
        hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM);
-
        return status;
 }
 
@@ -525,12 +480,12 @@ static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw)
 static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
 {
        u32 flup;
-       s32 status = IXGBE_ERR_EEPROM;
+       s32 status;
 
        status = ixgbe_poll_flash_update_done_X540(hw);
        if (status == IXGBE_ERR_EEPROM) {
                hw_dbg(hw, "Flash update time out\n");
-               goto out;
+               return status;
        }
 
        flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP;
@@ -556,7 +511,7 @@ static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw)
                else
                        hw_dbg(hw, "Flash update time out\n");
        }
-out:
+
        return status;
 }
 
@@ -571,17 +526,14 @@ static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw)
 {
        u32 i;
        u32 reg;
-       s32 status = IXGBE_ERR_EEPROM;
 
        for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) {
                reg = IXGBE_READ_REG(hw, IXGBE_EEC);
-               if (reg & IXGBE_EEC_FLUDONE) {
-                       status = 0;
-                       break;
-               }
+               if (reg & IXGBE_EEC_FLUDONE)
+                       return 0;
                udelay(5);
        }
-       return status;
+       return IXGBE_ERR_EEPROM;
 }
 
 /**
@@ -676,46 +628,44 @@ static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask)
 }
 
 /**
- * ixgbe_get_nvm_semaphore - Get hardware semaphore
+ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore
  * @hw: pointer to hardware structure
  *
  * Sets the hardware semaphores so SW/FW can gain control of shared resources
- **/
+ */
 static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw)
 {
-       s32 status = IXGBE_ERR_EEPROM;
        u32 timeout = 2000;
        u32 i;
        u32 swsm;
 
        /* Get SMBI software semaphore between device drivers first */
        for (i = 0; i < timeout; i++) {
-               /*
-                * If the SMBI bit is 0 when we read it, then the bit will be
+               /* If the SMBI bit is 0 when we read it, then the bit will be
                 * set and we have the semaphore
                 */
                swsm = IXGBE_READ_REG(hw, IXGBE_SWSM);
-               if (!(swsm & IXGBE_SWSM_SMBI)) {
-                       status = 0;
+               if (!(swsm & IXGBE_SWSM_SMBI))
                        break;
-               }
-               udelay(50);
+               usleep_range(50, 100);
+       }
+
+       if (i == timeout) {
+               hw_dbg(hw,
+                      "Software semaphore SMBI between device drivers not granted.\n");
+               return IXGBE_ERR_EEPROM;
        }
 
        /* Now get the semaphore between SW/FW through the REGSMP bit */
-       if (status) {
-               for (i = 0; i < timeout; i++) {
-                       swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
-                       if (!(swsm & IXGBE_SWFW_REGSMP))
-                               break;
+       for (i = 0; i < timeout; i++) {
+               swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC);
+               if (!(swsm & IXGBE_SWFW_REGSMP))
+                       return 0;
 
-                       udelay(50);
-               }
-       } else {
-               hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n");
+               usleep_range(50, 100);
        }
 
-       return status;
+       return IXGBE_ERR_EEPROM;
 }
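
Both acquisition stages now share the same poll-with-timeout idiom, and
sleep with usleep_range(50, 100) instead of busy-waiting in udelay(50),
which is legitimate because this path may sleep. The idiom in isolation
(hypothetical helper; the function above returns IXGBE_ERR_EEPROM rather
than -EBUSY):

    /* Poll until busy_bit clears; 0 on success, -EBUSY after `timeout` tries. */
    static int example_poll(struct ixgbe_hw *hw, u32 reg, u32 busy_bit,
                            u32 timeout)
    {
            u32 i;

            for (i = 0; i < timeout; i++) {
                    if (!(IXGBE_READ_REG(hw, reg) & busy_bit))
                            return 0;              /* bit cleared: acquired */
                    usleep_range(50, 100);         /* may sleep, unlike udelay() */
            }
            return -EBUSY;
    }
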
 
 /**
@@ -811,8 +761,6 @@ static struct ixgbe_mac_operations mac_ops_X540 = {
        .start_hw               = &ixgbe_start_hw_X540,
        .clear_hw_cntrs         = &ixgbe_clear_hw_cntrs_generic,
        .get_media_type         = &ixgbe_get_media_type_X540,
-       .get_supported_physical_layer =
-                                 &ixgbe_get_supported_physical_layer_X540,
        .enable_rx_dma          = &ixgbe_enable_rx_dma_generic,
        .get_mac_addr           = &ixgbe_get_mac_addr_generic,
        .get_san_mac_addr       = &ixgbe_get_san_mac_addr_generic,
index 3061d18..aa8cc8d 100644 (file)
@@ -49,7 +49,6 @@ struct ixgbe_mac_operations {
        s32 (*start_hw)(struct ixgbe_hw *);
        s32 (*clear_hw_cntrs)(struct ixgbe_hw *);
        enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *);
-       u32 (*get_supported_physical_layer)(struct ixgbe_hw *);
        s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
        s32 (*stop_adapter)(struct ixgbe_hw *);
        s32 (*get_bus_info)(struct ixgbe_hw *);
index 68e6a66..1b4fc7c 100644 (file)
@@ -54,6 +54,14 @@ config MVNETA
          driver, which should be used for the older Marvell SoCs
          (Dove, Orion, Discovery, Kirkwood).
 
+config MVPP2
+       tristate "Marvell Armada 375 network interface support"
+       depends on MACH_ARMADA_375
+       select MVMDIO
+       ---help---
+         This driver supports the network interface units in the
+         Marvell ARMADA 375 SoC.
+
 config PXA168_ETH
        tristate "Marvell pxa168 ethernet support"
        depends on CPU_PXA168
index 5c4a776..f6425bd 100644 (file)
@@ -5,6 +5,7 @@
 obj-$(CONFIG_MVMDIO) += mvmdio.o
 obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o
 obj-$(CONFIG_MVNETA) += mvneta.o
+obj-$(CONFIG_MVPP2) += mvpp2.o
 obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o
 obj-$(CONFIG_SKGE) += skge.o
 obj-$(CONFIG_SKY2) += sky2.o
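
With these two entries in place the driver builds like any other tristate
option; an Armada 375 config fragment might contain (illustrative):

    CONFIG_MACH_ARMADA_375=y
    CONFIG_MVPP2=y
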
index 45beca1..dadd9a5 100644 (file)
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
        command =  l3_offs    << MVNETA_TX_L3_OFF_SHIFT;
        command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
 
-       if (l3_proto == swab16(ETH_P_IP))
+       if (l3_proto == htons(ETH_P_IP))
                command |= MVNETA_TXD_IP_CSUM;
        else
                command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
 
                        if (phydev->speed == SPEED_1000)
                                val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
-                       else
+                       else if (phydev->speed == SPEED_100)
                                val |= MVNETA_GMAC_CONFIG_MII_SPEED;
 
                        mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
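
The swab16() to htons() change in mvneta_txq_desc_csum() is an endianness
fix: l3_proto carries a network-byte-order EtherType, and swab16(ETH_P_IP)
only happens to equal it on little-endian hosts, while htons(ETH_P_IP) is
correct on either endianness (a no-op on big-endian). A small user-space
illustration:

    #include <arpa/inet.h>
    #include <stdio.h>

    #define ETH_P_IP 0x0800 /* IPv4 EtherType, host byte order */

    int main(void)
    {
            /* EtherType as it appears in a received frame */
            unsigned short l3_proto = htons(ETH_P_IP);

            /* Prints 1 on both little- and big-endian machines */
            printf("%d\n", l3_proto == htons(ETH_P_IP));
            return 0;
    }
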
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
new file mode 100644 (file)
index 0000000..ece83f1
--- /dev/null
@@ -0,0 +1,6426 @@
+/*
+ * Driver for Marvell PPv2 network controller for Armada 375 SoC.
+ *
+ * Copyright (C) 2014 Marvell
+ *
+ * Marcin Wojtas <mw@semihalf.com>
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/platform_device.h>
+#include <linux/skbuff.h>
+#include <linux/inetdevice.h>
+#include <linux/mbus.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/cpumask.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/of_address.h>
+#include <linux/phy.h>
+#include <linux/clk.h>
+#include <uapi/linux/ppp_defs.h>
+#include <net/ip.h>
+#include <net/ipv6.h>
+
+/* RX Fifo Registers */
+#define MVPP2_RX_DATA_FIFO_SIZE_REG(port)      (0x00 + 4 * (port))
+#define MVPP2_RX_ATTR_FIFO_SIZE_REG(port)      (0x20 + 4 * (port))
+#define MVPP2_RX_MIN_PKT_SIZE_REG              0x60
+#define MVPP2_RX_FIFO_INIT_REG                 0x64
+
+/* RX DMA Top Registers */
+#define MVPP2_RX_CTRL_REG(port)                        (0x140 + 4 * (port))
+#define     MVPP2_RX_LOW_LATENCY_PKT_SIZE(s)   (((s) & 0xfff) << 16)
+#define     MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK  BIT(31)
+#define MVPP2_POOL_BUF_SIZE_REG(pool)          (0x180 + 4 * (pool))
+#define     MVPP2_POOL_BUF_SIZE_OFFSET         5
+#define MVPP2_RXQ_CONFIG_REG(rxq)              (0x800 + 4 * (rxq))
+#define     MVPP2_SNOOP_PKT_SIZE_MASK          0x1ff
+#define     MVPP2_SNOOP_BUF_HDR_MASK           BIT(9)
+#define     MVPP2_RXQ_POOL_SHORT_OFFS          20
+#define     MVPP2_RXQ_POOL_SHORT_MASK          0x700000
+#define     MVPP2_RXQ_POOL_LONG_OFFS           24
+#define     MVPP2_RXQ_POOL_LONG_MASK           0x7000000
+#define     MVPP2_RXQ_PACKET_OFFSET_OFFS       28
+#define     MVPP2_RXQ_PACKET_OFFSET_MASK       0x70000000
+#define     MVPP2_RXQ_DISABLE_MASK             BIT(31)
+
+/* Parser Registers */
+#define MVPP2_PRS_INIT_LOOKUP_REG              0x1000
+#define     MVPP2_PRS_PORT_LU_MAX              0xf
+#define     MVPP2_PRS_PORT_LU_MASK(port)       (0xff << ((port) * 4))
+#define     MVPP2_PRS_PORT_LU_VAL(port, val)   ((val) << ((port) * 4))
+#define MVPP2_PRS_INIT_OFFS_REG(port)          (0x1004 + ((port) & 4))
+#define     MVPP2_PRS_INIT_OFF_MASK(port)      (0x3f << (((port) % 4) * 8))
+#define     MVPP2_PRS_INIT_OFF_VAL(port, val)  ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_MAX_LOOP_REG(port)           (0x100c + ((port) & 4))
+#define     MVPP2_PRS_MAX_LOOP_MASK(port)      (0xff << (((port) % 4) * 8))
+#define     MVPP2_PRS_MAX_LOOP_VAL(port, val)  ((val) << (((port) % 4) * 8))
+#define MVPP2_PRS_TCAM_IDX_REG                 0x1100
+#define MVPP2_PRS_TCAM_DATA_REG(idx)           (0x1104 + (idx) * 4)
+#define     MVPP2_PRS_TCAM_INV_MASK            BIT(31)
+#define MVPP2_PRS_SRAM_IDX_REG                 0x1200
+#define MVPP2_PRS_SRAM_DATA_REG(idx)           (0x1204 + (idx) * 4)
+#define MVPP2_PRS_TCAM_CTRL_REG                        0x1230
+#define     MVPP2_PRS_TCAM_EN_MASK             BIT(0)
+
+/* Classifier Registers */
+#define MVPP2_CLS_MODE_REG                     0x1800
+#define     MVPP2_CLS_MODE_ACTIVE_MASK         BIT(0)
+#define MVPP2_CLS_PORT_WAY_REG                 0x1810
+#define     MVPP2_CLS_PORT_WAY_MASK(port)      (1 << (port))
+#define MVPP2_CLS_LKP_INDEX_REG                        0x1814
+#define     MVPP2_CLS_LKP_INDEX_WAY_OFFS       6
+#define MVPP2_CLS_LKP_TBL_REG                  0x1818
+#define     MVPP2_CLS_LKP_TBL_RXQ_MASK         0xff
+#define     MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK   BIT(25)
+#define MVPP2_CLS_FLOW_INDEX_REG               0x1820
+#define MVPP2_CLS_FLOW_TBL0_REG                        0x1824
+#define MVPP2_CLS_FLOW_TBL1_REG                        0x1828
+#define MVPP2_CLS_FLOW_TBL2_REG                        0x182c
+#define MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port)   (0x1980 + ((port) * 4))
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS    3
+#define     MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK    0x7
+#define MVPP2_CLS_SWFWD_P2HQ_REG(port)         (0x19b0 + ((port) * 4))
+#define MVPP2_CLS_SWFWD_PCTRL_REG              0x19d0
+#define     MVPP2_CLS_SWFWD_PCTRL_MASK(port)   (1 << (port))
+
+/* Descriptor Manager Top Registers */
+#define MVPP2_RXQ_NUM_REG                      0x2040
+#define MVPP2_RXQ_DESC_ADDR_REG                        0x2044
+#define MVPP2_RXQ_DESC_SIZE_REG                        0x2048
+#define     MVPP2_RXQ_DESC_SIZE_MASK           0x3ff0
+#define MVPP2_RXQ_STATUS_UPDATE_REG(rxq)       (0x3000 + 4 * (rxq))
+#define     MVPP2_RXQ_NUM_PROCESSED_OFFSET     0
+#define     MVPP2_RXQ_NUM_NEW_OFFSET           16
+#define MVPP2_RXQ_STATUS_REG(rxq)              (0x3400 + 4 * (rxq))
+#define     MVPP2_RXQ_OCCUPIED_MASK            0x3fff
+#define     MVPP2_RXQ_NON_OCCUPIED_OFFSET      16
+#define     MVPP2_RXQ_NON_OCCUPIED_MASK                0x3fff0000
+#define MVPP2_RXQ_THRESH_REG                   0x204c
+#define     MVPP2_OCCUPIED_THRESH_OFFSET       0
+#define     MVPP2_OCCUPIED_THRESH_MASK         0x3fff
+#define MVPP2_RXQ_INDEX_REG                    0x2050
+#define MVPP2_TXQ_NUM_REG                      0x2080
+#define MVPP2_TXQ_DESC_ADDR_REG                        0x2084
+#define MVPP2_TXQ_DESC_SIZE_REG                        0x2088
+#define     MVPP2_TXQ_DESC_SIZE_MASK           0x3ff0
+#define MVPP2_AGGR_TXQ_UPDATE_REG              0x2090
+#define MVPP2_TXQ_THRESH_REG                   0x2094
+#define     MVPP2_TRANSMITTED_THRESH_OFFSET    16
+#define     MVPP2_TRANSMITTED_THRESH_MASK      0x3fff0000
+#define MVPP2_TXQ_INDEX_REG                    0x2098
+#define MVPP2_TXQ_PREF_BUF_REG                 0x209c
+#define     MVPP2_PREF_BUF_PTR(desc)           ((desc) & 0xfff)
+#define     MVPP2_PREF_BUF_SIZE_4              (BIT(12) | BIT(13))
+#define     MVPP2_PREF_BUF_SIZE_16             (BIT(12) | BIT(14))
+#define     MVPP2_PREF_BUF_THRESH(val)         ((val) << 17)
+#define     MVPP2_TXQ_DRAIN_EN_MASK            BIT(31)
+#define MVPP2_TXQ_PENDING_REG                  0x20a0
+#define     MVPP2_TXQ_PENDING_MASK             0x3fff
+#define MVPP2_TXQ_INT_STATUS_REG               0x20a4
+#define MVPP2_TXQ_SENT_REG(txq)                        (0x3c00 + 4 * (txq))
+#define     MVPP2_TRANSMITTED_COUNT_OFFSET     16
+#define     MVPP2_TRANSMITTED_COUNT_MASK       0x3fff0000
+#define MVPP2_TXQ_RSVD_REQ_REG                 0x20b0
+#define     MVPP2_TXQ_RSVD_REQ_Q_OFFSET                16
+#define MVPP2_TXQ_RSVD_RSLT_REG                        0x20b4
+#define     MVPP2_TXQ_RSVD_RSLT_MASK           0x3fff
+#define MVPP2_TXQ_RSVD_CLR_REG                 0x20b8
+#define     MVPP2_TXQ_RSVD_CLR_OFFSET          16
+#define MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu)      (0x2100 + 4 * (cpu))
+#define MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu)      (0x2140 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_DESC_SIZE_MASK      0x3ff0
+#define MVPP2_AGGR_TXQ_STATUS_REG(cpu)         (0x2180 + 4 * (cpu))
+#define     MVPP2_AGGR_TXQ_PENDING_MASK                0x3fff
+#define MVPP2_AGGR_TXQ_INDEX_REG(cpu)          (0x21c0 + 4 * (cpu))
+
+/* MBUS bridge registers */
+#define MVPP2_WIN_BASE(w)                      (0x4000 + ((w) << 2))
+#define MVPP2_WIN_SIZE(w)                      (0x4020 + ((w) << 2))
+#define MVPP2_WIN_REMAP(w)                     (0x4040 + ((w) << 2))
+#define MVPP2_BASE_ADDR_ENABLE                 0x4060
+
+/* Interrupt Cause and Mask registers */
+#define MVPP2_ISR_RX_THRESHOLD_REG(rxq)                (0x5200 + 4 * (rxq))
+#define MVPP2_ISR_RXQ_GROUP_REG(rxq)           (0x5400 + 4 * (rxq))
+#define MVPP2_ISR_ENABLE_REG(port)             (0x5420 + 4 * (port))
+#define     MVPP2_ISR_ENABLE_INTERRUPT(mask)   ((mask) & 0xffff)
+#define     MVPP2_ISR_DISABLE_INTERRUPT(mask)  (((mask) << 16) & 0xffff0000)
+#define MVPP2_ISR_RX_TX_CAUSE_REG(port)                (0x5480 + 4 * (port))
+#define     MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK        0xffff
+#define     MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK        0xff0000
+#define     MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK   BIT(24)
+#define     MVPP2_CAUSE_FCS_ERR_MASK           BIT(25)
+#define     MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK  BIT(26)
+#define     MVPP2_CAUSE_TX_EXCEPTION_SUM_MASK  BIT(29)
+#define     MVPP2_CAUSE_RX_EXCEPTION_SUM_MASK  BIT(30)
+#define     MVPP2_CAUSE_MISC_SUM_MASK          BIT(31)
+#define MVPP2_ISR_RX_TX_MASK_REG(port)         (0x54a0 + 4 * (port))
+#define MVPP2_ISR_PON_RX_TX_MASK_REG           0x54bc
+#define     MVPP2_PON_CAUSE_RXQ_OCCUP_DESC_ALL_MASK    0xffff
+#define     MVPP2_PON_CAUSE_TXP_OCCUP_DESC_ALL_MASK    0x3fc00000
+#define     MVPP2_PON_CAUSE_MISC_SUM_MASK              BIT(31)
+#define MVPP2_ISR_MISC_CAUSE_REG               0x55b0
+
+/* Buffer Manager registers */
+#define MVPP2_BM_POOL_BASE_REG(pool)           (0x6000 + ((pool) * 4))
+#define     MVPP2_BM_POOL_BASE_ADDR_MASK       0xfffff80
+#define MVPP2_BM_POOL_SIZE_REG(pool)           (0x6040 + ((pool) * 4))
+#define     MVPP2_BM_POOL_SIZE_MASK            0xfff0
+#define MVPP2_BM_POOL_READ_PTR_REG(pool)       (0x6080 + ((pool) * 4))
+#define     MVPP2_BM_POOL_GET_READ_PTR_MASK    0xfff0
+#define MVPP2_BM_POOL_PTRS_NUM_REG(pool)       (0x60c0 + ((pool) * 4))
+#define     MVPP2_BM_POOL_PTRS_NUM_MASK                0xfff0
+#define MVPP2_BM_BPPI_READ_PTR_REG(pool)       (0x6100 + ((pool) * 4))
+#define MVPP2_BM_BPPI_PTRS_NUM_REG(pool)       (0x6140 + ((pool) * 4))
+#define     MVPP2_BM_BPPI_PTR_NUM_MASK         0x7ff
+#define     MVPP2_BM_BPPI_PREFETCH_FULL_MASK   BIT(16)
+#define MVPP2_BM_POOL_CTRL_REG(pool)           (0x6200 + ((pool) * 4))
+#define     MVPP2_BM_START_MASK                        BIT(0)
+#define     MVPP2_BM_STOP_MASK                 BIT(1)
+#define     MVPP2_BM_STATE_MASK                        BIT(4)
+#define     MVPP2_BM_LOW_THRESH_OFFS           8
+#define     MVPP2_BM_LOW_THRESH_MASK           0x7f00
+#define     MVPP2_BM_LOW_THRESH_VALUE(val)     ((val) << \
+                                               MVPP2_BM_LOW_THRESH_OFFS)
+#define     MVPP2_BM_HIGH_THRESH_OFFS          16
+#define     MVPP2_BM_HIGH_THRESH_MASK          0x7f0000
+#define     MVPP2_BM_HIGH_THRESH_VALUE(val)    ((val) << \
+                                               MVPP2_BM_HIGH_THRESH_OFFS)
+#define MVPP2_BM_INTR_CAUSE_REG(pool)          (0x6240 + ((pool) * 4))
+#define     MVPP2_BM_RELEASED_DELAY_MASK       BIT(0)
+#define     MVPP2_BM_ALLOC_FAILED_MASK         BIT(1)
+#define     MVPP2_BM_BPPE_EMPTY_MASK           BIT(2)
+#define     MVPP2_BM_BPPE_FULL_MASK            BIT(3)
+#define     MVPP2_BM_AVAILABLE_BP_LOW_MASK     BIT(4)
+#define MVPP2_BM_INTR_MASK_REG(pool)           (0x6280 + ((pool) * 4))
+#define MVPP2_BM_PHY_ALLOC_REG(pool)           (0x6400 + ((pool) * 4))
+#define     MVPP2_BM_PHY_ALLOC_GRNTD_MASK      BIT(0)
+#define MVPP2_BM_VIRT_ALLOC_REG                        0x6440
+#define MVPP2_BM_PHY_RLS_REG(pool)             (0x6480 + ((pool) * 4))
+#define     MVPP2_BM_PHY_RLS_MC_BUFF_MASK      BIT(0)
+#define     MVPP2_BM_PHY_RLS_PRIO_EN_MASK      BIT(1)
+#define     MVPP2_BM_PHY_RLS_GRNTD_MASK                BIT(2)
+#define MVPP2_BM_VIRT_RLS_REG                  0x64c0
+#define MVPP2_BM_MC_RLS_REG                    0x64c4
+#define     MVPP2_BM_MC_ID_MASK                        0xfff
+#define     MVPP2_BM_FORCE_RELEASE_MASK                BIT(12)
+
+/* TX Scheduler registers */
+#define MVPP2_TXP_SCHED_PORT_INDEX_REG         0x8000
+#define MVPP2_TXP_SCHED_Q_CMD_REG              0x8004
+#define     MVPP2_TXP_SCHED_ENQ_MASK           0xff
+#define     MVPP2_TXP_SCHED_DISQ_OFFSET                8
+#define MVPP2_TXP_SCHED_CMD_1_REG              0x8010
+#define MVPP2_TXP_SCHED_PERIOD_REG             0x8018
+#define MVPP2_TXP_SCHED_MTU_REG                        0x801c
+#define     MVPP2_TXP_MTU_MAX                  0x7FFFF
+#define MVPP2_TXP_SCHED_REFILL_REG             0x8020
+#define     MVPP2_TXP_REFILL_TOKENS_ALL_MASK   0x7ffff
+#define     MVPP2_TXP_REFILL_PERIOD_ALL_MASK   0x3ff00000
+#define     MVPP2_TXP_REFILL_PERIOD_MASK(v)    ((v) << 20)
+#define MVPP2_TXP_SCHED_TOKEN_SIZE_REG         0x8024
+#define     MVPP2_TXP_TOKEN_SIZE_MAX           0xffffffff
+#define MVPP2_TXQ_SCHED_REFILL_REG(q)          (0x8040 + ((q) << 2))
+#define     MVPP2_TXQ_REFILL_TOKENS_ALL_MASK   0x7ffff
+#define     MVPP2_TXQ_REFILL_PERIOD_ALL_MASK   0x3ff00000
+#define     MVPP2_TXQ_REFILL_PERIOD_MASK(v)    ((v) << 20)
+#define MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(q)      (0x8060 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_SIZE_MAX           0x7fffffff
+#define MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(q)      (0x8080 + ((q) << 2))
+#define     MVPP2_TXQ_TOKEN_CNTR_MAX           0xffffffff
+
+/* TX general registers */
+#define MVPP2_TX_SNOOP_REG                     0x8800
+#define MVPP2_TX_PORT_FLUSH_REG                        0x8810
+#define     MVPP2_TX_PORT_FLUSH_MASK(port)     (1 << (port))
+
+/* LMS registers */
+#define MVPP2_SRC_ADDR_MIDDLE                  0x24
+#define MVPP2_SRC_ADDR_HIGH                    0x28
+#define MVPP2_PHY_AN_CFG0_REG                  0x34
+#define     MVPP2_PHY_AN_STOP_SMI0_MASK                BIT(7)
+#define MVPP2_MIB_COUNTERS_BASE(port)          (0x1000 + ((port) >> 1) * \
+                                               0x400 + (port) * 0x400)
+#define     MVPP2_MIB_LATE_COLLISION           0x7c
+#define MVPP2_ISR_SUM_MASK_REG                 0x220c
+#define MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG     0x305c
+#define MVPP2_EXT_GLOBAL_CTRL_DEFAULT          0x27
+
+/* Per-port registers */
+#define MVPP2_GMAC_CTRL_0_REG                  0x0
+#define      MVPP2_GMAC_PORT_EN_MASK           BIT(0)
+#define      MVPP2_GMAC_MAX_RX_SIZE_OFFS       2
+#define      MVPP2_GMAC_MAX_RX_SIZE_MASK       0x7ffc
+#define      MVPP2_GMAC_MIB_CNTR_EN_MASK       BIT(15)
+#define MVPP2_GMAC_CTRL_1_REG                  0x4
+#define      MVPP2_GMAC_PERIODIC_XON_EN_MASK   BIT(1)
+#define      MVPP2_GMAC_GMII_LB_EN_MASK                BIT(5)
+#define      MVPP2_GMAC_PCS_LB_EN_BIT          6
+#define      MVPP2_GMAC_PCS_LB_EN_MASK         BIT(6)
+#define      MVPP2_GMAC_SA_LOW_OFFS            7
+#define MVPP2_GMAC_CTRL_2_REG                  0x8
+#define      MVPP2_GMAC_INBAND_AN_MASK         BIT(0)
+#define      MVPP2_GMAC_PCS_ENABLE_MASK                BIT(3)
+#define      MVPP2_GMAC_PORT_RGMII_MASK                BIT(4)
+#define      MVPP2_GMAC_PORT_RESET_MASK                BIT(6)
+#define MVPP2_GMAC_AUTONEG_CONFIG              0xc
+#define      MVPP2_GMAC_FORCE_LINK_DOWN                BIT(0)
+#define      MVPP2_GMAC_FORCE_LINK_PASS                BIT(1)
+#define      MVPP2_GMAC_CONFIG_MII_SPEED       BIT(5)
+#define      MVPP2_GMAC_CONFIG_GMII_SPEED      BIT(6)
+#define      MVPP2_GMAC_AN_SPEED_EN            BIT(7)
+#define      MVPP2_GMAC_FC_ADV_EN              BIT(9)
+#define      MVPP2_GMAC_CONFIG_FULL_DUPLEX     BIT(12)
+#define      MVPP2_GMAC_AN_DUPLEX_EN           BIT(13)
+#define MVPP2_GMAC_PORT_FIFO_CFG_1_REG         0x1c
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_OFFS    6
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK        0x1fc0
+#define      MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(v) (((v) << 6) & \
+                                       MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK)
+
+#define MVPP2_CAUSE_TXQ_SENT_DESC_ALL_MASK     0xff
+
+/* Descriptor ring Macros */
+#define MVPP2_QUEUE_NEXT_DESC(q, index) \
+       (((index) < (q)->last_desc) ? ((index) + 1) : 0)
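
MVPP2_QUEUE_NEXT_DESC() is the usual branch-based circular increment,
avoiding a modulo on the hot path. A usage sketch (the descs array and
process_desc() helper are hypothetical stand-ins):

    /* Visit every descriptor in the ring once, wrapping after last_desc. */
    int idx = 0;
    int n;

    for (n = 0; n <= q->last_desc; n++) {
            process_desc(&q->descs[idx]);
            idx = MVPP2_QUEUE_NEXT_DESC(q, idx);   /* last_desc wraps to 0 */
    }
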
+
+/* Various constants */
+
+/* Coalescing */
+#define MVPP2_TXDONE_COAL_PKTS_THRESH  15
+#define MVPP2_RX_COAL_PKTS             32
+#define MVPP2_RX_COAL_USEC             100
+
+/* The two-byte Marvell header. It either contains a special value used
+ * by Marvell switches when a specific hardware mode is enabled (not
+ * supported by this driver) or is filled with zeroes automatically on
+ * the RX side. Because these two bytes sit at the front of the Ethernet
+ * header, they automatically align the IP header on a 4-byte boundary:
+ * the hardware skips them on its own.
+ */
+#define MVPP2_MH_SIZE                  2
+#define MVPP2_ETH_TYPE_LEN             2
+#define MVPP2_PPPOE_HDR_SIZE           8
+#define MVPP2_VLAN_TAG_LEN             4
+
+/* Lbtd 802.3 type */
+#define MVPP2_IP_LBDT_TYPE             0xfffa
+
+#define MVPP2_CPU_D_CACHE_LINE_SIZE    32
+#define MVPP2_TX_CSUM_MAX_SIZE         9800
+
+/* Timeout constants */
+#define MVPP2_TX_DISABLE_TIMEOUT_MSEC  1000
+#define MVPP2_TX_PENDING_TIMEOUT_MSEC  1000
+
+#define MVPP2_TX_MTU_MAX               0x7ffff
+
+/* Maximum number of T-CONTs of PON port */
+#define MVPP2_MAX_TCONT                        16
+
+/* Maximum number of supported ports */
+#define MVPP2_MAX_PORTS                        4
+
+/* Maximum number of TXQs used by single port */
+#define MVPP2_MAX_TXQ                  8
+
+/* Maximum number of RXQs used by single port */
+#define MVPP2_MAX_RXQ                  8
+
+/* Default number of RXQs in use */
+#define MVPP2_DEFAULT_RXQ              4
+
+/* Total number of RXQs available to all ports */
+#define MVPP2_RXQ_TOTAL_NUM            (MVPP2_MAX_PORTS * MVPP2_MAX_RXQ)
+
+/* Max number of Rx descriptors */
+#define MVPP2_MAX_RXD                  128
+
+/* Max number of Tx descriptors */
+#define MVPP2_MAX_TXD                  1024
+
+/* Number of Tx descriptors that can be reserved at once by a CPU */
+#define MVPP2_CPU_DESC_CHUNK           64
+
+/* Max number of Tx descriptors in each aggregated queue */
+#define MVPP2_AGGR_TXQ_SIZE            256
+
+/* Descriptor aligned size */
+#define MVPP2_DESC_ALIGNED_SIZE                32
+
+/* Descriptor alignment mask */
+#define MVPP2_TX_DESC_ALIGN            (MVPP2_DESC_ALIGNED_SIZE - 1)
+
+/* RX FIFO constants */
+#define MVPP2_RX_FIFO_PORT_DATA_SIZE   0x2000
+#define MVPP2_RX_FIFO_PORT_ATTR_SIZE   0x80
+#define MVPP2_RX_FIFO_PORT_MIN_PKT     0x80
+
+/* RX buffer constants */
+#define MVPP2_SKB_SHINFO_SIZE \
+       SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
+
+#define MVPP2_RX_PKT_SIZE(mtu) \
+       ALIGN((mtu) + MVPP2_MH_SIZE + MVPP2_VLAN_TAG_LEN + \
+             ETH_HLEN + ETH_FCS_LEN, MVPP2_CPU_D_CACHE_LINE_SIZE)
+
+#define MVPP2_RX_BUF_SIZE(pkt_size)    ((pkt_size) + NET_SKB_PAD)
+#define MVPP2_RX_TOTAL_SIZE(buf_size)  ((buf_size) + MVPP2_SKB_SHINFO_SIZE)
+#define MVPP2_RX_MAX_PKT_SIZE(total_size) \
+       ((total_size) - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE)
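
These macros budget an RX buffer as headroom plus packet plus shared-info,
and MVPP2_RX_MAX_PKT_SIZE() is their inverse. A worked example for a
1500-byte MTU (NET_SKB_PAD and the skb_shared_info size are kernel-config
dependent, so only the first value is exact):

    int pkt_size = MVPP2_RX_PKT_SIZE(1500);
    /* = ALIGN(1500 + 2 (MH) + 4 (VLAN) + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN), 32)
     * = ALIGN(1524, 32) = 1536
     */
    int buf_size = MVPP2_RX_BUF_SIZE(pkt_size);   /* 1536 + NET_SKB_PAD */
    int total    = MVPP2_RX_TOTAL_SIZE(buf_size); /* + shared-info area */
    int max_pkt  = MVPP2_RX_MAX_PKT_SIZE(total);  /* back to 1536 */
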
+
+#define MVPP2_BIT_TO_BYTE(bit)         ((bit) / 8)
+
+/* IPv6 max L3 address size */
+#define MVPP2_MAX_L3_ADDR_SIZE         16
+
+/* Port flags */
+#define MVPP2_F_LOOPBACK               BIT(0)
+
+/* Marvell tag types */
+enum mvpp2_tag_type {
+       MVPP2_TAG_TYPE_NONE = 0,
+       MVPP2_TAG_TYPE_MH   = 1,
+       MVPP2_TAG_TYPE_DSA  = 2,
+       MVPP2_TAG_TYPE_EDSA = 3,
+       MVPP2_TAG_TYPE_VLAN = 4,
+       MVPP2_TAG_TYPE_LAST = 5
+};
+
+/* Parser constants */
+#define MVPP2_PRS_TCAM_SRAM_SIZE       256
+#define MVPP2_PRS_TCAM_WORDS           6
+#define MVPP2_PRS_SRAM_WORDS           4
+#define MVPP2_PRS_FLOW_ID_SIZE         64
+#define MVPP2_PRS_FLOW_ID_MASK         0x3f
+#define MVPP2_PRS_TCAM_ENTRY_INVALID   1
+#define MVPP2_PRS_TCAM_DSA_TAGGED_BIT  BIT(5)
+#define MVPP2_PRS_IPV4_HEAD            0x40
+#define MVPP2_PRS_IPV4_HEAD_MASK       0xf0
+#define MVPP2_PRS_IPV4_MC              0xe0
+#define MVPP2_PRS_IPV4_MC_MASK         0xf0
+#define MVPP2_PRS_IPV4_BC_MASK         0xff
+#define MVPP2_PRS_IPV4_IHL             0x5
+#define MVPP2_PRS_IPV4_IHL_MASK                0xf
+#define MVPP2_PRS_IPV6_MC              0xff
+#define MVPP2_PRS_IPV6_MC_MASK         0xff
+#define MVPP2_PRS_IPV6_HOP_MASK                0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK      0xff
+#define MVPP2_PRS_TCAM_PROTO_MASK_L    0x3f
+#define MVPP2_PRS_DBL_VLANS_MAX                100
+
+/* Tcam structure:
+ * - lookup ID - 4 bits
+ * - port ID - 1 byte
+ * - additional information - 1 byte
+ * - header data - 8 bytes
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(5)->(0).
+ */
+#define MVPP2_PRS_AI_BITS                      8
+#define MVPP2_PRS_PORT_MASK                    0xff
+#define MVPP2_PRS_LU_MASK                      0xf
+#define MVPP2_PRS_TCAM_DATA_BYTE(offs)         \
+                                   (((offs) - ((offs) % 2)) * 2 + ((offs) % 2))
+#define MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)      \
+                                             (((offs) * 2) - ((offs) % 2)  + 2)
+#define MVPP2_PRS_TCAM_AI_BYTE                 16
+#define MVPP2_PRS_TCAM_PORT_BYTE               17
+#define MVPP2_PRS_TCAM_LU_BYTE                 20
+#define MVPP2_PRS_TCAM_EN_OFFS(offs)           ((offs) + 2)
+#define MVPP2_PRS_TCAM_INV_WORD                        5
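
The two MVPP2_PRS_TCAM_DATA_BYTE* macros interleave header-data bytes with
their per-byte enable masks inside the TCAM words. Evaluating them for the
first few offsets shows the layout:

    /* offs                    :  0  1  2  3  4  5
     * TCAM_DATA_BYTE(offs)    :  0  1  4  5  8  9
     * TCAM_DATA_BYTE_EN(offs) :  2  3  6  7 10 11
     * i.e. two data bytes, then their two enable bytes, repeating.
     */
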
+/* Tcam entries ID */
+#define MVPP2_PE_DROP_ALL              0
+#define MVPP2_PE_FIRST_FREE_TID                1
+#define MVPP2_PE_LAST_FREE_TID         (MVPP2_PRS_TCAM_SRAM_SIZE - 31)
+#define MVPP2_PE_IP6_EXT_PROTO_UN      (MVPP2_PRS_TCAM_SRAM_SIZE - 30)
+#define MVPP2_PE_MAC_MC_IP6            (MVPP2_PRS_TCAM_SRAM_SIZE - 29)
+#define MVPP2_PE_IP6_ADDR_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 28)
+#define MVPP2_PE_IP4_ADDR_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 27)
+#define MVPP2_PE_LAST_DEFAULT_FLOW     (MVPP2_PRS_TCAM_SRAM_SIZE - 26)
+#define MVPP2_PE_FIRST_DEFAULT_FLOW    (MVPP2_PRS_TCAM_SRAM_SIZE - 19)
+#define MVPP2_PE_EDSA_TAGGED           (MVPP2_PRS_TCAM_SRAM_SIZE - 18)
+#define MVPP2_PE_EDSA_UNTAGGED         (MVPP2_PRS_TCAM_SRAM_SIZE - 17)
+#define MVPP2_PE_DSA_TAGGED            (MVPP2_PRS_TCAM_SRAM_SIZE - 16)
+#define MVPP2_PE_DSA_UNTAGGED          (MVPP2_PRS_TCAM_SRAM_SIZE - 15)
+#define MVPP2_PE_ETYPE_EDSA_TAGGED     (MVPP2_PRS_TCAM_SRAM_SIZE - 14)
+#define MVPP2_PE_ETYPE_EDSA_UNTAGGED   (MVPP2_PRS_TCAM_SRAM_SIZE - 13)
+#define MVPP2_PE_ETYPE_DSA_TAGGED      (MVPP2_PRS_TCAM_SRAM_SIZE - 12)
+#define MVPP2_PE_ETYPE_DSA_UNTAGGED    (MVPP2_PRS_TCAM_SRAM_SIZE - 11)
+#define MVPP2_PE_MH_DEFAULT            (MVPP2_PRS_TCAM_SRAM_SIZE - 10)
+#define MVPP2_PE_DSA_DEFAULT           (MVPP2_PRS_TCAM_SRAM_SIZE - 9)
+#define MVPP2_PE_IP6_PROTO_UN          (MVPP2_PRS_TCAM_SRAM_SIZE - 8)
+#define MVPP2_PE_IP4_PROTO_UN          (MVPP2_PRS_TCAM_SRAM_SIZE - 7)
+#define MVPP2_PE_ETH_TYPE_UN           (MVPP2_PRS_TCAM_SRAM_SIZE - 6)
+#define MVPP2_PE_VLAN_DBL              (MVPP2_PRS_TCAM_SRAM_SIZE - 5)
+#define MVPP2_PE_VLAN_NONE             (MVPP2_PRS_TCAM_SRAM_SIZE - 4)
+#define MVPP2_PE_MAC_MC_ALL            (MVPP2_PRS_TCAM_SRAM_SIZE - 3)
+#define MVPP2_PE_MAC_PROMISCUOUS       (MVPP2_PRS_TCAM_SRAM_SIZE - 2)
+#define MVPP2_PE_MAC_NON_PROMISCUOUS   (MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+
+/* Sram structure
+ * The fields are represented by MVPP2_PRS_TCAM_DATA_REG(3)->(0).
+ */
+#define MVPP2_PRS_SRAM_RI_OFFS                 0
+#define MVPP2_PRS_SRAM_RI_WORD                 0
+#define MVPP2_PRS_SRAM_RI_CTRL_OFFS            32
+#define MVPP2_PRS_SRAM_RI_CTRL_WORD            1
+#define MVPP2_PRS_SRAM_RI_CTRL_BITS            32
+#define MVPP2_PRS_SRAM_SHIFT_OFFS              64
+#define MVPP2_PRS_SRAM_SHIFT_SIGN_BIT          72
+#define MVPP2_PRS_SRAM_UDF_OFFS                        73
+#define MVPP2_PRS_SRAM_UDF_BITS                        8
+#define MVPP2_PRS_SRAM_UDF_MASK                        0xff
+#define MVPP2_PRS_SRAM_UDF_SIGN_BIT            81
+#define MVPP2_PRS_SRAM_UDF_TYPE_OFFS           82
+#define MVPP2_PRS_SRAM_UDF_TYPE_MASK           0x7
+#define MVPP2_PRS_SRAM_UDF_TYPE_L3             1
+#define MVPP2_PRS_SRAM_UDF_TYPE_L4             4
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS       85
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK       0x3
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD                1
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP4_ADD    2
+#define MVPP2_PRS_SRAM_OP_SEL_SHIFT_IP6_ADD    3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS         87
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_BITS         2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_MASK         0x3
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_ADD          0
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP4_ADD      2
+#define MVPP2_PRS_SRAM_OP_SEL_UDF_IP6_ADD      3
+#define MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS                89
+#define MVPP2_PRS_SRAM_AI_OFFS                 90
+#define MVPP2_PRS_SRAM_AI_CTRL_OFFS            98
+#define MVPP2_PRS_SRAM_AI_CTRL_BITS            8
+#define MVPP2_PRS_SRAM_AI_MASK                 0xff
+#define MVPP2_PRS_SRAM_NEXT_LU_OFFS            106
+#define MVPP2_PRS_SRAM_NEXT_LU_MASK            0xf
+#define MVPP2_PRS_SRAM_LU_DONE_BIT             110
+#define MVPP2_PRS_SRAM_LU_GEN_BIT              111
+
+/* Sram result info bits assignment */
+#define MVPP2_PRS_RI_MAC_ME_MASK               0x1
+#define MVPP2_PRS_RI_DSA_MASK                  0x2
+#define MVPP2_PRS_RI_VLAN_MASK                 0xc
+#define MVPP2_PRS_RI_VLAN_NONE                 ~(BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_VLAN_SINGLE               BIT(2)
+#define MVPP2_PRS_RI_VLAN_DOUBLE               BIT(3)
+#define MVPP2_PRS_RI_VLAN_TRIPLE               (BIT(2) | BIT(3))
+#define MVPP2_PRS_RI_CPU_CODE_MASK             0x70
+#define MVPP2_PRS_RI_CPU_CODE_RX_SPEC          BIT(4)
+#define MVPP2_PRS_RI_L2_CAST_MASK              0x600
+#define MVPP2_PRS_RI_L2_UCAST                  ~(BIT(9) | BIT(10))
+#define MVPP2_PRS_RI_L2_MCAST                  BIT(9)
+#define MVPP2_PRS_RI_L2_BCAST                  BIT(10)
+#define MVPP2_PRS_RI_PPPOE_MASK                        0x800
+#define MVPP2_PRS_RI_L3_PROTO_MASK             0x7000
+#define MVPP2_PRS_RI_L3_UN                     ~(BIT(12) | BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_IP4                    BIT(12)
+#define MVPP2_PRS_RI_L3_IP4_OPT                        BIT(13)
+#define MVPP2_PRS_RI_L3_IP4_OTHER              (BIT(12) | BIT(13))
+#define MVPP2_PRS_RI_L3_IP6                    BIT(14)
+#define MVPP2_PRS_RI_L3_IP6_EXT                        (BIT(12) | BIT(14))
+#define MVPP2_PRS_RI_L3_ARP                    (BIT(13) | BIT(14))
+#define MVPP2_PRS_RI_L3_ADDR_MASK              0x18000
+#define MVPP2_PRS_RI_L3_UCAST                  ~(BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_L3_MCAST                  BIT(15)
+#define MVPP2_PRS_RI_L3_BCAST                  (BIT(15) | BIT(16))
+#define MVPP2_PRS_RI_IP_FRAG_MASK              0x20000
+#define MVPP2_PRS_RI_UDF3_MASK                 0x300000
+#define MVPP2_PRS_RI_UDF3_RX_SPECIAL           BIT(21)
+#define MVPP2_PRS_RI_L4_PROTO_MASK             0x1c00000
+#define MVPP2_PRS_RI_L4_TCP                    BIT(22)
+#define MVPP2_PRS_RI_L4_UDP                    BIT(23)
+#define MVPP2_PRS_RI_L4_OTHER                  (BIT(22) | BIT(23))
+#define MVPP2_PRS_RI_UDF7_MASK                 0x60000000
+#define MVPP2_PRS_RI_UDF7_IP6_LITE             BIT(29)
+#define MVPP2_PRS_RI_DROP_MASK                 0x80000000
+
+/* Sram additional info bits assignment */
+#define MVPP2_PRS_IPV4_DIP_AI_BIT              BIT(0)
+#define MVPP2_PRS_IPV6_NO_EXT_AI_BIT           BIT(0)
+#define MVPP2_PRS_IPV6_EXT_AI_BIT              BIT(1)
+#define MVPP2_PRS_IPV6_EXT_AH_AI_BIT           BIT(2)
+#define MVPP2_PRS_IPV6_EXT_AH_LEN_AI_BIT       BIT(3)
+#define MVPP2_PRS_IPV6_EXT_AH_L4_AI_BIT                BIT(4)
+#define MVPP2_PRS_SINGLE_VLAN_AI               0
+#define MVPP2_PRS_DBL_VLAN_AI_BIT              BIT(7)
+
+/* DSA/EDSA type */
+#define MVPP2_PRS_TAGGED               true
+#define MVPP2_PRS_UNTAGGED             false
+#define MVPP2_PRS_EDSA                 true
+#define MVPP2_PRS_DSA                  false
+
+/* MAC entries, shadow udf */
+enum mvpp2_prs_udf {
+       MVPP2_PRS_UDF_MAC_DEF,
+       MVPP2_PRS_UDF_MAC_RANGE,
+       MVPP2_PRS_UDF_L2_DEF,
+       MVPP2_PRS_UDF_L2_DEF_COPY,
+       MVPP2_PRS_UDF_L2_USER,
+};
+
+/* Lookup ID */
+enum mvpp2_prs_lookup {
+       MVPP2_PRS_LU_MH,
+       MVPP2_PRS_LU_MAC,
+       MVPP2_PRS_LU_DSA,
+       MVPP2_PRS_LU_VLAN,
+       MVPP2_PRS_LU_L2,
+       MVPP2_PRS_LU_PPPOE,
+       MVPP2_PRS_LU_IP4,
+       MVPP2_PRS_LU_IP6,
+       MVPP2_PRS_LU_FLOWS,
+       MVPP2_PRS_LU_LAST,
+};
+
+/* L3 cast enum */
+enum mvpp2_prs_l3_cast {
+       MVPP2_PRS_L3_UNI_CAST,
+       MVPP2_PRS_L3_MULTI_CAST,
+       MVPP2_PRS_L3_BROAD_CAST
+};
+
+/* Classifier constants */
+#define MVPP2_CLS_FLOWS_TBL_SIZE       512
+#define MVPP2_CLS_FLOWS_TBL_DATA_WORDS 3
+#define MVPP2_CLS_LKP_TBL_SIZE         64
+
+/* BM constants */
+#define MVPP2_BM_POOLS_NUM             8
+#define MVPP2_BM_LONG_BUF_NUM          1024
+#define MVPP2_BM_SHORT_BUF_NUM         2048
+#define MVPP2_BM_POOL_SIZE_MAX         (16*1024 - MVPP2_BM_POOL_PTR_ALIGN/4)
+#define MVPP2_BM_POOL_PTR_ALIGN                128
+#define MVPP2_BM_SWF_LONG_POOL(port)   ((port > 2) ? 2 : port)
+#define MVPP2_BM_SWF_SHORT_POOL                3
+
+/* BM cookie (32 bits) definition */
+#define MVPP2_BM_COOKIE_POOL_OFFS      8
+#define MVPP2_BM_COOKIE_CPU_OFFS       24
+
+/* BM short pool packet size
+ * These values ensure that for SWF the total number
+ * of bytes allocated for each buffer will be 512
+ */
+#define MVPP2_BM_SHORT_PKT_SIZE                MVPP2_RX_MAX_PKT_SIZE(512)
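
MVPP2_RX_MAX_PKT_SIZE(512) runs the sizing macros above in reverse: the
short-pool packet size is whatever remains of a 512-byte budget after
headroom and shared-info (both of which are kernel-config dependent).
Spelled out:

    /* MVPP2_BM_SHORT_PKT_SIZE = 512 - NET_SKB_PAD - MVPP2_SKB_SHINFO_SIZE,
     * so MVPP2_RX_TOTAL_SIZE(MVPP2_RX_BUF_SIZE(MVPP2_BM_SHORT_PKT_SIZE)) == 512.
     */
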
+
+enum mvpp2_bm_type {
+       MVPP2_BM_FREE,
+       MVPP2_BM_SWF_LONG,
+       MVPP2_BM_SWF_SHORT
+};
+
+/* Definitions */
+
+/* Shared Packet Processor resources */
+struct mvpp2 {
+       /* Shared registers' base addresses */
+       void __iomem *base;
+       void __iomem *lms_base;
+
+       /* Common clocks */
+       struct clk *pp_clk;
+       struct clk *gop_clk;
+
+       /* List of pointers to port structures */
+       struct mvpp2_port **port_list;
+
+       /* Aggregated TXQs */
+       struct mvpp2_tx_queue *aggr_txqs;
+
+       /* BM pools */
+       struct mvpp2_bm_pool *bm_pools;
+
+       /* PRS shadow table */
+       struct mvpp2_prs_shadow *prs_shadow;
+       /* PRS auxiliary table for double vlan entries control */
+       bool *prs_double_vlans;
+
+       /* Tclk value */
+       u32 tclk;
+};
+
+struct mvpp2_pcpu_stats {
+       struct  u64_stats_sync syncp;
+       u64     rx_packets;
+       u64     rx_bytes;
+       u64     tx_packets;
+       u64     tx_bytes;
+};
+
+struct mvpp2_port {
+       u8 id;
+
+       int irq;
+
+       struct mvpp2 *priv;
+
+       /* Per-port registers' base address */
+       void __iomem *base;
+
+       struct mvpp2_rx_queue **rxqs;
+       struct mvpp2_tx_queue **txqs;
+       struct net_device *dev;
+
+       int pkt_size;
+
+       u32 pending_cause_rx;
+       struct napi_struct napi;
+
+       /* Flags */
+       unsigned long flags;
+
+       u16 tx_ring_size;
+       u16 rx_ring_size;
+       struct mvpp2_pcpu_stats __percpu *stats;
+
+       struct phy_device *phy_dev;
+       phy_interface_t phy_interface;
+       struct device_node *phy_node;
+       unsigned int link;
+       unsigned int duplex;
+       unsigned int speed;
+
+       struct mvpp2_bm_pool *pool_long;
+       struct mvpp2_bm_pool *pool_short;
+
+       /* Index of first port's physical RXQ */
+       u8 first_rxq;
+};
+
+/* The mvpp2_tx_desc and mvpp2_rx_desc structures describe the
+ * layout of the transmit and receive DMA descriptors; this layout
+ * is dictated by the hardware design.
+ */
+
+#define MVPP2_TXD_L3_OFF_SHIFT         0
+#define MVPP2_TXD_IP_HLEN_SHIFT                8
+#define MVPP2_TXD_L4_CSUM_FRAG         BIT(13)
+#define MVPP2_TXD_L4_CSUM_NOT          BIT(14)
+#define MVPP2_TXD_IP_CSUM_DISABLE      BIT(15)
+#define MVPP2_TXD_PADDING_DISABLE      BIT(23)
+#define MVPP2_TXD_L4_UDP               BIT(24)
+#define MVPP2_TXD_L3_IP6               BIT(26)
+#define MVPP2_TXD_L_DESC               BIT(28)
+#define MVPP2_TXD_F_DESC               BIT(29)
+
+#define MVPP2_RXD_ERR_SUMMARY          BIT(15)
+#define MVPP2_RXD_ERR_CODE_MASK                (BIT(13) | BIT(14))
+#define MVPP2_RXD_ERR_CRC              0x0
+#define MVPP2_RXD_ERR_OVERRUN          BIT(13)
+#define MVPP2_RXD_ERR_RESOURCE         (BIT(13) | BIT(14))
+#define MVPP2_RXD_BM_POOL_ID_OFFS      16
+#define MVPP2_RXD_BM_POOL_ID_MASK      (BIT(16) | BIT(17) | BIT(18))
+#define MVPP2_RXD_HWF_SYNC             BIT(21)
+#define MVPP2_RXD_L4_CSUM_OK           BIT(22)
+#define MVPP2_RXD_IP4_HEADER_ERR       BIT(24)
+#define MVPP2_RXD_L4_TCP               BIT(25)
+#define MVPP2_RXD_L4_UDP               BIT(26)
+#define MVPP2_RXD_L3_IP4               BIT(28)
+#define MVPP2_RXD_L3_IP6               BIT(30)
+#define MVPP2_RXD_BUF_HDR              BIT(31)
+
+struct mvpp2_tx_desc {
+       u32 command;            /* Options used by HW for packet transmission */
+       u8  packet_offset;      /* the offset from the buffer beginning */
+       u8  phys_txq;           /* destination queue ID                 */
+       u16 data_size;          /* data size of transmitted packet in bytes */
+       u32 buf_phys_addr;      /* physical addr of transmitted buffer  */
+       u32 buf_cookie;         /* cookie for access to TX buffer in tx path */
+       u32 reserved1[3];       /* hw_cmd (for future use, BM, PON, PNC) */
+       u32 reserved2;          /* reserved (for future use)            */
+};
+
+struct mvpp2_rx_desc {
+       u32 status;             /* info about received packet           */
+       u16 reserved1;          /* parser_info (for future use, PnC)    */
+       u16 data_size;          /* size of received packet in bytes     */
+       u32 buf_phys_addr;      /* physical address of the buffer       */
+       u32 buf_cookie;         /* cookie for access to RX buffer in rx path */
+       u16 reserved2;          /* gem_port_id (for future use, PON)    */
+       u16 reserved3;          /* csum_l4 (for future use, PnC)        */
+       u8  reserved4;          /* bm_qset (for future use, BM)         */
+       u8  reserved5;
+       u16 reserved6;          /* classify_info (for future use, PnC)  */
+       u32 reserved7;          /* flow_id (for future use, PnC) */
+       u32 reserved8;
+};
+
+/* Per-CPU Tx queue control */
+struct mvpp2_txq_pcpu {
+       int cpu;
+
+       /* Number of Tx DMA descriptors in the descriptor ring */
+       int size;
+
+       /* Number of currently used Tx DMA descriptors in the
+        * descriptor ring
+        */
+       int count;
+
+       /* Number of Tx DMA descriptors reserved for each CPU */
+       int reserved_num;
+
+       /* Array of transmitted skb */
+       struct sk_buff **tx_skb;
+
+       /* Index of last TX DMA descriptor that was inserted */
+       int txq_put_index;
+
+       /* Index of the TX DMA descriptor to be cleaned up */
+       int txq_get_index;
+};
+
+struct mvpp2_tx_queue {
+       /* Physical number of this Tx queue */
+       u8 id;
+
+       /* Logical number of this Tx queue */
+       u8 log_id;
+
+       /* Number of Tx DMA descriptors in the descriptor ring */
+       int size;
+
+       /* Number of currently used Tx DMA descriptors in the descriptor ring */
+       int count;
+
+       /* Per-CPU control of physical Tx queues */
+       struct mvpp2_txq_pcpu __percpu *pcpu;
+
+       /* Array of transmitted skb */
+       struct sk_buff **tx_skb;
+
+       u32 done_pkts_coal;
+
+       /* Virtual address of the Tx DMA descriptors array */
+       struct mvpp2_tx_desc *descs;
+
+       /* DMA address of the Tx DMA descriptors array */
+       dma_addr_t descs_phys;
+
+       /* Index of the last Tx DMA descriptor */
+       int last_desc;
+
+       /* Index of the next Tx DMA descriptor to process */
+       int next_desc_to_proc;
+};
+
+struct mvpp2_rx_queue {
+       /* RX queue number, in the range 0-31 for physical RXQs */
+       u8 id;
+
+       /* Num of rx descriptors in the rx descriptor ring */
+       int size;
+
+       u32 pkts_coal;
+       u32 time_coal;
+
+       /* Virtual address of the RX DMA descriptors array */
+       struct mvpp2_rx_desc *descs;
+
+       /* DMA address of the RX DMA descriptors array */
+       dma_addr_t descs_phys;
+
+       /* Index of the last RX DMA descriptor */
+       int last_desc;
+
+       /* Index of the next RX DMA descriptor to process */
+       int next_desc_to_proc;
+
+       /* ID of port to which physical RXQ is mapped */
+       int port;
+
+       /* Port's logical RXQ number to which the physical RXQ is mapped */
+       int logic_rxq;
+};
+
+union mvpp2_prs_tcam_entry {
+       u32 word[MVPP2_PRS_TCAM_WORDS];
+       u8  byte[MVPP2_PRS_TCAM_WORDS * 4];
+};
+
+union mvpp2_prs_sram_entry {
+       u32 word[MVPP2_PRS_SRAM_WORDS];
+       u8  byte[MVPP2_PRS_SRAM_WORDS * 4];
+};
+
+struct mvpp2_prs_entry {
+       u32 index;
+       union mvpp2_prs_tcam_entry tcam;
+       union mvpp2_prs_sram_entry sram;
+};
+
+struct mvpp2_prs_shadow {
+       bool valid;
+       bool finish;
+
+       /* Lookup ID */
+       int lu;
+
+       /* User defined offset */
+       int udf;
+
+       /* Result info */
+       u32 ri;
+       u32 ri_mask;
+};
+
+struct mvpp2_cls_flow_entry {
+       u32 index;
+       u32 data[MVPP2_CLS_FLOWS_TBL_DATA_WORDS];
+};
+
+struct mvpp2_cls_lookup_entry {
+       u32 lkpid;
+       u32 way;
+       u32 data;
+};
+
+struct mvpp2_bm_pool {
+       /* Pool number in the range 0-7 */
+       int id;
+       enum mvpp2_bm_type type;
+
+       /* Buffer Pointers Pool External (BPPE) size */
+       int size;
+       /* Number of buffers for this pool */
+       int buf_num;
+       /* Pool buffer size */
+       int buf_size;
+       /* Packet size */
+       int pkt_size;
+
+       /* BPPE virtual base address */
+       u32 *virt_addr;
+       /* BPPE physical base address */
+       dma_addr_t phys_addr;
+
+       /* Ports using BM pool */
+       u32 port_map;
+
+       /* Occupied buffers indicator */
+       atomic_t in_use;
+       int in_use_thresh;
+
+       spinlock_t lock;
+};
+
+struct mvpp2_buff_hdr {
+       u32 next_buff_phys_addr;
+       u32 next_buff_virt_addr;
+       u16 byte_count;
+       u16 info;
+       u8  reserved1;          /* bm_qset (for future use, BM)         */
+};
+
+/* Buffer header info bits */
+#define MVPP2_B_HDR_INFO_MC_ID_MASK    0xfff
+#define MVPP2_B_HDR_INFO_MC_ID(info)   ((info) & MVPP2_B_HDR_INFO_MC_ID_MASK)
+#define MVPP2_B_HDR_INFO_LAST_OFFS     12
+#define MVPP2_B_HDR_INFO_LAST_MASK     BIT(12)
+#define MVPP2_B_HDR_INFO_IS_LAST(info) \
+          ((info & MVPP2_B_HDR_INFO_LAST_MASK) >> MVPP2_B_HDR_INFO_LAST_OFFS)
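+
+/* Example decode, derived from the masks above: info = 0x1234 yields
+ * multicast ID 0x234 (bits 11:0) and a "last buffer" flag of 1
+ * (bit 12).
+ */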
+
+/* Static declarations */
+
+/* Number of RXQs used by a single port */
+static int rxq_number = MVPP2_DEFAULT_RXQ;
+/* Number of TXQs used by a single port */
+static int txq_number = MVPP2_MAX_TXQ;
+
+#define MVPP2_DRIVER_NAME "mvpp2"
+#define MVPP2_DRIVER_VERSION "1.0"
+
+/* Utility/helper methods */
+
+static void mvpp2_write(struct mvpp2 *priv, u32 offset, u32 data)
+{
+       writel(data, priv->base + offset);
+}
+
+static u32 mvpp2_read(struct mvpp2 *priv, u32 offset)
+{
+       return readl(priv->base + offset);
+}
+
+static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
+{
+       txq_pcpu->txq_get_index++;
+       if (txq_pcpu->txq_get_index == txq_pcpu->size)
+               txq_pcpu->txq_get_index = 0;
+}
+
+static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
+                             struct sk_buff *skb)
+{
+       txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
+       txq_pcpu->txq_put_index++;
+       if (txq_pcpu->txq_put_index == txq_pcpu->size)
+               txq_pcpu->txq_put_index = 0;
+}
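+
+/* Note on the two helpers above: txq_put_index and txq_get_index walk
+ * the per-CPU tx_skb array as a ring, wrapping to 0 at 'size', so the
+ * transmit (put) and tx-done cleanup (get) paths stay aligned with the
+ * hardware descriptor ring.
+ */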
+
+/* Get the physical egress port number */
+static inline int mvpp2_egress_port(struct mvpp2_port *port)
+{
+       return MVPP2_MAX_TCONT + port->id;
+}
+
+/* Get the physical TXQ number */
+static inline int mvpp2_txq_phys(int port, int txq)
+{
+       return (MVPP2_MAX_TCONT + port) * MVPP2_MAX_TXQ + txq;
+}
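+
+/* Illustrative numbering, assuming only what the two helpers above
+ * encode: egress port numbers start after the MVPP2_MAX_TCONT TCONTs,
+ * and physical TXQs are laid out consecutively per egress entity, so
+ * port 1's txq 2 maps to (MVPP2_MAX_TCONT + 1) * MVPP2_MAX_TXQ + 2.
+ */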
+
+/* Parser configuration routines */
+
+/* Update parser tcam and sram hw entries */
+static int mvpp2_prs_hw_write(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+       int i;
+
+       if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+               return -EINVAL;
+
+       /* Clear entry invalidation bit */
+       pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] &= ~MVPP2_PRS_TCAM_INV_MASK;
+
+       /* Write tcam index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+               mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), pe->tcam.word[i]);
+
+       /* Write sram index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+               mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), pe->sram.word[i]);
+
+       return 0;
+}
+
+/* Read tcam entry from hw */
+static int mvpp2_prs_hw_read(struct mvpp2 *priv, struct mvpp2_prs_entry *pe)
+{
+       int i;
+
+       if (pe->index > MVPP2_PRS_TCAM_SRAM_SIZE - 1)
+               return -EINVAL;
+
+       /* Write tcam index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, pe->index);
+
+       pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] = mvpp2_read(priv,
+                             MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD));
+       if (pe->tcam.word[MVPP2_PRS_TCAM_INV_WORD] & MVPP2_PRS_TCAM_INV_MASK)
+               return MVPP2_PRS_TCAM_ENTRY_INVALID;
+
+       for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+               pe->tcam.word[i] = mvpp2_read(priv, MVPP2_PRS_TCAM_DATA_REG(i));
+
+       /* Write sram index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, pe->index);
+       for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+               pe->sram.word[i] = mvpp2_read(priv, MVPP2_PRS_SRAM_DATA_REG(i));
+
+       return 0;
+}
+
+/* Invalidate tcam hw entry */
+static void mvpp2_prs_hw_inv(struct mvpp2 *priv, int index)
+{
+       /* Write index - indirect access */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+       mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(MVPP2_PRS_TCAM_INV_WORD),
+                   MVPP2_PRS_TCAM_INV_MASK);
+}
+
+/* Enable shadow table entry and set its lookup ID */
+static void mvpp2_prs_shadow_set(struct mvpp2 *priv, int index, int lu)
+{
+       priv->prs_shadow[index].valid = true;
+       priv->prs_shadow[index].lu = lu;
+}
+
+/* Update ri fields in shadow table entry */
+static void mvpp2_prs_shadow_ri_set(struct mvpp2 *priv, int index,
+                                   unsigned int ri, unsigned int ri_mask)
+{
+       priv->prs_shadow[index].ri_mask = ri_mask;
+       priv->prs_shadow[index].ri = ri;
+}
+
+/* Update lookup field in tcam sw entry */
+static void mvpp2_prs_tcam_lu_set(struct mvpp2_prs_entry *pe, unsigned int lu)
+{
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_LU_BYTE);
+
+       pe->tcam.byte[MVPP2_PRS_TCAM_LU_BYTE] = lu;
+       pe->tcam.byte[enable_off] = MVPP2_PRS_LU_MASK;
+}
+
+/* Update mask for single port in tcam sw entry */
+static void mvpp2_prs_tcam_port_set(struct mvpp2_prs_entry *pe,
+                                   unsigned int port, bool add)
+{
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+       if (add)
+               pe->tcam.byte[enable_off] &= ~(1 << port);
+       else
+               pe->tcam.byte[enable_off] |= 1 << port;
+}
+
+/* Update port map in tcam sw entry */
+static void mvpp2_prs_tcam_port_map_set(struct mvpp2_prs_entry *pe,
+                                       unsigned int ports)
+{
+       unsigned char port_mask = MVPP2_PRS_PORT_MASK;
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+       pe->tcam.byte[MVPP2_PRS_TCAM_PORT_BYTE] = 0;
+       pe->tcam.byte[enable_off] &= ~port_mask;
+       pe->tcam.byte[enable_off] |= ~ports & MVPP2_PRS_PORT_MASK;
+}
+
+/* Obtain port map from tcam sw entry */
+static unsigned int mvpp2_prs_tcam_port_map_get(struct mvpp2_prs_entry *pe)
+{
+       int enable_off = MVPP2_PRS_TCAM_EN_OFFS(MVPP2_PRS_TCAM_PORT_BYTE);
+
+       return ~(pe->tcam.byte[enable_off]) & MVPP2_PRS_PORT_MASK;
+}
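+
+/* The per-port enable bits in the tcam entry are active-low: a cleared
+ * bit means the entry matches that port. Hence the inversion on both
+ * write and read above; as a sketch (assuming MVPP2_PRS_PORT_MASK
+ * covers all port bits), port_map_set(pe, BIT(0) | BIT(1)) stores
+ * the complement of 0x03 in the enable byte and port_map_get()
+ * recovers 0x03.
+ */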
+
+/* Set byte of data and its enable bits in tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_set(struct mvpp2_prs_entry *pe,
+                                        unsigned int offs, unsigned char byte,
+                                        unsigned char enable)
+{
+       pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)] = byte;
+       pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)] = enable;
+}
+
+/* Get byte of data and its enable bits from tcam sw entry */
+static void mvpp2_prs_tcam_data_byte_get(struct mvpp2_prs_entry *pe,
+                                        unsigned int offs, unsigned char *byte,
+                                        unsigned char *enable)
+{
+       *byte = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(offs)];
+       *enable = pe->tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(offs)];
+}
+
+/* Compare tcam data bytes with a pattern */
+static bool mvpp2_prs_tcam_data_cmp(struct mvpp2_prs_entry *pe, int offs,
+                                   u16 data)
+{
+       int off = MVPP2_PRS_TCAM_DATA_BYTE(offs);
+       u16 tcam_data;
+
+       /* Combine the two tcam data bytes into one 16-bit value */
+       tcam_data = (pe->tcam.byte[off + 1] << 8) | pe->tcam.byte[off];
+       if (tcam_data != data)
+               return false;
+       return true;
+}
+
+/* Update ai bits in tcam sw entry */
+static void mvpp2_prs_tcam_ai_update(struct mvpp2_prs_entry *pe,
+                                    unsigned int bits, unsigned int enable)
+{
+       int i, ai_idx = MVPP2_PRS_TCAM_AI_BYTE;
+
+       for (i = 0; i < MVPP2_PRS_AI_BITS; i++) {
+               if (!(enable & BIT(i)))
+                       continue;
+
+               if (bits & BIT(i))
+                       pe->tcam.byte[ai_idx] |= 1 << i;
+               else
+                       pe->tcam.byte[ai_idx] &= ~(1 << i);
+       }
+
+       pe->tcam.byte[MVPP2_PRS_TCAM_EN_OFFS(ai_idx)] |= enable;
+}
+
+/* Get ai bits from tcam sw entry */
+static int mvpp2_prs_tcam_ai_get(struct mvpp2_prs_entry *pe)
+{
+       return pe->tcam.byte[MVPP2_PRS_TCAM_AI_BYTE];
+}
+
+/* Set ethertype in tcam sw entry */
+static void mvpp2_prs_match_etype(struct mvpp2_prs_entry *pe, int offset,
+                                 unsigned short ethertype)
+{
+       mvpp2_prs_tcam_data_byte_set(pe, offset + 0, ethertype >> 8, 0xff);
+       mvpp2_prs_tcam_data_byte_set(pe, offset + 1, ethertype & 0xff, 0xff);
+}
+
+/* Set bits in sram sw entry */
+static void mvpp2_prs_sram_bits_set(struct mvpp2_prs_entry *pe, int bit_num,
+                                   int val)
+{
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] |= (val << (bit_num % 8));
+}
+
+/* Clear bits in sram sw entry */
+static void mvpp2_prs_sram_bits_clear(struct mvpp2_prs_entry *pe, int bit_num,
+                                     int val)
+{
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(bit_num)] &= ~(val << (bit_num % 8));
+}
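+
+/* Bit addressing used by the two helpers above: bit_num selects sram
+ * byte MVPP2_BIT_TO_BYTE(bit_num) and position bit_num % 8 within it.
+ * 'val' is shifted into place as-is, so callers pass either a single
+ * bit or a field mask aligned to bit_num.
+ */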
+
+/* Update ri bits in sram sw entry */
+static void mvpp2_prs_sram_ri_update(struct mvpp2_prs_entry *pe,
+                                    unsigned int bits, unsigned int mask)
+{
+       unsigned int i;
+
+       for (i = 0; i < MVPP2_PRS_SRAM_RI_CTRL_BITS; i++) {
+               int ri_off = MVPP2_PRS_SRAM_RI_OFFS;
+
+               if (!(mask & BIT(i)))
+                       continue;
+
+               if (bits & BIT(i))
+                       mvpp2_prs_sram_bits_set(pe, ri_off + i, 1);
+               else
+                       mvpp2_prs_sram_bits_clear(pe, ri_off + i, 1);
+
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_RI_CTRL_OFFS + i, 1);
+       }
+}
+
+/* Obtain ri bits from sram sw entry */
+static int mvpp2_prs_sram_ri_get(struct mvpp2_prs_entry *pe)
+{
+       return pe->sram.word[MVPP2_PRS_SRAM_RI_WORD];
+}
+
+/* Update ai bits in sram sw entry */
+static void mvpp2_prs_sram_ai_update(struct mvpp2_prs_entry *pe,
+                                    unsigned int bits, unsigned int mask)
+{
+       unsigned int i;
+       int ai_off = MVPP2_PRS_SRAM_AI_OFFS;
+
+       for (i = 0; i < MVPP2_PRS_SRAM_AI_CTRL_BITS; i++) {
+               if (!(mask & BIT(i)))
+                       continue;
+
+               if (bits & BIT(i))
+                       mvpp2_prs_sram_bits_set(pe, ai_off + i, 1);
+               else
+                       mvpp2_prs_sram_bits_clear(pe, ai_off + i, 1);
+
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_AI_CTRL_OFFS + i, 1);
+       }
+}
+
+/* Read ai bits from sram sw entry */
+static int mvpp2_prs_sram_ai_get(struct mvpp2_prs_entry *pe)
+{
+       u8 bits;
+       int ai_off = MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_AI_OFFS);
+       int ai_en_off = ai_off + 1;
+       int ai_shift = MVPP2_PRS_SRAM_AI_OFFS % 8;
+
+       bits = (pe->sram.byte[ai_off] >> ai_shift) |
+              (pe->sram.byte[ai_en_off] << (8 - ai_shift));
+
+       return bits;
+}
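+
+/* The AI field may straddle a byte boundary, so the read above stitches
+ * it together: with ai_shift = 2, for instance, bits 7:2 of the first
+ * byte supply the low six bits and bits 1:0 of the next byte supply
+ * the top two.
+ */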
+
+/* In the sram sw entry set the lookup ID field of the tcam key to be used in
+ * the next lookup iteration
+ */
+static void mvpp2_prs_sram_next_lu_set(struct mvpp2_prs_entry *pe,
+                                      unsigned int lu)
+{
+       int sram_next_off = MVPP2_PRS_SRAM_NEXT_LU_OFFS;
+
+       mvpp2_prs_sram_bits_clear(pe, sram_next_off,
+                                 MVPP2_PRS_SRAM_NEXT_LU_MASK);
+       mvpp2_prs_sram_bits_set(pe, sram_next_off, lu);
+}
+
+/* In the sram sw entry set sign and value of the next lookup offset
+ * and the offset value generated to the classifier
+ */
+static void mvpp2_prs_sram_shift_set(struct mvpp2_prs_entry *pe, int shift,
+                                    unsigned int op)
+{
+       /* Set sign */
+       if (shift < 0) {
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+               shift = 0 - shift;
+       } else {
+               mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_SHIFT_SIGN_BIT, 1);
+       }
+
+       /* Set value */
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_SHIFT_OFFS)] =
+                                                          (unsigned char)shift;
+
+       /* Reset and set operation */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS,
+                                 MVPP2_PRS_SRAM_OP_SEL_SHIFT_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_SHIFT_OFFS, op);
+
+       /* Set base offset as current */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
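+
+/* Shift encoding note, derived from the routine above: the shift is
+ * stored sign-magnitude, one sign bit plus an unsigned byte for the
+ * magnitude, so shift = -18 sets the sign bit and stores 18; the
+ * op-select field then tells the parser how to apply it.
+ */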
+
+/* In the sram sw entry set sign and value of the user defined offset
+ * generated to the classifier
+ */
+static void mvpp2_prs_sram_offset_set(struct mvpp2_prs_entry *pe,
+                                     unsigned int type, int offset,
+                                     unsigned int op)
+{
+       /* Set sign */
+       if (offset < 0) {
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+               offset = 0 - offset;
+       } else {
+               mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_SIGN_BIT, 1);
+       }
+
+       /* Set value */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_OFFS,
+                                 MVPP2_PRS_SRAM_UDF_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_OFFS, offset);
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_UDF_BITS)] &=
+             ~(MVPP2_PRS_SRAM_UDF_MASK >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_UDF_BITS)] |=
+                               (offset >> (8 - (MVPP2_PRS_SRAM_UDF_OFFS % 8)));
+
+       /* Set offset type */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS,
+                                 MVPP2_PRS_SRAM_UDF_TYPE_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_UDF_TYPE_OFFS, type);
+
+       /* Set offset operation */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_MASK);
+       mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS, op);
+
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] &=
+                                            ~(MVPP2_PRS_SRAM_OP_SEL_UDF_MASK >>
+                                   (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+       pe->sram.byte[MVPP2_BIT_TO_BYTE(MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS +
+                                       MVPP2_PRS_SRAM_OP_SEL_UDF_BITS)] |=
+                            (op >> (8 - (MVPP2_PRS_SRAM_OP_SEL_UDF_OFFS % 8)));
+
+       /* Set base offset as current */
+       mvpp2_prs_sram_bits_clear(pe, MVPP2_PRS_SRAM_OP_SEL_BASE_OFFS, 1);
+}
+
+/* Find parser flow entry */
+static struct mvpp2_prs_entry *mvpp2_prs_flow_find(struct mvpp2 *priv, int flow)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Go through all entries with MVPP2_PRS_LU_FLOWS */
+       for (tid = MVPP2_PRS_TCAM_SRAM_SIZE - 1; tid >= 0; tid--) {
+               u8 bits;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_FLOWS)
+                       continue;
+
+               pe->index = tid;
+               mvpp2_prs_hw_read(priv, pe);
+               bits = mvpp2_prs_sram_ai_get(pe);
+
+               /* Sram stores the classification lookup ID in AI bits [5:0] */
+               if ((bits & MVPP2_PRS_FLOW_ID_MASK) == flow)
+                       return pe;
+       }
+       kfree(pe);
+
+       return NULL;
+}
+
+/* Return first free tcam index, seeking from start to end */
+static int mvpp2_prs_tcam_first_free(struct mvpp2 *priv, unsigned char start,
+                                    unsigned char end)
+{
+       int tid;
+
+       if (start > end)
+               swap(start, end);
+
+       if (end >= MVPP2_PRS_TCAM_SRAM_SIZE)
+               end = MVPP2_PRS_TCAM_SRAM_SIZE - 1;
+
+       for (tid = start; tid <= end; tid++) {
+               if (!priv->prs_shadow[tid].valid)
+                       return tid;
+       }
+
+       return -EINVAL;
+}
+
+/* Enable/disable dropping of all mac DAs */
+static void mvpp2_prs_mac_drop_all_set(struct mvpp2 *priv, int port, bool add)
+{
+       struct mvpp2_prs_entry pe;
+
+       if (priv->prs_shadow[MVPP2_PE_DROP_ALL].valid) {
+               /* Entry exists - update port only */
+               pe.index = MVPP2_PE_DROP_ALL;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+               pe.index = MVPP2_PE_DROP_ALL;
+
+               /* Non-promiscuous mode for all ports - DROP unknown packets */
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                        MVPP2_PRS_RI_DROP_MASK);
+
+               mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set port to promiscuous mode */
+static void mvpp2_prs_mac_promisc_set(struct mvpp2 *priv, int port, bool add)
+{
+       struct mvpp2_prs_entry pe;
+
+       /* Promiscuous mode - accept unknown packets */
+
+       if (priv->prs_shadow[MVPP2_PE_MAC_PROMISCUOUS].valid) {
+               /* Entry exists - update port only */
+               pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+               pe.index = MVPP2_PE_MAC_PROMISCUOUS;
+
+               /* Continue - set next lookup */
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+               /* Set result info bits */
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_UCAST,
+                                        MVPP2_PRS_RI_L2_CAST_MASK);
+
+               /* Shift to ethertype */
+               mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Accept multicast */
+static void mvpp2_prs_mac_multi_set(struct mvpp2 *priv, int port, int index,
+                                   bool add)
+{
+       struct mvpp2_prs_entry pe;
+       unsigned char da_mc;
+
+       /* The first byte of an Ethernet multicast address is
+        * 0x01 for IPv4 and 0x33 for IPv6
+        */
+       da_mc = (index == MVPP2_PE_MAC_MC_ALL) ? 0x01 : 0x33;
+
+       if (priv->prs_shadow[index].valid) {
+               /* Entry exists - update port only */
+               pe.index = index;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+               pe.index = index;
+
+               /* Continue - set next lookup */
+               mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_DSA);
+
+               /* Set result info bits */
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L2_MCAST,
+                                        MVPP2_PRS_RI_L2_CAST_MASK);
+
+               /* Update tcam entry data first byte */
+               mvpp2_prs_tcam_data_byte_set(&pe, 0, da_mc, 0xff);
+
+               /* Shift to ethertype */
+               mvpp2_prs_sram_shift_set(&pe, 2 * ETH_ALEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa packets */
+static void mvpp2_prs_dsa_tag_set(struct mvpp2 *priv, int port, bool add,
+                                 bool tagged, bool extend)
+{
+       struct mvpp2_prs_entry pe;
+       int tid, shift;
+
+       if (extend) {
+               tid = tagged ? MVPP2_PE_EDSA_TAGGED : MVPP2_PE_EDSA_UNTAGGED;
+               shift = 8;
+       } else {
+               tid = tagged ? MVPP2_PE_DSA_TAGGED : MVPP2_PE_DSA_UNTAGGED;
+               shift = 4;
+       }
+
+       if (priv->prs_shadow[tid].valid) {
+               /* Entry exists - update port only */
+               pe.index = tid;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+               pe.index = tid;
+
+               /* Shift 4 bytes for a DSA tag or 8 bytes for an EDSA tag */
+               mvpp2_prs_sram_shift_set(&pe, shift,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+               if (tagged) {
+                       /* Set tagged bit in DSA tag */
+                       mvpp2_prs_tcam_data_byte_set(&pe, 0,
+                                                    MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+                                                    MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+                       /* Clear all ai bits for next iteration */
+                       mvpp2_prs_sram_ai_update(&pe, 0,
+                                                MVPP2_PRS_SRAM_AI_MASK);
+                       /* If packet is tagged continue check vlans */
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+               } else {
+                       /* Set result info bits to 'no vlans' */
+                       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+               }
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set entry for dsa ethertype */
+static void mvpp2_prs_dsa_tag_ethertype_set(struct mvpp2 *priv, int port,
+                                           bool add, bool tagged, bool extend)
+{
+       struct mvpp2_prs_entry pe;
+       int tid, shift, port_mask;
+
+       if (extend) {
+               tid = tagged ? MVPP2_PE_ETYPE_EDSA_TAGGED :
+                     MVPP2_PE_ETYPE_EDSA_UNTAGGED;
+               port_mask = 0;
+               shift = 8;
+       } else {
+               tid = tagged ? MVPP2_PE_ETYPE_DSA_TAGGED :
+                     MVPP2_PE_ETYPE_DSA_UNTAGGED;
+               port_mask = MVPP2_PRS_PORT_MASK;
+               shift = 4;
+       }
+
+       if (priv->prs_shadow[tid].valid) {
+               /* Entry exists - update port only */
+               pe.index = tid;
+               mvpp2_prs_hw_read(priv, &pe);
+       } else {
+               /* Entry doesn't exist - create new */
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+               pe.index = tid;
+
+               /* Set ethertype */
+               mvpp2_prs_match_etype(&pe, 0, ETH_P_EDSA);
+               mvpp2_prs_match_etype(&pe, 2, 0);
+
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DSA_MASK,
+                                        MVPP2_PRS_RI_DSA_MASK);
+               /* Shift past ethertype + 2 reserved bytes + tag */
+               mvpp2_prs_sram_shift_set(&pe, 2 + MVPP2_ETH_TYPE_LEN + shift,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_DSA);
+
+               if (tagged) {
+                       /* Set tagged bit in DSA tag */
+                       mvpp2_prs_tcam_data_byte_set(&pe,
+                                                    MVPP2_ETH_TYPE_LEN + 2 + 3,
+                                                MVPP2_PRS_TCAM_DSA_TAGGED_BIT,
+                                                MVPP2_PRS_TCAM_DSA_TAGGED_BIT);
+                       /* Clear all ai bits for next iteration */
+                       mvpp2_prs_sram_ai_update(&pe, 0,
+                                                MVPP2_PRS_SRAM_AI_MASK);
+                       /* If packet is tagged continue check vlans */
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+               } else {
+                       /* Set result info bits to 'no vlans' */
+                       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+                       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+               }
+               /* Mask/unmask all ports, depending on dsa type */
+               mvpp2_prs_tcam_port_map_set(&pe, port_mask);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(&pe, port, add);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Search for existing single/triple vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_vlan_find(struct mvpp2 *priv,
+                                                  unsigned short tpid, int ai)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+       /* Go through all entries with MVPP2_PRS_LU_VLAN */
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned int ri_bits, ai_bits;
+               bool match;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+                       continue;
+
+               pe->index = tid;
+
+               mvpp2_prs_hw_read(priv, pe);
+               match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid));
+               if (!match)
+                       continue;
+
+               /* Get vlan type */
+               ri_bits = mvpp2_prs_sram_ri_get(pe);
+               ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+
+               /* Get current ai value from tcam */
+               ai_bits = mvpp2_prs_tcam_ai_get(pe);
+               /* Clear double vlan bit */
+               ai_bits &= ~MVPP2_PRS_DBL_VLAN_AI_BIT;
+
+               if (ai != ai_bits)
+                       continue;
+
+               if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+                   ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+                       return pe;
+       }
+       kfree(pe);
+
+       return NULL;
+}
+
+/* Add/update single/triple vlan entry */
+static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
+                             unsigned int port_map)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid_aux, tid;
+
+       pe = mvpp2_prs_vlan_find(priv, tpid, ai);
+
+       if (!pe) {
+               /* Create new tcam entry */
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_LAST_FREE_TID,
+                                               MVPP2_PE_FIRST_FREE_TID);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+
+               /* Get last double vlan tid */
+               for (tid_aux = MVPP2_PE_LAST_FREE_TID;
+                    tid_aux >= MVPP2_PE_FIRST_FREE_TID; tid_aux--) {
+                       unsigned int ri_bits;
+
+                       if (!priv->prs_shadow[tid_aux].valid ||
+                           priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+                               continue;
+
+                       pe->index = tid_aux;
+                       mvpp2_prs_hw_read(priv, pe);
+                       ri_bits = mvpp2_prs_sram_ri_get(pe);
+                       if ((ri_bits & MVPP2_PRS_RI_VLAN_MASK) ==
+                           MVPP2_PRS_RI_VLAN_DOUBLE)
+                               break;
+               }
+
+               if (tid <= tid_aux)
+                       return -EINVAL;
+
+               memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+               pe->index = tid;
+
+               mvpp2_prs_match_etype(pe, 0, tpid);
+
+               mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_L2);
+               /* Shift 4 bytes - skip 1 vlan tag */
+               mvpp2_prs_sram_shift_set(pe, MVPP2_VLAN_TAG_LEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+               /* Clear all ai bits for next iteration */
+               mvpp2_prs_sram_ai_update(pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+               if (ai == MVPP2_PRS_SINGLE_VLAN_AI) {
+                       mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_SINGLE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+               } else {
+                       ai |= MVPP2_PRS_DBL_VLAN_AI_BIT;
+                       mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_TRIPLE,
+                                                MVPP2_PRS_RI_VLAN_MASK);
+               }
+               mvpp2_prs_tcam_ai_update(pe, ai, MVPP2_PRS_SRAM_AI_MASK);
+
+               mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+       }
+       /* Update ports' mask */
+       mvpp2_prs_tcam_port_map_set(pe, port_map);
+
+       mvpp2_prs_hw_write(priv, pe);
+
+       kfree(pe);
+
+       return 0;
+}
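+
+/* Ordering note for the routine above (an inference from its index
+ * checks, not from a datasheet): double vlan entries must sit at lower
+ * tcam indices than single/triple vlan entries so they are matched
+ * first, which is why a new single/triple tid at or below the last
+ * double vlan entry is rejected with -EINVAL.
+ */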
+
+/* Get first free double vlan ai number */
+static int mvpp2_prs_double_vlan_ai_free_get(struct mvpp2 *priv)
+{
+       int i;
+
+       for (i = 1; i < MVPP2_PRS_DBL_VLANS_MAX; i++) {
+               if (!priv->prs_double_vlans[i])
+                       return i;
+       }
+
+       return -EINVAL;
+}
+
+/* Search for existing double vlan entry */
+static struct mvpp2_prs_entry *mvpp2_prs_double_vlan_find(struct mvpp2 *priv,
+                                                         unsigned short tpid1,
+                                                         unsigned short tpid2)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+
+       /* Go through all entries with MVPP2_PRS_LU_VLAN */
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned int ri_bits;
+               bool match;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   priv->prs_shadow[tid].lu != MVPP2_PRS_LU_VLAN)
+                       continue;
+
+               pe->index = tid;
+               mvpp2_prs_hw_read(priv, pe);
+
+               match = mvpp2_prs_tcam_data_cmp(pe, 0, swab16(tpid1))
+                       && mvpp2_prs_tcam_data_cmp(pe, 4, swab16(tpid2));
+
+               if (!match)
+                       continue;
+
+               ri_bits = mvpp2_prs_sram_ri_get(pe) & MVPP2_PRS_RI_VLAN_MASK;
+               if (ri_bits == MVPP2_PRS_RI_VLAN_DOUBLE)
+                       return pe;
+       }
+       kfree(pe);
+
+       return NULL;
+}
+
+/* Add or update double vlan entry */
+static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
+                                    unsigned short tpid2,
+                                    unsigned int port_map)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid_aux, tid, ai;
+
+       pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
+
+       if (!pe) {
+               /* Create new tcam entry */
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                               MVPP2_PE_LAST_FREE_TID);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+
+               /* Set ai value for new double vlan entry */
+               ai = mvpp2_prs_double_vlan_ai_free_get(priv);
+               if (ai < 0)
+                       return ai;
+
+               /* Get first single/triple vlan tid */
+               for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
+                    tid_aux <= MVPP2_PE_LAST_FREE_TID; tid_aux++) {
+                       unsigned int ri_bits;
+
+                       if (!priv->prs_shadow[tid_aux].valid ||
+                           priv->prs_shadow[tid_aux].lu != MVPP2_PRS_LU_VLAN)
+                               continue;
+
+                       pe->index = tid_aux;
+                       mvpp2_prs_hw_read(priv, pe);
+                       ri_bits = mvpp2_prs_sram_ri_get(pe);
+                       ri_bits &= MVPP2_PRS_RI_VLAN_MASK;
+                       if (ri_bits == MVPP2_PRS_RI_VLAN_SINGLE ||
+                           ri_bits == MVPP2_PRS_RI_VLAN_TRIPLE)
+                               break;
+               }
+
+               if (tid >= tid_aux)
+                       return -ERANGE;
+
+               memset(pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
+               pe->index = tid;
+
+               priv->prs_double_vlans[ai] = true;
+
+               mvpp2_prs_match_etype(pe, 0, tpid1);
+               mvpp2_prs_match_etype(pe, 4, tpid2);
+
+               mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_VLAN);
+               /* Shift 8 bytes - skip 2 vlan tags */
+               mvpp2_prs_sram_shift_set(pe, 2 * MVPP2_VLAN_TAG_LEN,
+                                        MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+               mvpp2_prs_sram_ri_update(pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+                                        MVPP2_PRS_RI_VLAN_MASK);
+               mvpp2_prs_sram_ai_update(pe, ai | MVPP2_PRS_DBL_VLAN_AI_BIT,
+                                        MVPP2_PRS_SRAM_AI_MASK);
+
+               mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_VLAN);
+       }
+
+       /* Update ports' mask */
+       mvpp2_prs_tcam_port_map_set(pe, port_map);
+       mvpp2_prs_hw_write(priv, pe);
+
+       kfree(pe);
+       return 0;
+}
+
+/* IPv4 header parsing for fragmentation and L4 offset */
+static int mvpp2_prs_ip4_proto(struct mvpp2 *priv, unsigned short proto,
+                              unsigned int ri, unsigned int ri_mask)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+           (proto != IPPROTO_IGMP))
+               return -EINVAL;
+
+       /* Fragmented packet */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = tid;
+
+       /* Set next lu to IPv4 */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L4 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct iphdr) - 4,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       mvpp2_prs_sram_ri_update(&pe, ri | MVPP2_PRS_RI_IP_FRAG_MASK,
+                                ri_mask | MVPP2_PRS_RI_IP_FRAG_MASK);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 5, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Not fragmented packet */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+       /* Clear ri before updating */
+       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 2, 0x00, MVPP2_PRS_TCAM_PROTO_MASK_L);
+       mvpp2_prs_tcam_data_byte_set(&pe, 3, 0x00, MVPP2_PRS_TCAM_PROTO_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* IPv4 L3 multicast or broadcast */
+static int mvpp2_prs_ip4_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+       struct mvpp2_prs_entry pe;
+       int mask, tid;
+
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = tid;
+
+       switch (l3_cast) {
+       case MVPP2_PRS_L3_MULTI_CAST:
+               mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV4_MC,
+                                            MVPP2_PRS_IPV4_MC_MASK);
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+                                        MVPP2_PRS_RI_L3_ADDR_MASK);
+               break;
+       case  MVPP2_PRS_L3_BROAD_CAST:
+               mask = MVPP2_PRS_IPV4_BC_MASK;
+               mvpp2_prs_tcam_data_byte_set(&pe, 0, mask, mask);
+               mvpp2_prs_tcam_data_byte_set(&pe, 1, mask, mask);
+               mvpp2_prs_tcam_data_byte_set(&pe, 2, mask, mask);
+               mvpp2_prs_tcam_data_byte_set(&pe, 3, mask, mask);
+               mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_BCAST,
+                                        MVPP2_PRS_RI_L3_ADDR_MASK);
+               break;
+       default:
+               return -EINVAL;
+       }
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Set entries for protocols over IPv6 */
+static int mvpp2_prs_ip6_proto(struct mvpp2 *priv, unsigned short proto,
+                              unsigned int ri, unsigned int ri_mask)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       if ((proto != IPPROTO_TCP) && (proto != IPPROTO_UDP) &&
+           (proto != IPPROTO_ICMPV6) && (proto != IPPROTO_IPIP))
+               return -EINVAL;
+
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = tid;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, ri, ri_mask);
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct ipv6hdr) - 6,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 0, proto, MVPP2_PRS_TCAM_PROTO_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Write HW */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* IPv6 L3 multicast entry */
+static int mvpp2_prs_ip6_cast(struct mvpp2 *priv, unsigned short l3_cast)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       if (l3_cast != MVPP2_PRS_L3_MULTI_CAST)
+               return -EINVAL;
+
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = tid;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_MCAST,
+                                MVPP2_PRS_RI_L3_ADDR_MASK);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Shift back to IPv6 NH */
+       mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 0, MVPP2_PRS_IPV6_MC,
+                                    MVPP2_PRS_IPV6_MC_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Parser per-port initialization */
+static void mvpp2_prs_hw_port_init(struct mvpp2 *priv, int port, int lu_first,
+                                  int lu_max, int offset)
+{
+       u32 val;
+
+       /* Set lookup ID */
+       val = mvpp2_read(priv, MVPP2_PRS_INIT_LOOKUP_REG);
+       val &= ~MVPP2_PRS_PORT_LU_MASK(port);
+       val |=  MVPP2_PRS_PORT_LU_VAL(port, lu_first);
+       mvpp2_write(priv, MVPP2_PRS_INIT_LOOKUP_REG, val);
+
+       /* Set maximum number of loops for packet received from port */
+       val = mvpp2_read(priv, MVPP2_PRS_MAX_LOOP_REG(port));
+       val &= ~MVPP2_PRS_MAX_LOOP_MASK(port);
+       val |= MVPP2_PRS_MAX_LOOP_VAL(port, lu_max);
+       mvpp2_write(priv, MVPP2_PRS_MAX_LOOP_REG(port), val);
+
+       /* Set initial offset for packet header extraction for the first
+        * searching loop
+        */
+       val = mvpp2_read(priv, MVPP2_PRS_INIT_OFFS_REG(port));
+       val &= ~MVPP2_PRS_INIT_OFF_MASK(port);
+       val |= MVPP2_PRS_INIT_OFF_VAL(port, offset);
+       mvpp2_write(priv, MVPP2_PRS_INIT_OFFS_REG(port), val);
+}
+
+/* Default flow entries initialization for all ports */
+static void mvpp2_prs_def_flow_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int port;
+
+       for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+               memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+               mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+               pe.index = MVPP2_PE_FIRST_DEFAULT_FLOW - port;
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(&pe, 0);
+
+               /* Set flow ID */
+               mvpp2_prs_sram_ai_update(&pe, port, MVPP2_PRS_FLOW_ID_MASK);
+               mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+               /* Update shadow table and hw entry */
+               mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_FLOWS);
+               mvpp2_prs_hw_write(priv, &pe);
+       }
+}
+
+/* Set default entry for Marvell Header field */
+static void mvpp2_prs_mh_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+       pe.index = MVPP2_PE_MH_DEFAULT;
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MH);
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_MH_SIZE,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MH);
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Set default entries (placeholders) for promiscuous, non-promiscuous and
+ * multicast MAC addresses
+ */
+static void mvpp2_prs_mac_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+
+       /* Non-promiscuous mode for all ports - DROP unknown packets */
+       pe.index = MVPP2_PE_MAC_NON_PROMISCUOUS;
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_MAC);
+
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_DROP_MASK);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Placeholders only - no ports */
+       mvpp2_prs_mac_drop_all_set(priv, 0, false);
+       mvpp2_prs_mac_promisc_set(priv, 0, false);
+       mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_ALL, 0, false);
+       mvpp2_prs_mac_multi_set(priv, MVPP2_PE_MAC_MC_IP6, 0, false);
+}
+
+/* Set default entries for various types of dsa packets */
+static void mvpp2_prs_dsa_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+
+       /* Untagged EDSA entry - placeholder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+                             MVPP2_PRS_EDSA);
+
+       /* Tagged EDSA entry - placeholder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+       /* Untagged DSA entry - placeholder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_UNTAGGED,
+                             MVPP2_PRS_DSA);
+
+       /* Tagged DSA entry - placeholder */
+       mvpp2_prs_dsa_tag_set(priv, 0, false, MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+       /* Untagged EDSA ethertype entry - placeholder */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+                                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+
+       /* Tagged EDSA ethertype entry - placeholder */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, false,
+                                       MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+
+       /* Untagged DSA ethertype entry */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+                                       MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+
+       /* Tagged DSA ethertype entry */
+       mvpp2_prs_dsa_tag_ethertype_set(priv, 0, true,
+                                       MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+
+       /* Set default entry, in case no DSA or EDSA tag was found */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_DSA);
+       pe.index = MVPP2_PE_DSA_DEFAULT;
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+
+       /* Shift 0 bytes */
+       mvpp2_prs_sram_shift_set(&pe, 0, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_MAC);
+
+       /* Clear all sram ai bits for next iteration */
+       mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       mvpp2_prs_hw_write(priv, &pe);
+}
+
+/* Match basic ethertypes */
+static int mvpp2_prs_etype_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       /* Ethertype: PPPoE */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_PPP_SES);
+
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_PPPOE_HDR_SIZE,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_PPPOE_MASK,
+                                MVPP2_PRS_RI_PPPOE_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_PPPOE_MASK,
+                               MVPP2_PRS_RI_PPPOE_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: ARP */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_ARP);
+
+       /* Generate flow in the next iteration */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_ARP,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = true;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_ARP,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: LBTD */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, MVPP2_IP_LBDT_TYPE);
+
+       /* Generate flow in the next iteration */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                                MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                                MVPP2_PRS_RI_CPU_CODE_MASK |
+                                MVPP2_PRS_RI_UDF3_MASK);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = true;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                               MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                               MVPP2_PRS_RI_CPU_CODE_MASK |
+                               MVPP2_PRS_RI_UDF3_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: IPv4 without options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_IP);
+       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+                                    MVPP2_PRS_IPV4_HEAD_MASK |
+                                    MVPP2_PRS_IPV4_IHL_MASK);
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Skip eth_type + 4 bytes of IP header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: IPv4 with options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+
+       /* Clear tcam data before updating */
+       pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE(MVPP2_ETH_TYPE_LEN)] = 0x0;
+       pe.tcam.byte[MVPP2_PRS_TCAM_DATA_BYTE_EN(MVPP2_ETH_TYPE_LEN)] = 0x0;
+
+       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                    MVPP2_PRS_IPV4_HEAD,
+                                    MVPP2_PRS_IPV4_HEAD_MASK);
+
+       /* Clear ri before updating */
+       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP4_OPT,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Ethertype: IPv6 without options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, ETH_P_IPV6);
+
+       /* Skip DIP of IPV6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 8 +
+                                MVPP2_MAX_L3_ADDR_SIZE,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = false;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_IP6,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default entry for MVPP2_PRS_LU_L2 - Unknown ethtype */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_L2);
+       pe.index = MVPP2_PE_ETH_TYPE_UN;
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Generate flow in the next iteration */
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Set L3 offset even though the L3 protocol is unknown */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_L2);
+       priv->prs_shadow[pe.index].udf = MVPP2_PRS_UDF_L2_DEF;
+       priv->prs_shadow[pe.index].finish = true;
+       mvpp2_prs_shadow_ri_set(priv, pe.index, MVPP2_PRS_RI_L3_UN,
+                               MVPP2_PRS_RI_L3_PROTO_MASK);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
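+/* Illustrative sketch (not part of the original patch): the "IPv4
+ * without options" entry above matches version 4 with IHL == 5 in the
+ * first IP header byte, while the "with options" entry matches the
+ * version nibble alone and so catches any IHL > 5. The software
+ * equivalent of that check (hypothetical helper name):
+ */
+#if 0
+static int mvpp2_ipv4_has_options_sketch(const u8 *iph)
+{
+       if ((iph[0] & 0xf0) != 0x40)
+               return -1;              /* not IPv4 */
+       return (iph[0] & 0x0f) > 5;     /* IHL > 5 words => options */
+}
+#endif
+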
+/* Configure VLAN entries and detect up to 2 successive VLAN tags.
+ * Possible options:
+ * 0x8100, 0x88A8
+ * 0x8100, 0x8100
+ * 0x8100
+ * 0x88A8
+ */
+static int mvpp2_prs_vlan_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int err;
+
+       priv->prs_double_vlans = devm_kcalloc(&pdev->dev,
+                                             MVPP2_PRS_DBL_VLANS_MAX,
+                                             sizeof(bool), GFP_KERNEL);
+       if (!priv->prs_double_vlans)
+               return -ENOMEM;
+
+       /* Double VLAN: 0x8100, 0x88A8 */
+       err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021AD,
+                                       MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Double VLAN: 0x8100, 0x8100 */
+       err = mvpp2_prs_double_vlan_add(priv, ETH_P_8021Q, ETH_P_8021Q,
+                                       MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Single VLAN: 0x88a8 */
+       err = mvpp2_prs_vlan_add(priv, ETH_P_8021AD, MVPP2_PRS_SINGLE_VLAN_AI,
+                                MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Single VLAN: 0x8100 */
+       err = mvpp2_prs_vlan_add(priv, ETH_P_8021Q, MVPP2_PRS_SINGLE_VLAN_AI,
+                                MVPP2_PRS_PORT_MASK);
+       if (err)
+               return err;
+
+       /* Set default double vlan entry */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+       pe.index = MVPP2_PE_VLAN_DBL;
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+       /* Clear ai for next iterations */
+       mvpp2_prs_sram_ai_update(&pe, 0, MVPP2_PRS_SRAM_AI_MASK);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_DOUBLE,
+                                MVPP2_PRS_RI_VLAN_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_DBL_VLAN_AI_BIT,
+                                MVPP2_PRS_DBL_VLAN_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Set default vlan none entry */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_VLAN);
+       pe.index = MVPP2_PE_VLAN_NONE;
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_L2);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_VLAN_NONE,
+                                MVPP2_PRS_RI_VLAN_MASK);
+
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_VLAN);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
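+/* Illustrative sketch (not part of the original patch): the four TPID
+ * combinations accepted above, expressed as a plain software
+ * classifier over the outer and (possible) inner tag protocol IDs.
+ * Names are hypothetical; TPIDs are assumed in host byte order.
+ */
+#if 0
+enum { VLAN_SK_NONE, VLAN_SK_SINGLE, VLAN_SK_DOUBLE };
+
+static int mvpp2_vlan_combo_sketch(u16 outer_tpid, u16 inner_tpid)
+{
+       if (outer_tpid != ETH_P_8021Q && outer_tpid != ETH_P_8021AD)
+               return VLAN_SK_NONE;
+       /* 0x8100,0x88A8 and 0x8100,0x8100 are the double-tag cases */
+       if (outer_tpid == ETH_P_8021Q &&
+           (inner_tpid == ETH_P_8021AD || inner_tpid == ETH_P_8021Q))
+               return VLAN_SK_DOUBLE;
+       return VLAN_SK_SINGLE;
+}
+#endif
+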
+/* Set entries for PPPoE ethertype */
+static int mvpp2_prs_pppoe_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int tid;
+
+       /* IPv4 over PPPoE with options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, PPP_IP);
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4_OPT,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Skip eth_type + 4 bytes of IP header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* IPv4 over PPPoE without options */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       pe.index = tid;
+
+       mvpp2_prs_tcam_data_byte_set(&pe, MVPP2_ETH_TYPE_LEN,
+                                    MVPP2_PRS_IPV4_HEAD | MVPP2_PRS_IPV4_IHL,
+                                    MVPP2_PRS_IPV4_HEAD_MASK |
+                                    MVPP2_PRS_IPV4_IHL_MASK);
+
+       /* Clear ri before updating */
+       pe.sram.word[MVPP2_PRS_SRAM_RI_WORD] = 0x0;
+       pe.sram.word[MVPP2_PRS_SRAM_RI_CTRL_WORD] = 0x0;
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP4,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* IPv6 over PPPoE */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       pe.index = tid;
+
+       mvpp2_prs_match_etype(&pe, 0, PPP_IPV6);
+
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_IP6,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+       /* Skip eth_type + 4 bytes of IPv6 header */
+       mvpp2_prs_sram_shift_set(&pe, MVPP2_ETH_TYPE_LEN + 4,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L3 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Non-IP over PPPoE */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_PPPOE);
+       pe.index = tid;
+
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN,
+                                MVPP2_PRS_RI_L3_PROTO_MASK);
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       /* Set L3 offset even if it's unknown L3 */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L3,
+                                 MVPP2_ETH_TYPE_LEN,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_PPPOE);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
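+/* Illustrative sketch (not part of the original patch): layout of the
+ * PPPoE session encapsulation the entries above parse. The L2 PPPoE
+ * entry in mvpp2_prs_etype_init() shifts past the fixed session header
+ * so the PPP protocol ID (PPP_IP / PPP_IPV6 matched above) sits at
+ * offset 0 for the MVPP2_PRS_LU_PPPOE lookup. Struct name is
+ * hypothetical.
+ */
+#if 0
+struct pppoe_ses_sketch {
+       u8     ver_type;        /* version (4 bits) + type (4 bits) */
+       u8     code;            /* 0x00 for session data */
+       __be16 session_id;
+       __be16 length;          /* PPP payload length */
+       __be16 ppp_proto;       /* PPP_IP (0x0021) or PPP_IPV6 (0x0057) */
+};
+#endif
+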
+/* Initialize entries for IPv4 */
+static int mvpp2_prs_ip4_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int err;
+
+       /* Set entries for TCP, UDP and IGMP over IPv4 */
+       err = mvpp2_prs_ip4_proto(priv, IPPROTO_TCP, MVPP2_PRS_RI_L4_TCP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip4_proto(priv, IPPROTO_UDP, MVPP2_PRS_RI_L4_UDP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip4_proto(priv, IPPROTO_IGMP,
+                                 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                                 MVPP2_PRS_RI_CPU_CODE_MASK |
+                                 MVPP2_PRS_RI_UDF3_MASK);
+       if (err)
+               return err;
+
+       /* IPv4 Broadcast */
+       err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_BROAD_CAST);
+       if (err)
+               return err;
+
+       /* IPv4 Multicast */
+       err = mvpp2_prs_ip4_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+       if (err)
+               return err;
+
+       /* Default IPv4 entry for unknown protocols */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = MVPP2_PE_IP4_PROTO_UN;
+
+       /* Set next lu to IPv4 */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_sram_shift_set(&pe, 12, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+       /* Set L4 offset */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct iphdr) - 4,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+                                MVPP2_PRS_RI_L4_PROTO_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv4 entry for unicast address */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP4);
+       pe.index = MVPP2_PE_IP4_ADDR_UN;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+                                MVPP2_PRS_RI_L3_ADDR_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV4_DIP_AI_BIT,
+                                MVPP2_PRS_IPV4_DIP_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Initialize entries for IPv6 */
+static int mvpp2_prs_ip6_init(struct mvpp2 *priv)
+{
+       struct mvpp2_prs_entry pe;
+       int tid, err;
+
+       /* Set entries for TCP, UDP and ICMP over IPv6 */
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_TCP,
+                                 MVPP2_PRS_RI_L4_TCP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_UDP,
+                                 MVPP2_PRS_RI_L4_UDP,
+                                 MVPP2_PRS_RI_L4_PROTO_MASK);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_ICMPV6,
+                                 MVPP2_PRS_RI_CPU_CODE_RX_SPEC |
+                                 MVPP2_PRS_RI_UDF3_RX_SPECIAL,
+                                 MVPP2_PRS_RI_CPU_CODE_MASK |
+                                 MVPP2_PRS_RI_UDF3_MASK);
+       if (err)
+               return err;
+
+       /* IPv4 is the last header; handled like protocol 6 (TCP) or 17 (UDP) */
+       /* Result Info: UDF7=1, DS lite */
+       err = mvpp2_prs_ip6_proto(priv, IPPROTO_IPIP,
+                                 MVPP2_PRS_RI_UDF7_IP6_LITE,
+                                 MVPP2_PRS_RI_UDF7_MASK);
+       if (err)
+               return err;
+
+       /* IPv6 multicast */
+       err = mvpp2_prs_ip6_cast(priv, MVPP2_PRS_L3_MULTI_CAST);
+       if (err)
+               return err;
+
+       /* Entry for checking hop limit */
+       tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                       MVPP2_PE_LAST_FREE_TID);
+       if (tid < 0)
+               return tid;
+
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = tid;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UN |
+                                MVPP2_PRS_RI_DROP_MASK,
+                                MVPP2_PRS_RI_L3_PROTO_MASK |
+                                MVPP2_PRS_RI_DROP_MASK);
+
+       mvpp2_prs_tcam_data_byte_set(&pe, 1, 0x00, MVPP2_PRS_IPV6_HOP_MASK);
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv6 entry for unknown protocols */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = MVPP2_PE_IP6_PROTO_UN;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+                                MVPP2_PRS_RI_L4_PROTO_MASK);
+       /* Set L4 offset relative to the current place */
+       mvpp2_prs_sram_offset_set(&pe, MVPP2_PRS_SRAM_UDF_TYPE_L4,
+                                 sizeof(struct ipv6hdr) - 4,
+                                 MVPP2_PRS_SRAM_OP_SEL_UDF_ADD);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv6 entry for unknown ext protocols */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = MVPP2_PE_IP6_EXT_PROTO_UN;
+
+       /* Finished: go to flowid generation */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_FLOWS);
+       mvpp2_prs_sram_bits_set(&pe, MVPP2_PRS_SRAM_LU_GEN_BIT, 1);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L4_OTHER,
+                                MVPP2_PRS_RI_L4_PROTO_MASK);
+
+       mvpp2_prs_tcam_ai_update(&pe, MVPP2_PRS_IPV6_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP4);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       /* Default IPv6 entry for unicast address */
+       memset(&pe, 0, sizeof(struct mvpp2_prs_entry));
+       mvpp2_prs_tcam_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       pe.index = MVPP2_PE_IP6_ADDR_UN;
+
+       /* Finished: go to IPv6 again */
+       mvpp2_prs_sram_next_lu_set(&pe, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_sram_ri_update(&pe, MVPP2_PRS_RI_L3_UCAST,
+                                MVPP2_PRS_RI_L3_ADDR_MASK);
+       mvpp2_prs_sram_ai_update(&pe, MVPP2_PRS_IPV6_NO_EXT_AI_BIT,
+                                MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Shift back to IPV6 NH */
+       mvpp2_prs_sram_shift_set(&pe, -18, MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+       mvpp2_prs_tcam_ai_update(&pe, 0, MVPP2_PRS_IPV6_NO_EXT_AI_BIT);
+       /* Unmask all ports */
+       mvpp2_prs_tcam_port_map_set(&pe, MVPP2_PRS_PORT_MASK);
+
+       /* Update shadow table and hw entry */
+       mvpp2_prs_shadow_set(priv, pe.index, MVPP2_PRS_LU_IP6);
+       mvpp2_prs_hw_write(priv, &pe);
+
+       return 0;
+}
+
+/* Parser default initialization */
+static int mvpp2_prs_default_init(struct platform_device *pdev,
+                                 struct mvpp2 *priv)
+{
+       int err, index, i;
+
+       /* Enable tcam table */
+       mvpp2_write(priv, MVPP2_PRS_TCAM_CTRL_REG, MVPP2_PRS_TCAM_EN_MASK);
+
+       /* Clear all tcam and sram entries */
+       for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++) {
+               mvpp2_write(priv, MVPP2_PRS_TCAM_IDX_REG, index);
+               for (i = 0; i < MVPP2_PRS_TCAM_WORDS; i++)
+                       mvpp2_write(priv, MVPP2_PRS_TCAM_DATA_REG(i), 0);
+
+               mvpp2_write(priv, MVPP2_PRS_SRAM_IDX_REG, index);
+               for (i = 0; i < MVPP2_PRS_SRAM_WORDS; i++)
+                       mvpp2_write(priv, MVPP2_PRS_SRAM_DATA_REG(i), 0);
+       }
+
+       /* Invalidate all tcam entries */
+       for (index = 0; index < MVPP2_PRS_TCAM_SRAM_SIZE; index++)
+               mvpp2_prs_hw_inv(priv, index);
+
+       priv->prs_shadow = devm_kcalloc(&pdev->dev, MVPP2_PRS_TCAM_SRAM_SIZE,
+                                       sizeof(struct mvpp2_prs_shadow),
+                                       GFP_KERNEL);
+       if (!priv->prs_shadow)
+               return -ENOMEM;
+
+       /* Always start from lookup = 0 */
+       for (index = 0; index < MVPP2_MAX_PORTS; index++)
+               mvpp2_prs_hw_port_init(priv, index, MVPP2_PRS_LU_MH,
+                                      MVPP2_PRS_PORT_LU_MAX, 0);
+
+       mvpp2_prs_def_flow_init(priv);
+
+       mvpp2_prs_mh_init(priv);
+
+       mvpp2_prs_mac_init(priv);
+
+       mvpp2_prs_dsa_init(priv);
+
+       err = mvpp2_prs_etype_init(priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_vlan_init(pdev, priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_pppoe_init(priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip6_init(priv);
+       if (err)
+               return err;
+
+       err = mvpp2_prs_ip4_init(priv);
+       if (err)
+               return err;
+
+       return 0;
+}
+
+/* Compare MAC DA with tcam entry data */
+static bool mvpp2_prs_mac_range_equals(struct mvpp2_prs_entry *pe,
+                                      const u8 *da, unsigned char *mask)
+{
+       unsigned char tcam_byte, tcam_mask;
+       int index;
+
+       for (index = 0; index < ETH_ALEN; index++) {
+               mvpp2_prs_tcam_data_byte_get(pe, index, &tcam_byte, &tcam_mask);
+               if (tcam_mask != mask[index])
+                       return false;
+
+               if ((tcam_mask & tcam_byte) != (da[index] & mask[index]))
+                       return false;
+       }
+
+       return true;
+}
+
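+/* Illustrative sketch (not part of the original patch): the helper
+ * above is a masked comparison against the TCAM data; with an all-0xff
+ * mask, as used by mvpp2_prs_mac_da_accept() below, it degenerates to
+ * an exact MAC match. A plain-memory equivalent (hypothetical name):
+ */
+#if 0
+static bool mvpp2_mac_masked_eq_sketch(const u8 *a, const u8 *b,
+                                      const u8 *mask)
+{
+       int i;
+
+       for (i = 0; i < ETH_ALEN; i++)
+               if ((a[i] & mask[i]) != (b[i] & mask[i]))
+                       return false;
+       return true;
+}
+#endif
+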
+/* Find tcam entry with matched pair <MAC DA, port> */
+static struct mvpp2_prs_entry *
+mvpp2_prs_mac_da_range_find(struct mvpp2 *priv, int pmap, const u8 *da,
+                           unsigned char *mask, int udf_type)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+       if (!pe)
+               return NULL;
+       mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+
+       /* Go through all entries with MVPP2_PRS_LU_MAC */
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned int entry_pmap;
+
+               if (!priv->prs_shadow[tid].valid ||
+                   (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+                   (priv->prs_shadow[tid].udf != udf_type))
+                       continue;
+
+               pe->index = tid;
+               mvpp2_prs_hw_read(priv, pe);
+               entry_pmap = mvpp2_prs_tcam_port_map_get(pe);
+
+               if (mvpp2_prs_mac_range_equals(pe, da, mask) &&
+                   entry_pmap == pmap)
+                       return pe;
+       }
+       kfree(pe);
+
+       return NULL;
+}
+
+/* Update parser's mac da entry */
+static int mvpp2_prs_mac_da_accept(struct mvpp2 *priv, int port,
+                                  const u8 *da, bool add)
+{
+       struct mvpp2_prs_entry *pe;
+       unsigned int pmap, len, ri;
+       unsigned char mask[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+       int tid;
+
+       /* Scan TCAM and see if an entry with this <MAC DA, port> already exists */
+       pe = mvpp2_prs_mac_da_range_find(priv, (1 << port), da, mask,
+                                        MVPP2_PRS_UDF_MAC_DEF);
+
+       /* No such entry */
+       if (!pe) {
+               if (!add)
+                       return 0;
+
+               /* Create new TCAM entry */
+               /* Find the first range MAC entry */
+               for (tid = MVPP2_PE_FIRST_FREE_TID;
+                    tid <= MVPP2_PE_LAST_FREE_TID; tid++)
+                       if (priv->prs_shadow[tid].valid &&
+                           (priv->prs_shadow[tid].lu == MVPP2_PRS_LU_MAC) &&
+                           (priv->prs_shadow[tid].udf ==
+                                                      MVPP2_PRS_UDF_MAC_RANGE))
+                               break;
+
+               /* Get a free entry between the first free TID and the
+                * first range MAC entry
+                */
+               tid = mvpp2_prs_tcam_first_free(priv, MVPP2_PE_FIRST_FREE_TID,
+                                               tid - 1);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
+               pe->index = tid;
+
+               /* Mask all ports */
+               mvpp2_prs_tcam_port_map_set(pe, 0);
+       }
+
+       /* Update port mask */
+       mvpp2_prs_tcam_port_set(pe, port, add);
+
+       /* Invalidate the entry if no ports are left enabled */
+       pmap = mvpp2_prs_tcam_port_map_get(pe);
+       if (pmap == 0) {
+               if (add) {
+                       kfree(pe);
+                       return -EINVAL;
+               }
+               mvpp2_prs_hw_inv(priv, pe->index);
+               priv->prs_shadow[pe->index].valid = false;
+               kfree(pe);
+               return 0;
+       }
+
+       /* Continue - set next lookup */
+       mvpp2_prs_sram_next_lu_set(pe, MVPP2_PRS_LU_DSA);
+
+       /* Set match on DA */
+       len = ETH_ALEN;
+       while (len--)
+               mvpp2_prs_tcam_data_byte_set(pe, len, da[len], 0xff);
+
+       /* Set result info bits */
+       if (is_broadcast_ether_addr(da))
+               ri = MVPP2_PRS_RI_L2_BCAST;
+       else if (is_multicast_ether_addr(da))
+               ri = MVPP2_PRS_RI_L2_MCAST;
+       else
+               ri = MVPP2_PRS_RI_L2_UCAST | MVPP2_PRS_RI_MAC_ME_MASK;
+
+       mvpp2_prs_sram_ri_update(pe, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+                                MVPP2_PRS_RI_MAC_ME_MASK);
+       mvpp2_prs_shadow_ri_set(priv, pe->index, ri, MVPP2_PRS_RI_L2_CAST_MASK |
+                               MVPP2_PRS_RI_MAC_ME_MASK);
+
+       /* Shift to ethertype */
+       mvpp2_prs_sram_shift_set(pe, 2 * ETH_ALEN,
+                                MVPP2_PRS_SRAM_OP_SEL_SHIFT_ADD);
+
+       /* Update shadow table and hw entry */
+       priv->prs_shadow[pe->index].udf = MVPP2_PRS_UDF_MAC_DEF;
+       mvpp2_prs_shadow_set(priv, pe->index, MVPP2_PRS_LU_MAC);
+       mvpp2_prs_hw_write(priv, pe);
+
+       kfree(pe);
+
+       return 0;
+}
+
+static int mvpp2_prs_update_mac_da(struct net_device *dev, const u8 *da)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int err;
+
+       /* Remove old parser entry */
+       err = mvpp2_prs_mac_da_accept(port->priv, port->id, dev->dev_addr,
+                                     false);
+       if (err)
+               return err;
+
+       /* Add new parser entry */
+       err = mvpp2_prs_mac_da_accept(port->priv, port->id, da, true);
+       if (err)
+               return err;
+
+       /* Set addr in the device */
+       ether_addr_copy(dev->dev_addr, da);
+
+       return 0;
+}
+
+/* Delete all the port's simple (i.e. non-range) multicast entries */
+static void mvpp2_prs_mcast_del_all(struct mvpp2 *priv, int port)
+{
+       struct mvpp2_prs_entry pe;
+       int index, tid;
+
+       for (tid = MVPP2_PE_FIRST_FREE_TID;
+            tid <= MVPP2_PE_LAST_FREE_TID; tid++) {
+               unsigned char da[ETH_ALEN], da_mask[ETH_ALEN];
+
+               if (!priv->prs_shadow[tid].valid ||
+                   (priv->prs_shadow[tid].lu != MVPP2_PRS_LU_MAC) ||
+                   (priv->prs_shadow[tid].udf != MVPP2_PRS_UDF_MAC_DEF))
+                       continue;
+
+               /* Only simple mac entries */
+               pe.index = tid;
+               mvpp2_prs_hw_read(priv, &pe);
+
+               /* Read mac addr from entry */
+               for (index = 0; index < ETH_ALEN; index++)
+                       mvpp2_prs_tcam_data_byte_get(&pe, index, &da[index],
+                                                    &da_mask[index]);
+
+               if (is_multicast_ether_addr(da) && !is_broadcast_ether_addr(da))
+                       /* Delete this entry */
+                       mvpp2_prs_mac_da_accept(priv, port, da, false);
+       }
+}
+
+static int mvpp2_prs_tag_mode_set(struct mvpp2 *priv, int port, int type)
+{
+       switch (type) {
+       case MVPP2_TAG_TYPE_EDSA:
+               /* Add port to EDSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+               /* Remove port from DSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+               break;
+
+       case MVPP2_TAG_TYPE_DSA:
+               /* Add port to DSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, true,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+               /* Remove port from EDSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+               break;
+
+       case MVPP2_TAG_TYPE_MH:
+       case MVPP2_TAG_TYPE_NONE:
+               /* Remove port from EDSA and DSA entries */
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_DSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_TAGGED, MVPP2_PRS_EDSA);
+               mvpp2_prs_dsa_tag_set(priv, port, false,
+                                     MVPP2_PRS_UNTAGGED, MVPP2_PRS_EDSA);
+               break;
+
+       default:
+               if ((type < 0) || (type > MVPP2_TAG_TYPE_EDSA))
+                       return -EINVAL;
+       }
+
+       return 0;
+}
+
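+/* Illustrative sketch (not part of the original patch): a port runs in
+ * exactly one tag mode at a time; switching modes moves the port's bit
+ * between the DSA and EDSA entries set up in mvpp2_prs_dsa_init().
+ * Hypothetical usage:
+ */
+#if 0
+static void mvpp2_tag_mode_sketch(struct mvpp2 *priv)
+{
+       /* Move port 0 to EDSA tagging, then back to plain Ethernet */
+       mvpp2_prs_tag_mode_set(priv, 0, MVPP2_TAG_TYPE_EDSA);
+       mvpp2_prs_tag_mode_set(priv, 0, MVPP2_TAG_TYPE_NONE);
+}
+#endif
+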
+/* Set prs flow for the port */
+static int mvpp2_prs_def_flow(struct mvpp2_port *port)
+{
+       struct mvpp2_prs_entry *pe;
+       int tid;
+
+       pe = mvpp2_prs_flow_find(port->priv, port->id);
+
+       /* No such entry exists */
+       if (!pe) {
+               /* Search all entries from last to first */
+               tid = mvpp2_prs_tcam_first_free(port->priv,
+                                               MVPP2_PE_LAST_FREE_TID,
+                                               MVPP2_PE_FIRST_FREE_TID);
+               if (tid < 0)
+                       return tid;
+
+               pe = kzalloc(sizeof(*pe), GFP_KERNEL);
+               if (!pe)
+                       return -ENOMEM;
+
+               mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_FLOWS);
+               pe->index = tid;
+
+               /* Set flow ID */
+               mvpp2_prs_sram_ai_update(pe, port->id, MVPP2_PRS_FLOW_ID_MASK);
+               mvpp2_prs_sram_bits_set(pe, MVPP2_PRS_SRAM_LU_DONE_BIT, 1);
+
+               /* Update shadow table */
+               mvpp2_prs_shadow_set(port->priv, pe->index, MVPP2_PRS_LU_FLOWS);
+       }
+
+       mvpp2_prs_tcam_port_map_set(pe, (1 << port->id));
+       mvpp2_prs_hw_write(port->priv, pe);
+       kfree(pe);
+
+       return 0;
+}
+
+/* Classifier configuration routines */
+
+/* Update classification flow table registers */
+static void mvpp2_cls_flow_write(struct mvpp2 *priv,
+                                struct mvpp2_cls_flow_entry *fe)
+{
+       mvpp2_write(priv, MVPP2_CLS_FLOW_INDEX_REG, fe->index);
+       mvpp2_write(priv, MVPP2_CLS_FLOW_TBL0_REG,  fe->data[0]);
+       mvpp2_write(priv, MVPP2_CLS_FLOW_TBL1_REG,  fe->data[1]);
+       mvpp2_write(priv, MVPP2_CLS_FLOW_TBL2_REG,  fe->data[2]);
+}
+
+/* Update classification lookup table register */
+static void mvpp2_cls_lookup_write(struct mvpp2 *priv,
+                                  struct mvpp2_cls_lookup_entry *le)
+{
+       u32 val;
+
+       val = (le->way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | le->lkpid;
+       mvpp2_write(priv, MVPP2_CLS_LKP_INDEX_REG, val);
+       mvpp2_write(priv, MVPP2_CLS_LKP_TBL_REG, le->data);
+}
+
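+/* Illustrative sketch (not part of the original patch): the lookup
+ * index register packs <way, lkpid> into a single value, so every
+ * lookup ID has two independently programmable entries (way 0 and
+ * way 1), as mvpp2_cls_init() below programs. Hypothetical helper:
+ */
+#if 0
+static u32 mvpp2_cls_lkp_index_sketch(u32 way, u32 lkpid)
+{
+       return (way << MVPP2_CLS_LKP_INDEX_WAY_OFFS) | lkpid;
+}
+#endif
+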
+/* Classifier default initialization */
+static void mvpp2_cls_init(struct mvpp2 *priv)
+{
+       struct mvpp2_cls_lookup_entry le;
+       struct mvpp2_cls_flow_entry fe;
+       int index;
+
+       /* Enable classifier */
+       mvpp2_write(priv, MVPP2_CLS_MODE_REG, MVPP2_CLS_MODE_ACTIVE_MASK);
+
+       /* Clear classifier flow table */
+       memset(&fe.data, 0, sizeof(fe.data));
+       for (index = 0; index < MVPP2_CLS_FLOWS_TBL_SIZE; index++) {
+               fe.index = index;
+               mvpp2_cls_flow_write(priv, &fe);
+       }
+
+       /* Clear classifier lookup table */
+       le.data = 0;
+       for (index = 0; index < MVPP2_CLS_LKP_TBL_SIZE; index++) {
+               le.lkpid = index;
+               le.way = 0;
+               mvpp2_cls_lookup_write(priv, &le);
+
+               le.way = 1;
+               mvpp2_cls_lookup_write(priv, &le);
+       }
+}
+
+static void mvpp2_cls_port_config(struct mvpp2_port *port)
+{
+       struct mvpp2_cls_lookup_entry le;
+       u32 val;
+
+       /* Set way for the port */
+       val = mvpp2_read(port->priv, MVPP2_CLS_PORT_WAY_REG);
+       val &= ~MVPP2_CLS_PORT_WAY_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_CLS_PORT_WAY_REG, val);
+
+       /* Pick the entry to be accessed in lookup ID decoding table
+        * according to the way and lkpid.
+        */
+       le.lkpid = port->id;
+       le.way = 0;
+       le.data = 0;
+
+       /* Set initial CPU queue for receiving packets */
+       le.data &= ~MVPP2_CLS_LKP_TBL_RXQ_MASK;
+       le.data |= port->first_rxq;
+
+       /* Disable classification engines */
+       le.data &= ~MVPP2_CLS_LKP_TBL_LOOKUP_EN_MASK;
+
+       /* Update lookup ID table entry */
+       mvpp2_cls_lookup_write(port->priv, &le);
+}
+
+/* Set CPU queue number for oversize packets */
+static void mvpp2_cls_oversize_rxq_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       mvpp2_write(port->priv, MVPP2_CLS_OVERSIZE_RXQ_LOW_REG(port->id),
+                   port->first_rxq & MVPP2_CLS_OVERSIZE_RXQ_LOW_MASK);
+
+       mvpp2_write(port->priv, MVPP2_CLS_SWFWD_P2HQ_REG(port->id),
+                   (port->first_rxq >> MVPP2_CLS_OVERSIZE_RXQ_LOW_BITS));
+
+       val = mvpp2_read(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG);
+       val |= MVPP2_CLS_SWFWD_PCTRL_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_CLS_SWFWD_PCTRL_REG, val);
+}
+
+/* Buffer Manager configuration routines */
+
+/* Create pool */
+static int mvpp2_bm_pool_create(struct platform_device *pdev,
+                               struct mvpp2 *priv,
+                               struct mvpp2_bm_pool *bm_pool, int size)
+{
+       int size_bytes;
+       u32 val;
+
+       size_bytes = sizeof(u32) * size;
+       bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes,
+                                               &bm_pool->phys_addr,
+                                               GFP_KERNEL);
+       if (!bm_pool->virt_addr)
+               return -ENOMEM;
+
+       if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVPP2_BM_POOL_PTR_ALIGN)) {
+               dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr,
+                                 bm_pool->phys_addr);
+               dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n",
+                       bm_pool->id, MVPP2_BM_POOL_PTR_ALIGN);
+               return -ENOMEM;
+       }
+
+       mvpp2_write(priv, MVPP2_BM_POOL_BASE_REG(bm_pool->id),
+                   bm_pool->phys_addr);
+       mvpp2_write(priv, MVPP2_BM_POOL_SIZE_REG(bm_pool->id), size);
+
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+       val |= MVPP2_BM_START_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+       bm_pool->type = MVPP2_BM_FREE;
+       bm_pool->size = size;
+       bm_pool->pkt_size = 0;
+       bm_pool->buf_num = 0;
+       atomic_set(&bm_pool->in_use, 0);
+       spin_lock_init(&bm_pool->lock);
+
+       return 0;
+}
+
+/* Set pool buffer size */
+static void mvpp2_bm_pool_bufsize_set(struct mvpp2 *priv,
+                                     struct mvpp2_bm_pool *bm_pool,
+                                     int buf_size)
+{
+       u32 val;
+
+       bm_pool->buf_size = buf_size;
+
+       val = ALIGN(buf_size, 1 << MVPP2_POOL_BUF_SIZE_OFFSET);
+       mvpp2_write(priv, MVPP2_POOL_BUF_SIZE_REG(bm_pool->id), val);
+}
+
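+/* Illustrative sketch (not part of the original patch): ALIGN() above
+ * rounds the buffer size up to the hardware granularity of
+ * (1 << MVPP2_POOL_BUF_SIZE_OFFSET). Assuming, for example, a
+ * granularity of 32 bytes, 1518 rounds up to 1536:
+ */
+#if 0
+static void mvpp2_align_sketch(void)
+{
+       /* ALIGN(x, a) == ((x) + (a) - 1) & ~((a) - 1), power-of-two a */
+       BUILD_BUG_ON(ALIGN(1518, 32) != 1536);
+       BUILD_BUG_ON(ALIGN(64, 32) != 64);
+}
+#endif
+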
+/* Free all buffers from the pool */
+static void mvpp2_bm_bufs_free(struct mvpp2 *priv, struct mvpp2_bm_pool *bm_pool)
+{
+       int i;
+
+       for (i = 0; i < bm_pool->buf_num; i++) {
+               u32 vaddr;
+
+               /* Get buffer virtual address (indirect access) */
+               mvpp2_read(priv, MVPP2_BM_PHY_ALLOC_REG(bm_pool->id));
+               vaddr = mvpp2_read(priv, MVPP2_BM_VIRT_ALLOC_REG);
+               if (!vaddr)
+                       break;
+               dev_kfree_skb_any((struct sk_buff *)vaddr);
+       }
+
+       /* Update BM driver with number of buffers removed from pool */
+       bm_pool->buf_num -= i;
+}
+
+/* Cleanup pool */
+static int mvpp2_bm_pool_destroy(struct platform_device *pdev,
+                                struct mvpp2 *priv,
+                                struct mvpp2_bm_pool *bm_pool)
+{
+       u32 val;
+
+       mvpp2_bm_bufs_free(priv, bm_pool);
+       if (bm_pool->buf_num) {
+               WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id);
+               return 0;
+       }
+
+       val = mvpp2_read(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id));
+       val |= MVPP2_BM_STOP_MASK;
+       mvpp2_write(priv, MVPP2_BM_POOL_CTRL_REG(bm_pool->id), val);
+
+       dma_free_coherent(&pdev->dev, sizeof(u32) * bm_pool->size,
+                         bm_pool->virt_addr,
+                         bm_pool->phys_addr);
+       return 0;
+}
+
+static int mvpp2_bm_pools_init(struct platform_device *pdev,
+                              struct mvpp2 *priv)
+{
+       int i, err, size;
+       struct mvpp2_bm_pool *bm_pool;
+
+       /* Create all pools with maximum size */
+       size = MVPP2_BM_POOL_SIZE_MAX;
+       for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+               bm_pool = &priv->bm_pools[i];
+               bm_pool->id = i;
+               err = mvpp2_bm_pool_create(pdev, priv, bm_pool, size);
+               if (err)
+                       goto err_unroll_pools;
+               mvpp2_bm_pool_bufsize_set(priv, bm_pool, 0);
+       }
+       return 0;
+
+err_unroll_pools:
+       dev_err(&pdev->dev, "failed to create BM pool %d, size %d\n", i, size);
+       for (i = i - 1; i >= 0; i--)
+               mvpp2_bm_pool_destroy(pdev, priv, &priv->bm_pools[i]);
+       return err;
+}
+
+static int mvpp2_bm_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+       int i, err;
+
+       for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+               /* Mask all BM interrupts */
+               mvpp2_write(priv, MVPP2_BM_INTR_MASK_REG(i), 0);
+               /* Clear BM cause register */
+               mvpp2_write(priv, MVPP2_BM_INTR_CAUSE_REG(i), 0);
+       }
+
+       /* Allocate and initialize BM pools */
+       priv->bm_pools = devm_kcalloc(&pdev->dev, MVPP2_BM_POOLS_NUM,
+                                     sizeof(struct mvpp2_bm_pool),
+                                     GFP_KERNEL);
+       if (!priv->bm_pools)
+               return -ENOMEM;
+
+       err = mvpp2_bm_pools_init(pdev, priv);
+       if (err < 0)
+               return err;
+       return 0;
+}
+
+/* Attach long pool to rxq */
+static void mvpp2_rxq_long_pool_set(struct mvpp2_port *port,
+                                   int lrxq, int long_pool)
+{
+       u32 val;
+       int prxq;
+
+       /* Get queue physical ID */
+       prxq = port->rxqs[lrxq]->id;
+
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~MVPP2_RXQ_POOL_LONG_MASK;
+       val |= ((long_pool << MVPP2_RXQ_POOL_LONG_OFFS) &
+                   MVPP2_RXQ_POOL_LONG_MASK);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Attach short pool to rxq */
+static void mvpp2_rxq_short_pool_set(struct mvpp2_port *port,
+                                    int lrxq, int short_pool)
+{
+       u32 val;
+       int prxq;
+
+       /* Get queue physical ID */
+       prxq = port->rxqs[lrxq]->id;
+
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~MVPP2_RXQ_POOL_SHORT_MASK;
+       val |= ((short_pool << MVPP2_RXQ_POOL_SHORT_OFFS) &
+                   MVPP2_RXQ_POOL_SHORT_MASK);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Allocate skb for BM pool */
+static struct sk_buff *mvpp2_skb_alloc(struct mvpp2_port *port,
+                                      struct mvpp2_bm_pool *bm_pool,
+                                      dma_addr_t *buf_phys_addr,
+                                      gfp_t gfp_mask)
+{
+       struct sk_buff *skb;
+       dma_addr_t phys_addr;
+
+       skb = __dev_alloc_skb(bm_pool->pkt_size, gfp_mask);
+       if (!skb)
+               return NULL;
+
+       phys_addr = dma_map_single(port->dev->dev.parent, skb->head,
+                                  MVPP2_RX_BUF_SIZE(bm_pool->pkt_size),
+                                  DMA_FROM_DEVICE);
+       if (unlikely(dma_mapping_error(port->dev->dev.parent, phys_addr))) {
+               dev_kfree_skb_any(skb);
+               return NULL;
+       }
+       *buf_phys_addr = phys_addr;
+
+       return skb;
+}
+
+/* Set pool number in a BM cookie */
+static inline u32 mvpp2_bm_cookie_pool_set(u32 cookie, int pool)
+{
+       u32 bm;
+
+       bm = cookie & ~(0xFF << MVPP2_BM_COOKIE_POOL_OFFS);
+       bm |= ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS);
+
+       return bm;
+}
+
+/* Get pool number from a BM cookie */
+static inline int mvpp2_bm_cookie_pool_get(u32 cookie)
+{
+       return (cookie >> MVPP2_BM_COOKIE_POOL_OFFS) & 0xFF;
+}
+
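+/* Illustrative sketch (not part of the original patch): the two cookie
+ * helpers above round-trip the pool number through the cookie bits.
+ * Hypothetical self-check:
+ */
+#if 0
+static void mvpp2_bm_cookie_sketch(void)
+{
+       u32 bm = mvpp2_bm_cookie_pool_set(0, 3);
+
+       WARN_ON(mvpp2_bm_cookie_pool_get(bm) != 3);
+}
+#endif
+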
+/* Release buffer to BM */
+static inline void mvpp2_bm_pool_put(struct mvpp2_port *port, int pool,
+                                    u32 buf_phys_addr, u32 buf_virt_addr)
+{
+       mvpp2_write(port->priv, MVPP2_BM_VIRT_RLS_REG, buf_virt_addr);
+       mvpp2_write(port->priv, MVPP2_BM_PHY_RLS_REG(pool), buf_phys_addr);
+}
+
+/* Release multicast buffer */
+static void mvpp2_bm_pool_mc_put(struct mvpp2_port *port, int pool,
+                                u32 buf_phys_addr, u32 buf_virt_addr,
+                                int mc_id)
+{
+       u32 val = 0;
+
+       val |= (mc_id & MVPP2_BM_MC_ID_MASK);
+       mvpp2_write(port->priv, MVPP2_BM_MC_RLS_REG, val);
+
+       mvpp2_bm_pool_put(port, pool,
+                         buf_phys_addr | MVPP2_BM_PHY_RLS_MC_BUFF_MASK,
+                         buf_virt_addr);
+}
+
+/* Refill BM pool */
+static void mvpp2_pool_refill(struct mvpp2_port *port, u32 bm,
+                             u32 phys_addr, u32 cookie)
+{
+       int pool = mvpp2_bm_cookie_pool_get(bm);
+
+       mvpp2_bm_pool_put(port, pool, phys_addr, cookie);
+}
+
+/* Allocate buffers for the pool */
+static int mvpp2_bm_bufs_add(struct mvpp2_port *port,
+                            struct mvpp2_bm_pool *bm_pool, int buf_num)
+{
+       struct sk_buff *skb;
+       int i, buf_size, total_size;
+       u32 bm;
+       dma_addr_t phys_addr;
+
+       buf_size = MVPP2_RX_BUF_SIZE(bm_pool->pkt_size);
+       total_size = MVPP2_RX_TOTAL_SIZE(buf_size);
+
+       if (buf_num < 0 ||
+           (buf_num + bm_pool->buf_num > bm_pool->size)) {
+               netdev_err(port->dev,
+                          "cannot allocate %d buffers for pool %d\n",
+                          buf_num, bm_pool->id);
+               return 0;
+       }
+
+       bm = mvpp2_bm_cookie_pool_set(0, bm_pool->id);
+       for (i = 0; i < buf_num; i++) {
+               skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_KERNEL);
+               if (!skb)
+                       break;
+
+               mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+       }
+
+       /* Update BM driver with number of buffers added to pool */
+       bm_pool->buf_num += i;
+       bm_pool->in_use_thresh = bm_pool->buf_num / 4;
+
+       netdev_dbg(port->dev,
+                  "%s pool %d: pkt_size=%4d, buf_size=%4d, total_size=%4d\n",
+                  bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+                  bm_pool->id, bm_pool->pkt_size, buf_size, total_size);
+
+       netdev_dbg(port->dev,
+                  "%s pool %d: %d of %d buffers added\n",
+                  bm_pool->type == MVPP2_BM_SWF_SHORT ? "short" : " long",
+                  bm_pool->id, i, buf_num);
+       return i;
+}
+
+/* Notify the driver that BM pool is being used as specific type and return the
+ * pool pointer on success
+ */
+static struct mvpp2_bm_pool *
+mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
+                 int pkt_size)
+{
+       unsigned long flags = 0;
+       struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
+       int num;
+
+       if (new_pool->type != MVPP2_BM_FREE && new_pool->type != type) {
+               netdev_err(port->dev, "mixing pool types is forbidden\n");
+               return NULL;
+       }
+
+       spin_lock_irqsave(&new_pool->lock, flags);
+
+       if (new_pool->type == MVPP2_BM_FREE)
+               new_pool->type = type;
+
+       /* Allocate buffers in case BM pool is used as long pool, but packet
+        * size doesn't match MTU or BM pool hasn't been used yet
+        */
+       if (((type == MVPP2_BM_SWF_LONG) && (pkt_size > new_pool->pkt_size)) ||
+           (new_pool->pkt_size == 0)) {
+               int pkts_num;
+
+               /* Set default buffer number or free all the buffers in case
+                * the pool is not empty
+                */
+               pkts_num = new_pool->buf_num;
+               if (pkts_num == 0)
+                       pkts_num = type == MVPP2_BM_SWF_LONG ?
+                                  MVPP2_BM_LONG_BUF_NUM :
+                                  MVPP2_BM_SHORT_BUF_NUM;
+               else
+                       mvpp2_bm_bufs_free(port->priv, new_pool);
+
+               new_pool->pkt_size = pkt_size;
+
+               /* Allocate buffers for this pool */
+               num = mvpp2_bm_bufs_add(port, new_pool, pkts_num);
+               if (num != pkts_num) {
+                       WARN(1, "pool %d: %d of %d allocated\n",
+                            new_pool->id, num, pkts_num);
+                       /* We need to undo the bufs_add() allocations */
+                       spin_unlock_irqrestore(&new_pool->lock, flags);
+                       return NULL;
+               }
+       }
+
+       mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
+                                 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
+
+       spin_unlock_irqrestore(&new_pool->lock, flags);
+
+       return new_pool;
+}
+
+/* Initialize pools for swf */
+static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
+{
+       unsigned long flags = 0;
+       int rxq;
+
+       if (!port->pool_long) {
+               port->pool_long =
+                      mvpp2_bm_pool_use(port, MVPP2_BM_SWF_LONG_POOL(port->id),
+                                        MVPP2_BM_SWF_LONG,
+                                        port->pkt_size);
+               if (!port->pool_long)
+                       return -ENOMEM;
+
+               spin_lock_irqsave(&port->pool_long->lock, flags);
+               port->pool_long->port_map |= (1 << port->id);
+               spin_unlock_irqrestore(&port->pool_long->lock, flags);
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
+       }
+
+       if (!port->pool_short) {
+               port->pool_short =
+                       mvpp2_bm_pool_use(port, MVPP2_BM_SWF_SHORT_POOL,
+                                         MVPP2_BM_SWF_SHORT,
+                                         MVPP2_BM_SHORT_PKT_SIZE);
+               if (!port->pool_short)
+                       return -ENOMEM;
+
+               spin_lock_irqsave(&port->pool_short->lock, flags);
+               port->pool_short->port_map |= (1 << port->id);
+               spin_unlock_irqrestore(&port->pool_short->lock, flags);
+
+               for (rxq = 0; rxq < rxq_number; rxq++)
+                       mvpp2_rxq_short_pool_set(port, rxq,
+                                                port->pool_short->id);
+       }
+
+       return 0;
+}
+
+static int mvpp2_bm_update_mtu(struct net_device *dev, int mtu)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_bm_pool *port_pool = port->pool_long;
+       int num, pkts_num = port_pool->buf_num;
+       int pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+
+       /* Update BM pool with new buffer size */
+       mvpp2_bm_bufs_free(port->priv, port_pool);
+       if (port_pool->buf_num) {
+               WARN(1, "cannot free all buffers in pool %d\n", port_pool->id);
+               return -EIO;
+       }
+
+       port_pool->pkt_size = pkt_size;
+       num = mvpp2_bm_bufs_add(port, port_pool, pkts_num);
+       if (num != pkts_num) {
+               WARN(1, "pool %d: %d of %d allocated\n",
+                    port_pool->id, num, pkts_num);
+               return -EIO;
+       }
+
+       mvpp2_bm_pool_bufsize_set(port->priv, port_pool,
+                                 MVPP2_RX_BUF_SIZE(port_pool->pkt_size));
+       dev->mtu = mtu;
+       netdev_update_features(dev);
+       return 0;
+}
+
+static inline void mvpp2_interrupts_enable(struct mvpp2_port *port)
+{
+       int cpu, cpu_mask = 0;
+
+       for_each_present_cpu(cpu)
+               cpu_mask |= 1 << cpu;
+       mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+                   MVPP2_ISR_ENABLE_INTERRUPT(cpu_mask));
+}
+
+static inline void mvpp2_interrupts_disable(struct mvpp2_port *port)
+{
+       int cpu, cpu_mask = 0;
+
+       for_each_present_cpu(cpu)
+               cpu_mask |= 1 << cpu;
+       mvpp2_write(port->priv, MVPP2_ISR_ENABLE_REG(port->id),
+                   MVPP2_ISR_DISABLE_INTERRUPT(cpu_mask));
+}
+
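+/* Illustrative sketch (not part of the original patch): both helpers
+ * above compute the same present-CPU bitmask; only the enable/disable
+ * encoding of the final register write differs. Hypothetical helper:
+ */
+#if 0
+static u32 mvpp2_present_cpu_mask_sketch(void)
+{
+       u32 cpu_mask = 0;
+       int cpu;
+
+       for_each_present_cpu(cpu)
+               cpu_mask |= 1 << cpu;
+       return cpu_mask;
+}
+#endif
+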
+/* Mask the current CPU's Rx/Tx interrupts */
+static void mvpp2_interrupts_mask(void *arg)
+{
+       struct mvpp2_port *port = arg;
+
+       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 0);
+}
+
+/* Unmask the current CPU's Rx/Tx interrupts */
+static void mvpp2_interrupts_unmask(void *arg)
+{
+       struct mvpp2_port *port = arg;
+
+       mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
+                   (MVPP2_CAUSE_MISC_SUM_MASK |
+                    MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
+                    MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
+}
+
+/* Port configuration routines */
+
+static void mvpp2_port_mii_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       switch (port->phy_interface) {
+       case PHY_INTERFACE_MODE_SGMII:
+               val |= MVPP2_GMAC_INBAND_AN_MASK;
+               break;
+       case PHY_INTERFACE_MODE_RGMII:
+               val |= MVPP2_GMAC_PORT_RGMII_MASK;
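+               /* fall through: RGMII also clears the PCS enable bit */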
+       default:
+               val &= ~MVPP2_GMAC_PCS_ENABLE_MASK;
+       }
+
+       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+}
+
+static void mvpp2_port_fc_adv_enable(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+       val |= MVPP2_GMAC_FC_ADV_EN;
+       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+}
+
+static void mvpp2_port_enable(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       val |= MVPP2_GMAC_PORT_EN_MASK;
+       val |= MVPP2_GMAC_MIB_CNTR_EN_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+static void mvpp2_port_disable(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       val &= ~(MVPP2_GMAC_PORT_EN_MASK);
+       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set IEEE 802.3x Flow Control Xon Packet Transmission Mode */
+static void mvpp2_port_periodic_xon_disable(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_1_REG) &
+                   ~MVPP2_GMAC_PERIODIC_XON_EN_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+/* Configure loopback port */
+static void mvpp2_port_loopback_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+
+       if (port->speed == 1000)
+               val |= MVPP2_GMAC_GMII_LB_EN_MASK;
+       else
+               val &= ~MVPP2_GMAC_GMII_LB_EN_MASK;
+
+       if (port->phy_interface == PHY_INTERFACE_MODE_SGMII)
+               val |= MVPP2_GMAC_PCS_LB_EN_MASK;
+       else
+               val &= ~MVPP2_GMAC_PCS_LB_EN_MASK;
+
+       writel(val, port->base + MVPP2_GMAC_CTRL_1_REG);
+}
+
+static void mvpp2_port_reset(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+                   ~MVPP2_GMAC_PORT_RESET_MASK;
+       writel(val, port->base + MVPP2_GMAC_CTRL_2_REG);
+
+       while (readl(port->base + MVPP2_GMAC_CTRL_2_REG) &
+              MVPP2_GMAC_PORT_RESET_MASK)
+               continue;
+}
+
+/* Change maximum receive size of the port */
+static inline void mvpp2_gmac_max_rx_size_set(struct mvpp2_port *port)
+{
+       u32 val;
+
+       val = readl(port->base + MVPP2_GMAC_CTRL_0_REG);
+       val &= ~MVPP2_GMAC_MAX_RX_SIZE_MASK;
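+       /* The max-Rx-size field counts 2-byte units and excludes the
+        * 2-byte Marvell header, hence the subtraction and division.
+        */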
+       val |= (((port->pkt_size - MVPP2_MH_SIZE) / 2) <<
+                   MVPP2_GMAC_MAX_RX_SIZE_OFFS);
+       writel(val, port->base + MVPP2_GMAC_CTRL_0_REG);
+}
+
+/* Set defaults to the MVPP2 port */
+static void mvpp2_defaults_set(struct mvpp2_port *port)
+{
+       int tx_port_num, val, queue, ptxq, lrxq;
+
+       /* Configure port to loopback if needed */
+       if (port->flags & MVPP2_F_LOOPBACK)
+               mvpp2_port_loopback_set(port);
+
+       /* Update TX FIFO MIN Threshold */
+       val = readl(port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+       val &= ~MVPP2_GMAC_TX_FIFO_MIN_TH_ALL_MASK;
+       /* Min. TX threshold must be less than minimal packet length */
+       val |= MVPP2_GMAC_TX_FIFO_MIN_TH_MASK(64 - 4 - 2);
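+       /* 64 is the minimum Ethernet frame size; the 4 and 2 presumably
+        * account for the FCS and the 2-byte Marvell header (assumption).
+        */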
+       writel(val, port->base + MVPP2_GMAC_PORT_FIFO_CFG_1_REG);
+
+       /* Disable Legacy WRR, Disable EJP, Release from reset */
+       tx_port_num = mvpp2_egress_port(port);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG,
+                   tx_port_num);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_CMD_1_REG, 0);
+
+       /* Close bandwidth for all queues */
+       for (queue = 0; queue < MVPP2_MAX_TXQ; queue++) {
+               ptxq = mvpp2_txq_phys(port->id, queue);
+               mvpp2_write(port->priv,
+                           MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(ptxq), 0);
+       }
+
+       /* Set refill period to 1 usec, refill tokens
+        * and bucket size to maximum
+        */
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PERIOD_REG,
+                   port->priv->tclk / USEC_PER_SEC);
+       val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_REFILL_REG);
+       val &= ~MVPP2_TXP_REFILL_PERIOD_ALL_MASK;
+       val |= MVPP2_TXP_REFILL_PERIOD_MASK(1);
+       val |= MVPP2_TXP_REFILL_TOKENS_ALL_MASK;
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_REFILL_REG, val);
+       val = MVPP2_TXP_TOKEN_SIZE_MAX;
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+
+       /* Set MaximumLowLatencyPacketSize value to 256 */
+       mvpp2_write(port->priv, MVPP2_RX_CTRL_REG(port->id),
+                   MVPP2_RX_USE_PSEUDO_FOR_CSUM_MASK |
+                   MVPP2_RX_LOW_LATENCY_PKT_SIZE(256));
+
+       /* Enable Rx cache snoop */
+       for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+               queue = port->rxqs[lrxq]->id;
+               val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+               val |= MVPP2_SNOOP_PKT_SIZE_MASK |
+                          MVPP2_SNOOP_BUF_HDR_MASK;
+               mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+       }
+
+       /* By default, mask all Rx/Tx interrupts for all present CPUs */
+       mvpp2_interrupts_disable(port);
+}
+
+/* Enable/disable receiving packets */
+static void mvpp2_ingress_enable(struct mvpp2_port *port)
+{
+       u32 val;
+       int lrxq, queue;
+
+       for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+               queue = port->rxqs[lrxq]->id;
+               val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+               val &= ~MVPP2_RXQ_DISABLE_MASK;
+               mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+       }
+}
+
+static void mvpp2_ingress_disable(struct mvpp2_port *port)
+{
+       u32 val;
+       int lrxq, queue;
+
+       for (lrxq = 0; lrxq < rxq_number; lrxq++) {
+               queue = port->rxqs[lrxq]->id;
+               val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(queue));
+               val |= MVPP2_RXQ_DISABLE_MASK;
+               mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(queue), val);
+       }
+}
+
+/* Enable transmit via physical egress queue
+ * - HW starts taking descriptors from DRAM
+ */
+static void mvpp2_egress_enable(struct mvpp2_port *port)
+{
+       u32 qmap;
+       int queue;
+       int tx_port_num = mvpp2_egress_port(port);
+
+       /* Enable all initialized TXQs. */
+       qmap = 0;
+       for (queue = 0; queue < txq_number; queue++) {
+               struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+               if (txq->descs != NULL)
+                       qmap |= (1 << queue);
+       }
+
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG, qmap);
+}
+
+/* Disable transmit via physical egress queue
+ * - HW doesn't take descriptors from DRAM
+ */
+static void mvpp2_egress_disable(struct mvpp2_port *port)
+{
+       u32 reg_data;
+       int delay;
+       int tx_port_num = mvpp2_egress_port(port);
+
+       /* Issue stop command for active channels only */
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+       reg_data = (mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG)) &
+                   MVPP2_TXP_SCHED_ENQ_MASK;
+       if (reg_data != 0)
+               mvpp2_write(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG,
+                           (reg_data << MVPP2_TXP_SCHED_DISQ_OFFSET));
+
+       /* Wait for all Tx activity to terminate. */
+       delay = 0;
+       do {
+               if (delay >= MVPP2_TX_DISABLE_TIMEOUT_MSEC) {
+                       netdev_warn(port->dev,
+                                   "Tx stop timed out, status=0x%08x\n",
+                                   reg_data);
+                       break;
+               }
+               mdelay(1);
+               delay++;
+
+               /* Check the port's Tx Command register to verify that
+                * all Tx queues have stopped
+                */
+               reg_data = mvpp2_read(port->priv, MVPP2_TXP_SCHED_Q_CMD_REG);
+       } while (reg_data & MVPP2_TXP_SCHED_ENQ_MASK);
+}
+
+/* Rx descriptors helper methods */
+
+/* Get number of Rx descriptors occupied by received packets */
+static inline int
+mvpp2_rxq_received(struct mvpp2_port *port, int rxq_id)
+{
+       u32 val = mvpp2_read(port->priv, MVPP2_RXQ_STATUS_REG(rxq_id));
+
+       return val & MVPP2_RXQ_OCCUPIED_MASK;
+}
+
+/* Update Rx queue status with the number of occupied and available
+ * Rx descriptor slots.
+ */
+static inline void
+mvpp2_rxq_status_update(struct mvpp2_port *port, int rxq_id,
+                       int used_count, int free_count)
+{
+       /* Decrement the number of used descriptors and increment the
+        * number of free descriptors.
+        */
+       u32 val = used_count | (free_count << MVPP2_RXQ_NUM_NEW_OFFSET);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_STATUS_UPDATE_REG(rxq_id), val);
+}
+
+/* Get pointer to next RX descriptor to be processed by SW */
+static inline struct mvpp2_rx_desc *
+mvpp2_rxq_next_desc_get(struct mvpp2_rx_queue *rxq)
+{
+       int rx_desc = rxq->next_desc_to_proc;
+
+       rxq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(rxq, rx_desc);
+       prefetch(rxq->descs + rxq->next_desc_to_proc);
+       return rxq->descs + rx_desc;
+}
+
+/* Set rx queue offset */
+static void mvpp2_rxq_offset_set(struct mvpp2_port *port,
+                                int prxq, int offset)
+{
+       u32 val;
+
+       /* Convert offset from bytes to units of 32 bytes */
+       offset = offset >> 5;
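+       /* e.g. an offset of 64 bytes becomes a field value of 2 */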
+
+       val = mvpp2_read(port->priv, MVPP2_RXQ_CONFIG_REG(prxq));
+       val &= ~MVPP2_RXQ_PACKET_OFFSET_MASK;
+
+       /* Offset is in units of 32 bytes */
+       val |= ((offset << MVPP2_RXQ_PACKET_OFFSET_OFFS) &
+                   MVPP2_RXQ_PACKET_OFFSET_MASK);
+
+       mvpp2_write(port->priv, MVPP2_RXQ_CONFIG_REG(prxq), val);
+}
+
+/* Obtain BM cookie information from descriptor */
+static u32 mvpp2_bm_cookie_build(struct mvpp2_rx_desc *rx_desc)
+{
+       int pool = (rx_desc->status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                  MVPP2_RXD_BM_POOL_ID_OFFS;
+       int cpu = smp_processor_id();
+
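+       /* Pack the pool id and CPU id into one cookie, e.g. pool 2 on
+        * CPU 1 yields (2 << POOL_OFFS) | (1 << CPU_OFFS).
+        */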
+       return ((pool & 0xFF) << MVPP2_BM_COOKIE_POOL_OFFS) |
+              ((cpu & 0xFF) << MVPP2_BM_COOKIE_CPU_OFFS);
+}
+
+/* Tx descriptors helper methods */
+
+/* Get number of Tx descriptors waiting to be transmitted by HW */
+static int mvpp2_txq_pend_desc_num_get(struct mvpp2_port *port,
+                                      struct mvpp2_tx_queue *txq)
+{
+       u32 val;
+
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+
+       return val & MVPP2_TXQ_PENDING_MASK;
+}
+
+/* Get pointer to next Tx descriptor to be processed (send) by HW */
+static struct mvpp2_tx_desc *
+mvpp2_txq_next_desc_get(struct mvpp2_tx_queue *txq)
+{
+       int tx_desc = txq->next_desc_to_proc;
+
+       txq->next_desc_to_proc = MVPP2_QUEUE_NEXT_DESC(txq, tx_desc);
+       return txq->descs + tx_desc;
+}
+
+/* Update HW with number of aggregated Tx descriptors to be sent */
+static void mvpp2_aggr_txq_pend_desc_add(struct mvpp2_port *port, int pending)
+{
+       /* aggregated access - relevant TXQ number is written in TX desc */
+       mvpp2_write(port->priv, MVPP2_AGGR_TXQ_UPDATE_REG, pending);
+}
+
+
+/* Check if there are enough free descriptors in aggregated txq.
+ * If not, update the number of occupied descriptors and repeat the check.
+ */
+static int mvpp2_aggr_desc_num_check(struct mvpp2 *priv,
+                                    struct mvpp2_tx_queue *aggr_txq, int num)
+{
+       if ((aggr_txq->count + num) > aggr_txq->size) {
+               /* Update number of occupied aggregated Tx descriptors */
+               int cpu = smp_processor_id();
+               u32 val = mvpp2_read(priv, MVPP2_AGGR_TXQ_STATUS_REG(cpu));
+
+               aggr_txq->count = val & MVPP2_AGGR_TXQ_PENDING_MASK;
+       }
+
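+       /* Re-check with the refreshed count: the HW may have drained
+        * descriptors since the cached value was last updated.
+        */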
+       if ((aggr_txq->count + num) > aggr_txq->size)
+               return -ENOMEM;
+
+       return 0;
+}
+
+/* Reserved Tx descriptors allocation request */
+static int mvpp2_txq_alloc_reserved_desc(struct mvpp2 *priv,
+                                        struct mvpp2_tx_queue *txq, int num)
+{
+       u32 val;
+
+       val = (txq->id << MVPP2_TXQ_RSVD_REQ_Q_OFFSET) | num;
+       mvpp2_write(priv, MVPP2_TXQ_RSVD_REQ_REG, val);
+
+       val = mvpp2_read(priv, MVPP2_TXQ_RSVD_RSLT_REG);
+
+       return val & MVPP2_TXQ_RSVD_RSLT_MASK;
+}
+
+/* Check if there are enough reserved descriptors for transmission.
+ * If not, request chunk of reserved descriptors and check again.
+ */
+static int mvpp2_txq_reserved_desc_num_proc(struct mvpp2 *priv,
+                                           struct mvpp2_tx_queue *txq,
+                                           struct mvpp2_txq_pcpu *txq_pcpu,
+                                           int num)
+{
+       int req, cpu, desc_count;
+
+       if (txq_pcpu->reserved_num >= num)
+               return 0;
+
+       /* Not enough descriptors reserved! Update the reserved descriptor
+        * count and check again.
+        */
+
+       desc_count = 0;
+       /* Compute total of used descriptors */
+       for_each_present_cpu(cpu) {
+               struct mvpp2_txq_pcpu *txq_pcpu_aux;
+
+               txq_pcpu_aux = per_cpu_ptr(txq->pcpu, cpu);
+               desc_count += txq_pcpu_aux->count;
+               desc_count += txq_pcpu_aux->reserved_num;
+       }
+
+       req = max(MVPP2_CPU_DESC_CHUNK, num - txq_pcpu->reserved_num);
+       desc_count += req;
+
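+       /* Refuse if the port-wide total could overflow the queue once
+        * every present CPU holds a full reservation chunk.
+        */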
+       if (desc_count >
+          (txq->size - (num_present_cpus() * MVPP2_CPU_DESC_CHUNK)))
+               return -ENOMEM;
+
+       txq_pcpu->reserved_num += mvpp2_txq_alloc_reserved_desc(priv, txq, req);
+
+       /* OK, the descriptor count has been updated: check again. */
+       if (txq_pcpu->reserved_num < num)
+               return -ENOMEM;
+       return 0;
+}
+
+/* Release the last allocated Tx descriptor. Useful to handle DMA
+ * mapping failures in the Tx path.
+ */
+static void mvpp2_txq_desc_put(struct mvpp2_tx_queue *txq)
+{
+       if (txq->next_desc_to_proc == 0)
+               txq->next_desc_to_proc = txq->last_desc - 1;
+       else
+               txq->next_desc_to_proc--;
+}
+
+/* Set Tx descriptors fields relevant for CSUM calculation */
+static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+                              int ip_hdr_len, int l4_proto)
+{
+       u32 command;
+
+       /* fields: L3_offset, IP_hdrlen, L3_type, G_IPv4_chk,
+        * G_L4_chk, L4_type required only for checksum calculation
+        */
+       command = (l3_offs << MVPP2_TXD_L3_OFF_SHIFT);
+       command |= (ip_hdr_len << MVPP2_TXD_IP_HLEN_SHIFT);
+       command |= MVPP2_TXD_IP_CSUM_DISABLE;
+
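+       /* l3_proto is the big-endian skb->protocol value; swab16(ETH_P_IP)
+        * equals htons(ETH_P_IP) on the little-endian hosts this assumes.
+        */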
+       if (l3_proto == swab16(ETH_P_IP)) {
+               command &= ~MVPP2_TXD_IP_CSUM_DISABLE;  /* enable IPv4 csum */
+               command &= ~MVPP2_TXD_L3_IP6;           /* enable IPv4 */
+       } else {
+               command |= MVPP2_TXD_L3_IP6;            /* enable IPv6 */
+       }
+
+       if (l4_proto == IPPROTO_TCP) {
+               command &= ~MVPP2_TXD_L4_UDP;           /* enable TCP */
+               command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
+       } else if (l4_proto == IPPROTO_UDP) {
+               command |= MVPP2_TXD_L4_UDP;            /* enable UDP */
+               command &= ~MVPP2_TXD_L4_CSUM_FRAG;     /* generate L4 csum */
+       } else {
+               command |= MVPP2_TXD_L4_CSUM_NOT;
+       }
+
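+       /* e.g. TCP over IPv4 with a 20-byte IP header at offset 14 gives
+        * l3_offs == 14 and ip_hdr_len == 5 (32-bit words), with IPv4 and
+        * L4 checksum generation enabled in the returned command.
+        */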
+       return command;
+}
+
+/* Get number of sent descriptors and decrement counter.
+ * The number of sent descriptors is returned.
+ * Per-CPU access
+ */
+static inline int mvpp2_txq_sent_desc_proc(struct mvpp2_port *port,
+                                          struct mvpp2_tx_queue *txq)
+{
+       u32 val;
+
+       /* Reading status reg resets transmitted descriptor counter */
+       val = mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(txq->id));
+
+       return (val & MVPP2_TRANSMITTED_COUNT_MASK) >>
+               MVPP2_TRANSMITTED_COUNT_OFFSET;
+}
+
+static void mvpp2_txq_sent_counter_clear(void *arg)
+{
+       struct mvpp2_port *port = arg;
+       int queue;
+
+       for (queue = 0; queue < txq_number; queue++) {
+               int id = port->txqs[queue]->id;
+
+               mvpp2_read(port->priv, MVPP2_TXQ_SENT_REG(id));
+       }
+}
+
+/* Set max sizes for Tx queues */
+static void mvpp2_txp_max_tx_size_set(struct mvpp2_port *port)
+{
+       u32     val, size, mtu;
+       int     txq, tx_port_num;
+
+       mtu = port->pkt_size * 8;
+       if (mtu > MVPP2_TXP_MTU_MAX)
+               mtu = MVPP2_TXP_MTU_MAX;
+
+       /* Workaround for a wrong Token bucket update: set MTU to 3 * real MTU */
+       mtu = 3 * mtu;
+
+       /* Indirect access to registers */
+       tx_port_num = mvpp2_egress_port(port);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+       /* Set MTU */
+       val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_MTU_REG);
+       val &= ~MVPP2_TXP_MTU_MAX;
+       val |= mtu;
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_MTU_REG, val);
+
+       /* TXP token size and all TXQs token size must be larger than the MTU */
+       val = mvpp2_read(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG);
+       size = val & MVPP2_TXP_TOKEN_SIZE_MAX;
+       if (size < mtu) {
+               size = mtu;
+               val &= ~MVPP2_TXP_TOKEN_SIZE_MAX;
+               val |= size;
+               mvpp2_write(port->priv, MVPP2_TXP_SCHED_TOKEN_SIZE_REG, val);
+       }
+
+       for (txq = 0; txq < txq_number; txq++) {
+               val = mvpp2_read(port->priv,
+                                MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq));
+               size = val & MVPP2_TXQ_TOKEN_SIZE_MAX;
+
+               if (size < mtu) {
+                       size = mtu;
+                       val &= ~MVPP2_TXQ_TOKEN_SIZE_MAX;
+                       val |= size;
+                       mvpp2_write(port->priv,
+                                   MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq),
+                                   val);
+               }
+       }
+}
+
+/* Set the number of packets that will be received before an Rx interrupt
+ * is generated by the HW.
+ */
+static void mvpp2_rx_pkts_coal_set(struct mvpp2_port *port,
+                                  struct mvpp2_rx_queue *rxq, u32 pkts)
+{
+       u32 val;
+
+       val = (pkts & MVPP2_OCCUPIED_THRESH_MASK);
+       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_write(port->priv, MVPP2_RXQ_THRESH_REG, val);
+
+       rxq->pkts_coal = pkts;
+}
+
+/* Set the time delay in usec before Rx interrupt */
+static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
+                                  struct mvpp2_rx_queue *rxq, u32 usec)
+{
+       u32 val;
+
+       val = (port->priv->tclk / USEC_PER_SEC) * usec;
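+       /* Clock ticks per usec times the requested delay; e.g. a 250 MHz
+        * tclk gives 250 ticks per microsecond of coalescing delay.
+        */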
+       mvpp2_write(port->priv, MVPP2_ISR_RX_THRESHOLD_REG(rxq->id), val);
+
+       rxq->time_coal = usec;
+}
+
+/* Set threshold for TX_DONE pkts coalescing */
+static void mvpp2_tx_done_pkts_coal_set(void *arg)
+{
+       struct mvpp2_port *port = arg;
+       int queue;
+       u32 val;
+
+       for (queue = 0; queue < txq_number; queue++) {
+               struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+               val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
+                      MVPP2_TRANSMITTED_THRESH_MASK;
+               mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+               mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
+       }
+}
+
+/* Free Tx queue skbuffs */
+static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
+                               struct mvpp2_tx_queue *txq,
+                               struct mvpp2_txq_pcpu *txq_pcpu, int num)
+{
+       int i;
+
+       for (i = 0; i < num; i++) {
+               struct mvpp2_tx_desc *tx_desc = txq->descs +
+                                                       txq_pcpu->txq_get_index;
+               struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
+
+               mvpp2_txq_inc_get(txq_pcpu);
+
+               if (!skb)
+                       continue;
+
+               dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr,
+                                tx_desc->data_size, DMA_TO_DEVICE);
+               dev_kfree_skb_any(skb);
+       }
+}
+
+static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
+                                                       u32 cause)
+{
+       int queue = fls(cause) - 1;
+
+       return port->rxqs[queue];
+}
+
+static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
+                                                       u32 cause)
+{
+       int queue = fls(cause >> 16) - 1;
+
+       return port->txqs[queue];
+}
+
+/* Handle end of transmission */
+static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
+                          struct mvpp2_txq_pcpu *txq_pcpu)
+{
+       struct netdev_queue *nq = netdev_get_tx_queue(port->dev, txq->log_id);
+       int tx_done;
+
+       if (txq_pcpu->cpu != smp_processor_id())
+               netdev_err(port->dev, "wrong CPU at the end of Tx processing\n");
+
+       tx_done = mvpp2_txq_sent_desc_proc(port, txq);
+       if (!tx_done)
+               return;
+       mvpp2_txq_bufs_free(port, txq, txq_pcpu, tx_done);
+
+       txq_pcpu->count -= tx_done;
+
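+       /* Wake the queue once there is room for a maximally fragmented skb */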
+       if (netif_tx_queue_stopped(nq))
+               if (txq_pcpu->size - txq_pcpu->count >= MAX_SKB_FRAGS + 1)
+                       netif_tx_wake_queue(nq);
+}
+
+/* Rx/Tx queue initialization/cleanup methods */
+
+/* Allocate and initialize descriptors for aggr TXQ */
+static int mvpp2_aggr_txq_init(struct platform_device *pdev,
+                              struct mvpp2_tx_queue *aggr_txq,
+                              int desc_num, int cpu,
+                              struct mvpp2 *priv)
+{
+       /* Allocate memory for TX descriptors */
+       aggr_txq->descs = dma_alloc_coherent(&pdev->dev,
+                               desc_num * MVPP2_DESC_ALIGNED_SIZE,
+                               &aggr_txq->descs_phys, GFP_KERNEL);
+       if (!aggr_txq->descs)
+               return -ENOMEM;
+
+       /* Make sure descriptor address is cache line size aligned  */
+       BUG_ON(aggr_txq->descs !=
+              PTR_ALIGN(aggr_txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+       aggr_txq->last_desc = aggr_txq->size - 1;
+
+       /* The aggregated TXQ cannot be reset (HW workaround), so resume
+        * from the descriptor index currently held by the hardware.
+        */
+       aggr_txq->next_desc_to_proc = mvpp2_read(priv,
+                                                MVPP2_AGGR_TXQ_INDEX_REG(cpu));
+
+       /* Set Tx descriptors queue starting address */
+       /* indirect access */
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_ADDR_REG(cpu),
+                   aggr_txq->descs_phys);
+       mvpp2_write(priv, MVPP2_AGGR_TXQ_DESC_SIZE_REG(cpu), desc_num);
+
+       return 0;
+}
+
+/* Create a specified Rx queue */
+static int mvpp2_rxq_init(struct mvpp2_port *port,
+                         struct mvpp2_rx_queue *rxq)
+
+{
+       rxq->size = port->rx_ring_size;
+
+       /* Allocate memory for RX descriptors */
+       rxq->descs = dma_alloc_coherent(port->dev->dev.parent,
+                                       rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                       &rxq->descs_phys, GFP_KERNEL);
+       if (!rxq->descs)
+               return -ENOMEM;
+
+       BUG_ON(rxq->descs !=
+              PTR_ALIGN(rxq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+       rxq->last_desc = rxq->size - 1;
+
+       /* Zero occupied and non-occupied counters - direct access */
+       mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+
+       /* Set Rx descriptors queue starting address - indirect access */
+       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, rxq->descs_phys);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, rxq->size);
+       mvpp2_write(port->priv, MVPP2_RXQ_INDEX_REG, 0);
+
+       /* Set Offset */
+       mvpp2_rxq_offset_set(port, rxq->id, NET_SKB_PAD);
+
+       /* Set coalescing pkts and time */
+       mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
+       mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+
+       /* Add number of descriptors ready for receiving packets */
+       mvpp2_rxq_status_update(port, rxq->id, 0, rxq->size);
+
+       return 0;
+}
+
+/* Push packets received by the RXQ to BM pool */
+static void mvpp2_rxq_drop_pkts(struct mvpp2_port *port,
+                               struct mvpp2_rx_queue *rxq)
+{
+       int rx_received, i;
+
+       rx_received = mvpp2_rxq_received(port, rxq->id);
+       if (!rx_received)
+               return;
+
+       for (i = 0; i < rx_received; i++) {
+               struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+               u32 bm = mvpp2_bm_cookie_build(rx_desc);
+
+               mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+                                 rx_desc->buf_cookie);
+       }
+       mvpp2_rxq_status_update(port, rxq->id, rx_received, rx_received);
+}
+
+/* Cleanup Rx queue */
+static void mvpp2_rxq_deinit(struct mvpp2_port *port,
+                            struct mvpp2_rx_queue *rxq)
+{
+       mvpp2_rxq_drop_pkts(port, rxq);
+
+       if (rxq->descs)
+               dma_free_coherent(port->dev->dev.parent,
+                                 rxq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                 rxq->descs,
+                                 rxq->descs_phys);
+
+       rxq->descs             = NULL;
+       rxq->last_desc         = 0;
+       rxq->next_desc_to_proc = 0;
+       rxq->descs_phys        = 0;
+
+       /* Clear Rx descriptors queue starting address and size;
+        * free descriptor number
+        */
+       mvpp2_write(port->priv, MVPP2_RXQ_STATUS_REG(rxq->id), 0);
+       mvpp2_write(port->priv, MVPP2_RXQ_NUM_REG, rxq->id);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_ADDR_REG, 0);
+       mvpp2_write(port->priv, MVPP2_RXQ_DESC_SIZE_REG, 0);
+}
+
+/* Create and initialize a Tx queue */
+static int mvpp2_txq_init(struct mvpp2_port *port,
+                         struct mvpp2_tx_queue *txq)
+{
+       u32 val;
+       int cpu, desc, desc_per_txq, tx_port_num;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+
+       txq->size = port->tx_ring_size;
+
+       /* Allocate memory for Tx descriptors */
+       txq->descs = dma_alloc_coherent(port->dev->dev.parent,
+                               txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                               &txq->descs_phys, GFP_KERNEL);
+       if (!txq->descs)
+               return -ENOMEM;
+
+       /* Make sure descriptor address is cache line size aligned  */
+       BUG_ON(txq->descs !=
+              PTR_ALIGN(txq->descs, MVPP2_CPU_D_CACHE_LINE_SIZE));
+
+       txq->last_desc = txq->size - 1;
+
+       /* Set Tx descriptors queue starting address - indirect access */
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, txq->descs_phys);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, txq->size &
+                                            MVPP2_TXQ_DESC_SIZE_MASK);
+       mvpp2_write(port->priv, MVPP2_TXQ_INDEX_REG, 0);
+       mvpp2_write(port->priv, MVPP2_TXQ_RSVD_CLR_REG,
+                   txq->id << MVPP2_TXQ_RSVD_CLR_OFFSET);
+       val = mvpp2_read(port->priv, MVPP2_TXQ_PENDING_REG);
+       val &= ~MVPP2_TXQ_PENDING_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_PENDING_REG, val);
+
+       /* Calculate base address in prefetch buffer. We reserve 16 descriptors
+        * for each existing TXQ.
+        * TCONTs for the PON port must be continuous from 0 to MVPP2_MAX_TCONT;
+        * GBE ports are assumed to be continuous from 0 to MVPP2_MAX_PORTS.
+        */
+       desc_per_txq = 16;
+       desc = (port->id * MVPP2_MAX_TXQ * desc_per_txq) +
+              (txq->log_id * desc_per_txq);
+
+       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG,
+                   MVPP2_PREF_BUF_PTR(desc) | MVPP2_PREF_BUF_SIZE_16 |
+                   MVPP2_PREF_BUF_THRESH(desc_per_txq/2));
+
+       /* WRR / EJP configuration - indirect access */
+       tx_port_num = mvpp2_egress_port(port);
+       mvpp2_write(port->priv, MVPP2_TXP_SCHED_PORT_INDEX_REG, tx_port_num);
+
+       val = mvpp2_read(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id));
+       val &= ~MVPP2_TXQ_REFILL_PERIOD_ALL_MASK;
+       val |= MVPP2_TXQ_REFILL_PERIOD_MASK(1);
+       val |= MVPP2_TXQ_REFILL_TOKENS_ALL_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_SCHED_REFILL_REG(txq->log_id), val);
+
+       val = MVPP2_TXQ_TOKEN_SIZE_MAX;
+       mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_SIZE_REG(txq->log_id),
+                   val);
+
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               txq_pcpu->size = txq->size;
+               txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
+                                          sizeof(*txq_pcpu->tx_skb),
+                                          GFP_KERNEL);
+               if (!txq_pcpu->tx_skb) {
+                       dma_free_coherent(port->dev->dev.parent,
+                                         txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                         txq->descs, txq->descs_phys);
+                       return -ENOMEM;
+               }
+
+               txq_pcpu->count = 0;
+               txq_pcpu->reserved_num = 0;
+               txq_pcpu->txq_put_index = 0;
+               txq_pcpu->txq_get_index = 0;
+       }
+
+       return 0;
+}
+
+/* Free allocated TXQ resources */
+static void mvpp2_txq_deinit(struct mvpp2_port *port,
+                            struct mvpp2_tx_queue *txq)
+{
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       int cpu;
+
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+               kfree(txq_pcpu->tx_skb);
+       }
+
+       if (txq->descs)
+               dma_free_coherent(port->dev->dev.parent,
+                                 txq->size * MVPP2_DESC_ALIGNED_SIZE,
+                                 txq->descs, txq->descs_phys);
+
+       txq->descs             = NULL;
+       txq->last_desc         = 0;
+       txq->next_desc_to_proc = 0;
+       txq->descs_phys        = 0;
+
+       /* Set minimum bandwidth for disabled TXQs */
+       mvpp2_write(port->priv, MVPP2_TXQ_SCHED_TOKEN_CNTR_REG(txq->id), 0);
+
+       /* Set Tx descriptors queue starting address and size */
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_ADDR_REG, 0);
+       mvpp2_write(port->priv, MVPP2_TXQ_DESC_SIZE_REG, 0);
+}
+
+/* Drain a Tx queue and free its buffered packets */
+static void mvpp2_txq_clean(struct mvpp2_port *port, struct mvpp2_tx_queue *txq)
+{
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       int delay, pending, cpu;
+       u32 val;
+
+       mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
+       val = mvpp2_read(port->priv, MVPP2_TXQ_PREF_BUF_REG);
+       val |= MVPP2_TXQ_DRAIN_EN_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+
+       /* The napi queue has been stopped so wait for all packets
+        * to be transmitted.
+        */
+       delay = 0;
+       do {
+               if (delay >= MVPP2_TX_PENDING_TIMEOUT_MSEC) {
+                       netdev_warn(port->dev,
+                                   "port %d: cleaning queue %d timed out\n",
+                                   port->id, txq->log_id);
+                       break;
+               }
+               mdelay(1);
+               delay++;
+
+               pending = mvpp2_txq_pend_desc_num_get(port, txq);
+       } while (pending);
+
+       val &= ~MVPP2_TXQ_DRAIN_EN_MASK;
+       mvpp2_write(port->priv, MVPP2_TXQ_PREF_BUF_REG, val);
+
+       for_each_present_cpu(cpu) {
+               txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+
+               /* Release all packets */
+               mvpp2_txq_bufs_free(port, txq, txq_pcpu, txq_pcpu->count);
+
+               /* Reset queue */
+               txq_pcpu->count = 0;
+               txq_pcpu->txq_put_index = 0;
+               txq_pcpu->txq_get_index = 0;
+       }
+}
+
+/* Cleanup all Tx queues */
+static void mvpp2_cleanup_txqs(struct mvpp2_port *port)
+{
+       struct mvpp2_tx_queue *txq;
+       int queue;
+       u32 val;
+
+       val = mvpp2_read(port->priv, MVPP2_TX_PORT_FLUSH_REG);
+
+       /* Reset Tx ports and delete Tx queues */
+       val |= MVPP2_TX_PORT_FLUSH_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+
+       for (queue = 0; queue < txq_number; queue++) {
+               txq = port->txqs[queue];
+               mvpp2_txq_clean(port, txq);
+               mvpp2_txq_deinit(port, txq);
+       }
+
+       on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+
+       val &= ~MVPP2_TX_PORT_FLUSH_MASK(port->id);
+       mvpp2_write(port->priv, MVPP2_TX_PORT_FLUSH_REG, val);
+}
+
+/* Cleanup all Rx queues */
+static void mvpp2_cleanup_rxqs(struct mvpp2_port *port)
+{
+       int queue;
+
+       for (queue = 0; queue < rxq_number; queue++)
+               mvpp2_rxq_deinit(port, port->rxqs[queue]);
+}
+
+/* Init all Rx queues for port */
+static int mvpp2_setup_rxqs(struct mvpp2_port *port)
+{
+       int queue, err;
+
+       for (queue = 0; queue < rxq_number; queue++) {
+               err = mvpp2_rxq_init(port, port->rxqs[queue]);
+               if (err)
+                       goto err_cleanup;
+       }
+       return 0;
+
+err_cleanup:
+       mvpp2_cleanup_rxqs(port);
+       return err;
+}
+
+/* Init all tx queues for port */
+static int mvpp2_setup_txqs(struct mvpp2_port *port)
+{
+       struct mvpp2_tx_queue *txq;
+       int queue, err;
+
+       for (queue = 0; queue < txq_number; queue++) {
+               txq = port->txqs[queue];
+               err = mvpp2_txq_init(port, txq);
+               if (err)
+                       goto err_cleanup;
+       }
+
+       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
+       on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
+       return 0;
+
+err_cleanup:
+       mvpp2_cleanup_txqs(port);
+       return err;
+}
+
+/* The callback for per-port interrupt */
+static irqreturn_t mvpp2_isr(int irq, void *dev_id)
+{
+       struct mvpp2_port *port = (struct mvpp2_port *)dev_id;
+
+       mvpp2_interrupts_disable(port);
+
+       napi_schedule(&port->napi);
+
+       return IRQ_HANDLED;
+}
+
+/* Adjust link */
+static void mvpp2_link_event(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct phy_device *phydev = port->phy_dev;
+       int status_change = 0;
+       u32 val;
+
+       if (phydev->link) {
+               if ((port->speed != phydev->speed) ||
+                   (port->duplex != phydev->duplex)) {
+                       u32 val;
+
+                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       val &= ~(MVPP2_GMAC_CONFIG_MII_SPEED |
+                                MVPP2_GMAC_CONFIG_GMII_SPEED |
+                                MVPP2_GMAC_CONFIG_FULL_DUPLEX |
+                                MVPP2_GMAC_AN_SPEED_EN |
+                                MVPP2_GMAC_AN_DUPLEX_EN);
+
+                       if (phydev->duplex)
+                               val |= MVPP2_GMAC_CONFIG_FULL_DUPLEX;
+
+                       if (phydev->speed == SPEED_1000)
+                               val |= MVPP2_GMAC_CONFIG_GMII_SPEED;
+                       else if (phydev->speed == SPEED_100)
+                               val |= MVPP2_GMAC_CONFIG_MII_SPEED;
+
+                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+
+                       port->duplex = phydev->duplex;
+                       port->speed  = phydev->speed;
+               }
+       }
+
+       if (phydev->link != port->link) {
+               if (!phydev->link) {
+                       port->duplex = -1;
+                       port->speed = 0;
+               }
+
+               port->link = phydev->link;
+               status_change = 1;
+       }
+
+       if (status_change) {
+               if (phydev->link) {
+                       val = readl(port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       val |= (MVPP2_GMAC_FORCE_LINK_PASS |
+                               MVPP2_GMAC_FORCE_LINK_DOWN);
+                       writel(val, port->base + MVPP2_GMAC_AUTONEG_CONFIG);
+                       mvpp2_egress_enable(port);
+                       mvpp2_ingress_enable(port);
+               } else {
+                       mvpp2_ingress_disable(port);
+                       mvpp2_egress_disable(port);
+               }
+               phy_print_status(phydev);
+       }
+}
+
+/* Main RX/TX processing routines */
+
+/* Display more error info */
+static void mvpp2_rx_error(struct mvpp2_port *port,
+                          struct mvpp2_rx_desc *rx_desc)
+{
+       u32 status = rx_desc->status;
+
+       switch (status & MVPP2_RXD_ERR_CODE_MASK) {
+       case MVPP2_RXD_ERR_CRC:
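+       /* Wait until the reset bit reads back as cleared */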
+               netdev_err(port->dev, "bad rx status %08x (crc error), size=%d\n",
+                          status, rx_desc->data_size);
+               break;
+       case MVPP2_RXD_ERR_OVERRUN:
+               netdev_err(port->dev, "bad rx status %08x (overrun error), size=%d\n",
+                          status, rx_desc->data_size);
+               break;
+       case MVPP2_RXD_ERR_RESOURCE:
+               netdev_err(port->dev, "bad rx status %08x (resource error), size=%d\n",
+                          status, rx_desc->data_size);
+               break;
+       }
+}
+
+/* Handle RX checksum offload */
+static void mvpp2_rx_csum(struct mvpp2_port *port, u32 status,
+                         struct sk_buff *skb)
+{
+       if (((status & MVPP2_RXD_L3_IP4) &&
+            !(status & MVPP2_RXD_IP4_HEADER_ERR)) ||
+           (status & MVPP2_RXD_L3_IP6))
+               if (((status & MVPP2_RXD_L4_UDP) ||
+                    (status & MVPP2_RXD_L4_TCP)) &&
+                    (status & MVPP2_RXD_L4_CSUM_OK)) {
+                       skb->csum = 0;
+                       skb->ip_summed = CHECKSUM_UNNECESSARY;
+                       return;
+               }
+
+       skb->ip_summed = CHECKSUM_NONE;
+}
+
+/* Reuse skb if possible, or allocate a new skb and add it to BM pool */
+static int mvpp2_rx_refill(struct mvpp2_port *port,
+                          struct mvpp2_bm_pool *bm_pool,
+                          u32 bm, int is_recycle)
+{
+       struct sk_buff *skb;
+       dma_addr_t phys_addr;
+
+       if (is_recycle &&
+           (atomic_read(&bm_pool->in_use) < bm_pool->in_use_thresh))
+               return 0;
+
+       /* No recycle or too many buffers are in use, so allocate a new skb */
+       skb = mvpp2_skb_alloc(port, bm_pool, &phys_addr, GFP_ATOMIC);
+       if (!skb)
+               return -ENOMEM;
+
+       mvpp2_pool_refill(port, bm, (u32)phys_addr, (u32)skb);
+       atomic_dec(&bm_pool->in_use);
+       return 0;
+}
+
+/* Handle tx checksum */
+static u32 mvpp2_skb_tx_csum(struct mvpp2_port *port, struct sk_buff *skb)
+{
+       if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               int ip_hdr_len = 0;
+               u8 l4_proto;
+
+               if (skb->protocol == htons(ETH_P_IP)) {
+                       struct iphdr *ip4h = ip_hdr(skb);
+
+                       /* Calculate IPv4 checksum and L4 checksum */
+                       ip_hdr_len = ip4h->ihl;
+                       l4_proto = ip4h->protocol;
+               } else if (skb->protocol == htons(ETH_P_IPV6)) {
+                       struct ipv6hdr *ip6h = ipv6_hdr(skb);
+
+                       /* Read l4_proto from one of the IPv6 extension headers */
+                       if (skb_network_header_len(skb) > 0)
+                               ip_hdr_len = (skb_network_header_len(skb) >> 2);
+                       l4_proto = ip6h->nexthdr;
+               } else {
+                       return MVPP2_TXD_L4_CSUM_NOT;
+               }
+
+               return mvpp2_txq_desc_csum(skb_network_offset(skb),
+                               skb->protocol, ip_hdr_len, l4_proto);
+       }
+
+       return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
+}
+
+static void mvpp2_buff_hdr_rx(struct mvpp2_port *port,
+                             struct mvpp2_rx_desc *rx_desc)
+{
+       struct mvpp2_buff_hdr *buff_hdr;
+       struct sk_buff *skb;
+       u32 rx_status = rx_desc->status;
+       u32 buff_phys_addr;
+       u32 buff_virt_addr;
+       u32 buff_phys_addr_next;
+       u32 buff_virt_addr_next;
+       int mc_id;
+       int pool_id;
+
+       pool_id = (rx_status & MVPP2_RXD_BM_POOL_ID_MASK) >>
+                  MVPP2_RXD_BM_POOL_ID_OFFS;
+       buff_phys_addr = rx_desc->buf_phys_addr;
+       buff_virt_addr = rx_desc->buf_cookie;
+
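+       /* Walk the chain of buffer headers, returning each buffer to the
+        * multicast-aware BM put, until the last-buffer flag is set.
+        */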
+       do {
+               skb = (struct sk_buff *)buff_virt_addr;
+               buff_hdr = (struct mvpp2_buff_hdr *)skb->head;
+
+               mc_id = MVPP2_B_HDR_INFO_MC_ID(buff_hdr->info);
+
+               buff_phys_addr_next = buff_hdr->next_buff_phys_addr;
+               buff_virt_addr_next = buff_hdr->next_buff_virt_addr;
+
+               /* Release buffer */
+               mvpp2_bm_pool_mc_put(port, pool_id, buff_phys_addr,
+                                    buff_virt_addr, mc_id);
+
+               buff_phys_addr = buff_phys_addr_next;
+               buff_virt_addr = buff_virt_addr_next;
+
+       } while (!MVPP2_B_HDR_INFO_IS_LAST(buff_hdr->info));
+}
+
+/* Main rx processing */
+static int mvpp2_rx(struct mvpp2_port *port, int rx_todo,
+                   struct mvpp2_rx_queue *rxq)
+{
+       struct net_device *dev = port->dev;
+       int rx_received, rx_filled, i;
+       u32 rcvd_pkts = 0;
+       u32 rcvd_bytes = 0;
+
+       /* Get the number of received packets and clamp rx_todo to it */
+       rx_received = mvpp2_rxq_received(port, rxq->id);
+       if (rx_todo > rx_received)
+               rx_todo = rx_received;
+
+       rx_filled = 0;
+       for (i = 0; i < rx_todo; i++) {
+               struct mvpp2_rx_desc *rx_desc = mvpp2_rxq_next_desc_get(rxq);
+               struct mvpp2_bm_pool *bm_pool;
+               struct sk_buff *skb;
+               u32 bm, rx_status;
+               int pool, rx_bytes, err;
+
+               rx_filled++;
+               rx_status = rx_desc->status;
+               rx_bytes = rx_desc->data_size - MVPP2_MH_SIZE;
+
+               bm = mvpp2_bm_cookie_build(rx_desc);
+               pool = mvpp2_bm_cookie_pool_get(bm);
+               bm_pool = &port->priv->bm_pools[pool];
+               /* Check if buffer header is used */
+               if (rx_status & MVPP2_RXD_BUF_HDR) {
+                       mvpp2_buff_hdr_rx(port, rx_desc);
+                       continue;
+               }
+
+               /* In case of an error, release the requested buffer pointer
+                * to the Buffer Manager. This request process is controlled
+                * by the hardware, and the information about the buffer is
+                * carried in the RX descriptor.
+                */
+               if (rx_status & MVPP2_RXD_ERR_SUMMARY) {
+                       dev->stats.rx_errors++;
+                       mvpp2_rx_error(port, rx_desc);
+                       mvpp2_pool_refill(port, bm, rx_desc->buf_phys_addr,
+                                         rx_desc->buf_cookie);
+                       continue;
+               }
+
+               skb = (struct sk_buff *)rx_desc->buf_cookie;
+
+               rcvd_pkts++;
+               rcvd_bytes += rx_bytes;
+               atomic_inc(&bm_pool->in_use);
+
+               skb_reserve(skb, MVPP2_MH_SIZE);
+               skb_put(skb, rx_bytes);
+               skb->protocol = eth_type_trans(skb, dev);
+               mvpp2_rx_csum(port, rx_status, skb);
+
+               napi_gro_receive(&port->napi, skb);
+
+               err = mvpp2_rx_refill(port, bm_pool, bm, 0);
+               if (err) {
+                       netdev_err(port->dev, "failed to refill BM pools\n");
+                       rx_filled--;
+               }
+       }
+
+       if (rcvd_pkts) {
+               struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->rx_packets += rcvd_pkts;
+               stats->rx_bytes   += rcvd_bytes;
+               u64_stats_update_end(&stats->syncp);
+       }
+
+       /* Update Rx queue management counters */
+       wmb();
+       mvpp2_rxq_status_update(port, rxq->id, rx_todo, rx_filled);
+
+       return rx_todo;
+}
+
+static inline void
+tx_desc_unmap_put(struct device *dev, struct mvpp2_tx_queue *txq,
+                 struct mvpp2_tx_desc *desc)
+{
+       dma_unmap_single(dev, desc->buf_phys_addr,
+                        desc->data_size, DMA_TO_DEVICE);
+       mvpp2_txq_desc_put(txq);
+}
+
+/* Handle tx fragmentation processing */
+static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
+                                struct mvpp2_tx_queue *aggr_txq,
+                                struct mvpp2_tx_queue *txq)
+{
+       struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+       struct mvpp2_tx_desc *tx_desc;
+       int i;
+       dma_addr_t buf_phys_addr;
+
+       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+               skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+               void *addr = page_address(frag->page.p) + frag->page_offset;
+
+               tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+               tx_desc->phys_txq = txq->id;
+               tx_desc->data_size = frag->size;
+
+               buf_phys_addr = dma_map_single(port->dev->dev.parent, addr,
+                                              tx_desc->data_size,
+                                              DMA_TO_DEVICE);
+               if (dma_mapping_error(port->dev->dev.parent, buf_phys_addr)) {
+                       mvpp2_txq_desc_put(txq);
+                       goto error;
+               }
+
+               tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+               tx_desc->buf_phys_addr = buf_phys_addr & (~MVPP2_TX_DESC_ALIGN);
+
+               if (i == (skb_shinfo(skb)->nr_frags - 1)) {
+                       /* Last descriptor */
+                       tx_desc->command = MVPP2_TXD_L_DESC;
+                       mvpp2_txq_inc_put(txq_pcpu, skb);
+               } else {
+                       /* Descriptor in the middle: Not First, Not Last */
+                       tx_desc->command = 0;
+                       mvpp2_txq_inc_put(txq_pcpu, NULL);
+               }
+       }
+
+       return 0;
+
+error:
+       /* Release all descriptors that were used to map fragments of
+        * this packet, as well as the corresponding DMA mappings
+        */
+       for (i = i - 1; i >= 0; i--) {
+               tx_desc = txq->descs + i;
+               tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+       }
+
+       return -ENOMEM;
+}
+
+/* Main tx processing */
+static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2_tx_queue *txq, *aggr_txq;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       struct mvpp2_tx_desc *tx_desc;
+       dma_addr_t buf_phys_addr;
+       int frags = 0;
+       u16 txq_id;
+       u32 tx_cmd;
+
+       txq_id = skb_get_queue_mapping(skb);
+       txq = port->txqs[txq_id];
+       txq_pcpu = this_cpu_ptr(txq->pcpu);
+       aggr_txq = &port->priv->aggr_txqs[smp_processor_id()];
+
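+       /* One descriptor for the linear part plus one per page fragment */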
+       frags = skb_shinfo(skb)->nr_frags + 1;
+
+       /* Check number of available descriptors */
+       if (mvpp2_aggr_desc_num_check(port->priv, aggr_txq, frags) ||
+           mvpp2_txq_reserved_desc_num_proc(port->priv, txq,
+                                            txq_pcpu, frags)) {
+               frags = 0;
+               goto out;
+       }
+
+       /* Get a descriptor for the first part of the packet */
+       tx_desc = mvpp2_txq_next_desc_get(aggr_txq);
+       tx_desc->phys_txq = txq->id;
+       tx_desc->data_size = skb_headlen(skb);
+
+       buf_phys_addr = dma_map_single(dev->dev.parent, skb->data,
+                                      tx_desc->data_size, DMA_TO_DEVICE);
+       if (unlikely(dma_mapping_error(dev->dev.parent, buf_phys_addr))) {
+               mvpp2_txq_desc_put(txq);
+               frags = 0;
+               goto out;
+       }
+       tx_desc->packet_offset = buf_phys_addr & MVPP2_TX_DESC_ALIGN;
+       tx_desc->buf_phys_addr = buf_phys_addr & ~MVPP2_TX_DESC_ALIGN;
+
+       tx_cmd = mvpp2_skb_tx_csum(port, skb);
+
+       if (frags == 1) {
+               /* First and Last descriptor */
+               tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
+               tx_desc->command = tx_cmd;
+               mvpp2_txq_inc_put(txq_pcpu, skb);
+       } else {
+               /* First but not Last */
+               tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
+               tx_desc->command = tx_cmd;
+               mvpp2_txq_inc_put(txq_pcpu, NULL);
+
+               /* Continue with other skb fragments */
+               if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
+                       tx_desc_unmap_put(port->dev->dev.parent, txq, tx_desc);
+                       frags = 0;
+                       goto out;
+               }
+       }
+
+       txq_pcpu->reserved_num -= frags;
+       txq_pcpu->count += frags;
+       aggr_txq->count += frags;
+
+       /* Enable transmit */
+       wmb();
+       mvpp2_aggr_txq_pend_desc_add(port, frags);
+
+       if (txq_pcpu->size - txq_pcpu->count < MAX_SKB_FRAGS + 1) {
+               struct netdev_queue *nq = netdev_get_tx_queue(dev, txq_id);
+
+               netif_tx_stop_queue(nq);
+       }
+out:
+       if (frags > 0) {
+               struct mvpp2_pcpu_stats *stats = this_cpu_ptr(port->stats);
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes += skb->len;
+               u64_stats_update_end(&stats->syncp);
+       } else {
+               dev->stats.tx_dropped++;
+               dev_kfree_skb_any(skb);
+       }
+
+       return NETDEV_TX_OK;
+}
+
+static inline void mvpp2_cause_error(struct net_device *dev, int cause)
+{
+       if (cause & MVPP2_CAUSE_FCS_ERR_MASK)
+               netdev_err(dev, "FCS error\n");
+       if (cause & MVPP2_CAUSE_RX_FIFO_OVERRUN_MASK)
+               netdev_err(dev, "rx fifo overrun error\n");
+       if (cause & MVPP2_CAUSE_TX_FIFO_UNDERRUN_MASK)
+               netdev_err(dev, "tx fifo underrun error\n");
+}
+
+static void mvpp2_txq_done_percpu(void *arg)
+{
+       struct mvpp2_port *port = arg;
+       u32 cause_rx_tx, cause_tx, cause_misc;
+
+       /* Rx/Tx cause register
+        *
+        * Bits 0-15: each bit indicates received packets on the Rx queue
+        * (bit 0 is for Rx queue 0).
+        *
+        * Bits 16-23: each bit indicates transmitted packets on the Tx queue
+        * (bit 16 is for Tx queue 0).
+        *
+        * Each CPU has its own Rx/Tx cause register
+        */
+       cause_rx_tx = mvpp2_read(port->priv,
+                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+       cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
+       cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
+
+       if (cause_misc) {
+               mvpp2_cause_error(port->dev, cause_misc);
+
+               /* Clear the cause register */
+               mvpp2_write(port->priv, MVPP2_ISR_MISC_CAUSE_REG, 0);
+               mvpp2_write(port->priv, MVPP2_ISR_RX_TX_CAUSE_REG(port->id),
+                           cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
+       }
+
+       /* Release TX descriptors */
+       if (cause_tx) {
+               struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
+               struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
+
+               if (txq_pcpu->count)
+                       mvpp2_txq_done(port, txq, txq_pcpu);
+       }
+}
+
+static int mvpp2_poll(struct napi_struct *napi, int budget)
+{
+       u32 cause_rx_tx, cause_rx;
+       int rx_done = 0;
+       struct mvpp2_port *port = netdev_priv(napi->dev);
+
+       on_each_cpu(mvpp2_txq_done_percpu, port, 1);
+
+       cause_rx_tx = mvpp2_read(port->priv,
+                                MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
+       cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
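+       /* cause_rx holds one pending bit per Rx queue (bits 0-15) */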
+
+       /* Process RX packets */
+       cause_rx |= port->pending_cause_rx;
+       while (cause_rx && budget > 0) {
+               int count;
+               struct mvpp2_rx_queue *rxq;
+
+               rxq = mvpp2_get_rx_queue(port, cause_rx);
+               if (!rxq)
+                       break;
+
+               count = mvpp2_rx(port, budget, rxq);
+               rx_done += count;
+               budget -= count;
+               if (budget > 0) {
+                       /* Clear the bit associated to this Rx queue
+                        * so that next iteration will continue from
+                        * the next Rx queue.
+                        */
+                       cause_rx &= ~(1 << rxq->logic_rxq);
+               }
+       }
+
+       if (budget > 0) {
+               cause_rx = 0;
+               napi_complete(napi);
+
+               mvpp2_interrupts_enable(port);
+       }
+       port->pending_cause_rx = cause_rx;
+       return rx_done;
+}
+
+/* Set hw internals when starting port */
+static void mvpp2_start_dev(struct mvpp2_port *port)
+{
+       mvpp2_gmac_max_rx_size_set(port);
+       mvpp2_txp_max_tx_size_set(port);
+
+       napi_enable(&port->napi);
+
+       /* Enable interrupts on all CPUs */
+       mvpp2_interrupts_enable(port);
+
+       mvpp2_port_enable(port);
+       phy_start(port->phy_dev);
+       netif_tx_start_all_queues(port->dev);
+}
+
+/* Set hw internals when stopping port */
+static void mvpp2_stop_dev(struct mvpp2_port *port)
+{
+       /* Stop new packets from arriving to RXQs */
+       mvpp2_ingress_disable(port);
+
+       mdelay(10);
+
+       /* Disable interrupts on all CPUs */
+       mvpp2_interrupts_disable(port);
+
+       napi_disable(&port->napi);
+
+       netif_carrier_off(port->dev);
+       netif_tx_stop_all_queues(port->dev);
+
+       mvpp2_egress_disable(port);
+       mvpp2_port_disable(port);
+       phy_stop(port->phy_dev);
+}
+
+/* Return a valid (possibly rounded) MTU, or a negative errno */
+static inline int mvpp2_check_mtu_valid(struct net_device *dev, int mtu)
+{
+       if (mtu < 68) {
+               netdev_err(dev, "cannot change mtu to less than 68\n");
+               return -EINVAL;
+       }
+
+       /* 9676 == 9700 - 20, with rounding to a multiple of 8 */
+       if (mtu > 9676) {
+               netdev_info(dev, "illegal MTU value %d, round to 9676\n", mtu);
+               mtu = 9676;
+       }
+
+       if (!IS_ALIGNED(MVPP2_RX_PKT_SIZE(mtu), 8)) {
+               netdev_info(dev, "illegal MTU value %d, round to %d\n", mtu,
+                           ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8));
+               mtu = ALIGN(MVPP2_RX_PKT_SIZE(mtu), 8);
+       }
+
+       return mtu;
+}
+
+static int mvpp2_check_ringparam_valid(struct net_device *dev,
+                                      struct ethtool_ringparam *ring)
+{
+       u16 new_rx_pending = ring->rx_pending;
+       u16 new_tx_pending = ring->tx_pending;
+
+       if (ring->rx_pending == 0 || ring->tx_pending == 0)
+               return -EINVAL;
+
+       if (ring->rx_pending > MVPP2_MAX_RXD)
+               new_rx_pending = MVPP2_MAX_RXD;
+       else if (!IS_ALIGNED(ring->rx_pending, 16))
+               new_rx_pending = ALIGN(ring->rx_pending, 16);
+
+       if (ring->tx_pending > MVPP2_MAX_TXD)
+               new_tx_pending = MVPP2_MAX_TXD;
+       else if (!IS_ALIGNED(ring->tx_pending, 32))
+               new_tx_pending = ALIGN(ring->tx_pending, 32);
+
+       if (ring->rx_pending != new_rx_pending) {
+               netdev_info(dev, "illegal Rx ring size value %d, round to %d\n",
+                           ring->rx_pending, new_rx_pending);
+               ring->rx_pending = new_rx_pending;
+       }
+
+       if (ring->tx_pending != new_tx_pending) {
+               netdev_info(dev, "illegal Tx ring size value %d, round to %d\n",
+                           ring->tx_pending, new_tx_pending);
+               ring->tx_pending = new_tx_pending;
+       }
+
+       return 0;
+}
+
+static void mvpp2_get_mac_address(struct mvpp2_port *port, unsigned char *addr)
+{
+       u32 mac_addr_l, mac_addr_m, mac_addr_h;
+
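+       /* The address is spread over three registers: four bytes in the
+        * high register, one in the middle one and one in the low one.
+        */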
+       mac_addr_l = readl(port->base + MVPP2_GMAC_CTRL_1_REG);
+       mac_addr_m = readl(port->priv->lms_base + MVPP2_SRC_ADDR_MIDDLE);
+       mac_addr_h = readl(port->priv->lms_base + MVPP2_SRC_ADDR_HIGH);
+       addr[0] = (mac_addr_h >> 24) & 0xFF;
+       addr[1] = (mac_addr_h >> 16) & 0xFF;
+       addr[2] = (mac_addr_h >> 8) & 0xFF;
+       addr[3] = mac_addr_h & 0xFF;
+       addr[4] = mac_addr_m & 0xFF;
+       addr[5] = (mac_addr_l >> MVPP2_GMAC_SA_LOW_OFFS) & 0xFF;
+}
+
+static int mvpp2_phy_connect(struct mvpp2_port *port)
+{
+       struct phy_device *phy_dev;
+
+       phy_dev = of_phy_connect(port->dev, port->phy_node, mvpp2_link_event, 0,
+                                port->phy_interface);
+       if (!phy_dev) {
+               netdev_err(port->dev, "cannot connect to phy\n");
+               return -ENODEV;
+       }
+       phy_dev->supported &= PHY_GBIT_FEATURES;
+       phy_dev->advertising = phy_dev->supported;
+
+       port->phy_dev = phy_dev;
+       port->link    = 0;
+       port->duplex  = 0;
+       port->speed   = 0;
+
+       return 0;
+}
+
+static void mvpp2_phy_disconnect(struct mvpp2_port *port)
+{
+       phy_disconnect(port->phy_dev);
+       port->phy_dev = NULL;
+}
+
+static int mvpp2_open(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       unsigned char mac_bcast[ETH_ALEN] = {
+                       0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
+       int err;
+
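+       /* Teach the header parser to accept broadcast and the port's own MAC */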
+       err = mvpp2_prs_mac_da_accept(port->priv, port->id, mac_bcast, true);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_mac_da_accept BC failed\n");
+               return err;
+       }
+       err = mvpp2_prs_mac_da_accept(port->priv, port->id,
+                                     dev->dev_addr, true);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_mac_da_accept MC failed\n");
+               return err;
+       }
+       err = mvpp2_prs_tag_mode_set(port->priv, port->id, MVPP2_TAG_TYPE_MH);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_tag_mode_set failed\n");
+               return err;
+       }
+       err = mvpp2_prs_def_flow(port);
+       if (err) {
+               netdev_err(dev, "mvpp2_prs_def_flow failed\n");
+               return err;
+       }
+
+       /* Allocate the Rx/Tx queues */
+       err = mvpp2_setup_rxqs(port);
+       if (err) {
+               netdev_err(port->dev, "cannot allocate Rx queues\n");
+               return err;
+       }
+
+       err = mvpp2_setup_txqs(port);
+       if (err) {
+               netdev_err(port->dev, "cannot allocate Tx queues\n");
+               goto err_cleanup_rxqs;
+       }
+
+       err = request_irq(port->irq, mvpp2_isr, 0, dev->name, port);
+       if (err) {
+               netdev_err(port->dev, "cannot request IRQ %d\n", port->irq);
+               goto err_cleanup_txqs;
+       }
+
+       /* The link is down by default */
+       netif_carrier_off(port->dev);
+
+       err = mvpp2_phy_connect(port);
+       if (err < 0)
+               goto err_free_irq;
+
+       /* Unmask interrupts on all CPUs */
+       on_each_cpu(mvpp2_interrupts_unmask, port, 1);
+
+       mvpp2_start_dev(port);
+
+       return 0;
+
+err_free_irq:
+       free_irq(port->irq, port);
+err_cleanup_txqs:
+       mvpp2_cleanup_txqs(port);
+err_cleanup_rxqs:
+       mvpp2_cleanup_rxqs(port);
+       return err;
+}
+
+static int mvpp2_stop(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       mvpp2_stop_dev(port);
+       mvpp2_phy_disconnect(port);
+
+       /* Mask interrupts on all CPUs */
+       on_each_cpu(mvpp2_interrupts_mask, port, 1);
+
+       free_irq(port->irq, port);
+       mvpp2_cleanup_rxqs(port);
+       mvpp2_cleanup_txqs(port);
+
+       return 0;
+}
+
+static void mvpp2_set_rx_mode(struct net_device *dev)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       struct mvpp2 *priv = port->priv;
+       struct netdev_hw_addr *ha;
+       int id = port->id;
+       bool allmulti = dev->flags & IFF_ALLMULTI;
+
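+       /* Mirror the promiscuous and allmulti flags into the header parser */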
+       mvpp2_prs_mac_promisc_set(priv, id, dev->flags & IFF_PROMISC);
+       mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_ALL, allmulti);
+       mvpp2_prs_mac_multi_set(priv, id, MVPP2_PE_MAC_MC_IP6, allmulti);
+
+       /* Remove all of this port's multicast entries */
+       mvpp2_prs_mcast_del_all(priv, id);
+
+       if (allmulti && !netdev_mc_empty(dev)) {
+               netdev_for_each_mc_addr(ha, dev)
+                       mvpp2_prs_mac_da_accept(priv, id, ha->addr, true);
+       }
+}
+
+static int mvpp2_set_mac_address(struct net_device *dev, void *p)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       const struct sockaddr *addr = p;
+       int err;
+
+       if (!is_valid_ether_addr(addr->sa_data)) {
+               err = -EADDRNOTAVAIL;
+               goto error;
+       }
+
+       if (!netif_running(dev)) {
+               err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+               if (!err)
+                       return 0;
+               /* Reconfigure parser to accept the original MAC address */
+               err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+               if (err)
+                       goto error;
+       }
+
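+       /* The interface is running: stop it, update the parser, then restart */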
+       mvpp2_stop_dev(port);
+
+       err = mvpp2_prs_update_mac_da(dev, addr->sa_data);
+       if (!err)
+               goto out_start;
+
+       /* Reconfigure parser to accept the original MAC address */
+       err = mvpp2_prs_update_mac_da(dev, dev->dev_addr);
+       if (err)
+               goto error;
+out_start:
+       mvpp2_start_dev(port);
+       mvpp2_egress_enable(port);
+       mvpp2_ingress_enable(port);
+       return 0;
+
+error:
+       netdev_err(dev, "fail to change MAC address\n");
+       return err;
+}
+
+static int mvpp2_change_mtu(struct net_device *dev, int mtu)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int err;
+
+       mtu = mvpp2_check_mtu_valid(dev, mtu);
+       if (mtu < 0) {
+               err = mtu;
+               goto error;
+       }
+
+       if (!netif_running(dev)) {
+               err = mvpp2_bm_update_mtu(dev, mtu);
+               if (!err) {
+                       port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+                       return 0;
+               }
+
+               /* Reconfigure BM to the original MTU */
+               err = mvpp2_bm_update_mtu(dev, dev->mtu);
+               if (err)
+                       goto error;
+       }
+
+       mvpp2_stop_dev(port);
+
+       err = mvpp2_bm_update_mtu(dev, mtu);
+       if (!err) {
+               port->pkt_size = MVPP2_RX_PKT_SIZE(mtu);
+               goto out_start;
+       }
+
+       /* Reconfigure BM to the original MTU */
+       err = mvpp2_bm_update_mtu(dev, dev->mtu);
+       if (err)
+               goto error;
+
+out_start:
+       mvpp2_start_dev(port);
+       mvpp2_egress_enable(port);
+       mvpp2_ingress_enable(port);
+
+       return 0;
+
+error:
+       netdev_err(dev, "fail to change MTU\n");
+       return err;
+}
+
+static struct rtnl_link_stats64 *
+mvpp2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       unsigned int start;
+       int cpu;
+
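+       /* Fold in the per-CPU counters, retrying any that change mid-read */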
+       for_each_possible_cpu(cpu) {
+               struct mvpp2_pcpu_stats *cpu_stats;
+               u64 rx_packets;
+               u64 rx_bytes;
+               u64 tx_packets;
+               u64 tx_bytes;
+
+               cpu_stats = per_cpu_ptr(port->stats, cpu);
+               do {
+                       start = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       rx_packets = cpu_stats->rx_packets;
+                       rx_bytes   = cpu_stats->rx_bytes;
+                       tx_packets = cpu_stats->tx_packets;
+                       tx_bytes   = cpu_stats->tx_bytes;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, start));
+
+               stats->rx_packets += rx_packets;
+               stats->rx_bytes   += rx_bytes;
+               stats->tx_packets += tx_packets;
+               stats->tx_bytes   += tx_bytes;
+       }
+
+       stats->rx_errors        = dev->stats.rx_errors;
+       stats->rx_dropped       = dev->stats.rx_dropped;
+       stats->tx_dropped       = dev->stats.tx_dropped;
+
+       return stats;
+}
+
+static int mvpp2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int ret;
+
+       if (!port->phy_dev)
+               return -EOPNOTSUPP;
+
+       ret = phy_mii_ioctl(port->phy_dev, ifr, cmd);
+       if (!ret)
+               mvpp2_link_event(dev);
+
+       return ret;
+}
+
+/* Ethtool methods */
+
+/* Get settings (phy address, speed) for ethtool */
+static int mvpp2_ethtool_get_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       if (!port->phy_dev)
+               return -ENODEV;
+       return phy_ethtool_gset(port->phy_dev, cmd);
+}
+
+/* Set settings (phy address, speed) for ethtool */
+static int mvpp2_ethtool_set_settings(struct net_device *dev,
+                                     struct ethtool_cmd *cmd)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       if (!port->phy_dev)
+               return -ENODEV;
+       return phy_ethtool_sset(port->phy_dev, cmd);
+}
+
+/* Set interrupt coalescing for ethtool */
+static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
+                                     struct ethtool_coalesce *c)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       int queue;
+
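+       /* Apply the same Rx coalescing parameters to every Rx queue */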
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+               rxq->time_coal = c->rx_coalesce_usecs;
+               rxq->pkts_coal = c->rx_max_coalesced_frames;
+               mvpp2_rx_pkts_coal_set(port, rxq, rxq->pkts_coal);
+               mvpp2_rx_time_coal_set(port, rxq, rxq->time_coal);
+       }
+
+       for (queue = 0; queue < txq_number; queue++) {
+               struct mvpp2_tx_queue *txq = port->txqs[queue];
+
+               txq->done_pkts_coal = c->tx_max_coalesced_frames;
+       }
+
+       on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
+       return 0;
+}
+
+/* Get interrupt coalescing for ethtool */
+static int mvpp2_ethtool_get_coalesce(struct net_device *dev,
+                                     struct ethtool_coalesce *c)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       c->rx_coalesce_usecs        = port->rxqs[0]->time_coal;
+       c->rx_max_coalesced_frames  = port->rxqs[0]->pkts_coal;
+       c->tx_max_coalesced_frames  = port->txqs[0]->done_pkts_coal;
+       return 0;
+}
+
+static void mvpp2_ethtool_get_drvinfo(struct net_device *dev,
+                                     struct ethtool_drvinfo *drvinfo)
+{
+       strlcpy(drvinfo->driver, MVPP2_DRIVER_NAME,
+               sizeof(drvinfo->driver));
+       strlcpy(drvinfo->version, MVPP2_DRIVER_VERSION,
+               sizeof(drvinfo->version));
+       strlcpy(drvinfo->bus_info, dev_name(&dev->dev),
+               sizeof(drvinfo->bus_info));
+}
+
+static void mvpp2_ethtool_get_ringparam(struct net_device *dev,
+                                       struct ethtool_ringparam *ring)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+
+       ring->rx_max_pending = MVPP2_MAX_RXD;
+       ring->tx_max_pending = MVPP2_MAX_TXD;
+       ring->rx_pending = port->rx_ring_size;
+       ring->tx_pending = port->tx_ring_size;
+}
+
+static int mvpp2_ethtool_set_ringparam(struct net_device *dev,
+                                      struct ethtool_ringparam *ring)
+{
+       struct mvpp2_port *port = netdev_priv(dev);
+       u16 prev_rx_ring_size = port->rx_ring_size;
+       u16 prev_tx_ring_size = port->tx_ring_size;
+       int err;
+
+       err = mvpp2_check_ringparam_valid(dev, ring);
+       if (err)
+               return err;
+
+       if (!netif_running(dev)) {
+               port->rx_ring_size = ring->rx_pending;
+               port->tx_ring_size = ring->tx_pending;
+               return 0;
+       }
+
+       /* The interface is running, so we have to force a
+        * reallocation of the queues
+        */
+       mvpp2_stop_dev(port);
+       mvpp2_cleanup_rxqs(port);
+       mvpp2_cleanup_txqs(port);
+
+       port->rx_ring_size = ring->rx_pending;
+       port->tx_ring_size = ring->tx_pending;
+
+       err = mvpp2_setup_rxqs(port);
+       if (err) {
+               /* Reallocate Rx queues with the original ring size */
+               port->rx_ring_size = prev_rx_ring_size;
+               ring->rx_pending = prev_rx_ring_size;
+               err = mvpp2_setup_rxqs(port);
+               if (err)
+                       goto err_out;
+       }
+       err = mvpp2_setup_txqs(port);
+       if (err) {
+               /* Reallocate Tx queues with the original ring size */
+               port->tx_ring_size = prev_tx_ring_size;
+               ring->tx_pending = prev_tx_ring_size;
+               err = mvpp2_setup_txqs(port);
+               if (err)
+                       goto err_clean_rxqs;
+       }
+
+       mvpp2_start_dev(port);
+       mvpp2_egress_enable(port);
+       mvpp2_ingress_enable(port);
+
+       return 0;
+
+err_clean_rxqs:
+       mvpp2_cleanup_rxqs(port);
+err_out:
+       netdev_err(dev, "fail to change ring parameters");
+       return err;
+}
+
+/* Device ops */
+
+static const struct net_device_ops mvpp2_netdev_ops = {
+       .ndo_open               = mvpp2_open,
+       .ndo_stop               = mvpp2_stop,
+       .ndo_start_xmit         = mvpp2_tx,
+       .ndo_set_rx_mode        = mvpp2_set_rx_mode,
+       .ndo_set_mac_address    = mvpp2_set_mac_address,
+       .ndo_change_mtu         = mvpp2_change_mtu,
+       .ndo_get_stats64        = mvpp2_get_stats64,
+       .ndo_do_ioctl           = mvpp2_ioctl,
+};
+
+static const struct ethtool_ops mvpp2_eth_tool_ops = {
+       .get_link       = ethtool_op_get_link,
+       .get_settings   = mvpp2_ethtool_get_settings,
+       .set_settings   = mvpp2_ethtool_set_settings,
+       .set_coalesce   = mvpp2_ethtool_set_coalesce,
+       .get_coalesce   = mvpp2_ethtool_get_coalesce,
+       .get_drvinfo    = mvpp2_ethtool_get_drvinfo,
+       .get_ringparam  = mvpp2_ethtool_get_ringparam,
+       .set_ringparam  = mvpp2_ethtool_set_ringparam,
+};
+
+/* Driver initialization */
+
+static void mvpp2_port_power_up(struct mvpp2_port *port)
+{
+       mvpp2_port_mii_set(port);
+       mvpp2_port_periodic_xon_disable(port);
+       mvpp2_port_fc_adv_enable(port);
+       mvpp2_port_reset(port);
+}
+
+/* Initialize port HW */
+static int mvpp2_port_init(struct mvpp2_port *port)
+{
+       struct device *dev = port->dev->dev.parent;
+       struct mvpp2 *priv = port->priv;
+       struct mvpp2_txq_pcpu *txq_pcpu;
+       int queue, cpu, err;
+
+       if (port->first_rxq + rxq_number > MVPP2_RXQ_TOTAL_NUM)
+               return -EINVAL;
+
+       /* Disable port */
+       mvpp2_egress_disable(port);
+       mvpp2_port_disable(port);
+
+       port->txqs = devm_kcalloc(dev, txq_number, sizeof(*port->txqs),
+                                 GFP_KERNEL);
+       if (!port->txqs)
+               return -ENOMEM;
+
+       /* Associate physical Tx queues with this port and initialize them.
+        * The mapping is predefined.
+        */
+       for (queue = 0; queue < txq_number; queue++) {
+               int queue_phy_id = mvpp2_txq_phys(port->id, queue);
+               struct mvpp2_tx_queue *txq;
+
+               txq = devm_kzalloc(dev, sizeof(*txq), GFP_KERNEL);
+               if (!txq)
+                       return -ENOMEM;
+
+               txq->pcpu = alloc_percpu(struct mvpp2_txq_pcpu);
+               if (!txq->pcpu) {
+                       err = -ENOMEM;
+                       goto err_free_percpu;
+               }
+
+               txq->id = queue_phy_id;
+               txq->log_id = queue;
+               txq->done_pkts_coal = MVPP2_TXDONE_COAL_PKTS_THRESH;
+               for_each_present_cpu(cpu) {
+                       txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
+                       txq_pcpu->cpu = cpu;
+               }
+
+               port->txqs[queue] = txq;
+       }
+
+       port->rxqs = devm_kcalloc(dev, rxq_number, sizeof(*port->rxqs),
+                                 GFP_KERNEL);
+       if (!port->rxqs) {
+               err = -ENOMEM;
+               goto err_free_percpu;
+       }
+
+       /* Allocate and initialize the Rx queues for this port */
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvpp2_rx_queue *rxq;
+
+               /* Map physical Rx queue to port's logical Rx queue */
+               rxq = devm_kzalloc(dev, sizeof(*rxq), GFP_KERNEL);
+               if (!rxq)
+                       goto err_free_percpu;
+               /* Map this Rx queue to a physical queue */
+               rxq->id = port->first_rxq + queue;
+               rxq->port = port->id;
+               rxq->logic_rxq = queue;
+
+               port->rxqs[queue] = rxq;
+       }
+
+       /* Configure Rx queue group interrupt for this port */
+       mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(port->id), rxq_number);
+
+       /* Create Rx descriptor rings */
+       for (queue = 0; queue < rxq_number; queue++) {
+               struct mvpp2_rx_queue *rxq = port->rxqs[queue];
+
+               rxq->size = port->rx_ring_size;
+               rxq->pkts_coal = MVPP2_RX_COAL_PKTS;
+               rxq->time_coal = MVPP2_RX_COAL_USEC;
+       }
+
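+       /* Keep ingress disabled until the port is brought up */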
+       mvpp2_ingress_disable(port);
+
+       /* Port default configuration */
+       mvpp2_defaults_set(port);
+
+       /* Port's classifier configuration */
+       mvpp2_cls_oversize_rxq_set(port);
+       mvpp2_cls_port_config(port);
+
+       /* Provide an initial Rx packet size */
+       port->pkt_size = MVPP2_RX_PKT_SIZE(port->dev->mtu);
+
+       /* Initialize buffer pools for software forwarding (swf) */
+       err = mvpp2_swf_bm_pool_init(port);
+       if (err)
+               goto err_free_percpu;
+
+       return 0;
+
+err_free_percpu:
+       for (queue = 0; queue < txq_number; queue++) {
+               if (!port->txqs[queue])
+                       continue;
+               free_percpu(port->txqs[queue]->pcpu);
+       }
+       return err;
+}
+
+/* Port initialization */
+static int mvpp2_port_probe(struct platform_device *pdev,
+                           struct device_node *port_node,
+                           struct mvpp2 *priv,
+                           int *next_first_rxq)
+{
+       struct device_node *phy_node;
+       struct mvpp2_port *port;
+       struct net_device *dev;
+       struct resource *res;
+       const char *dt_mac_addr;
+       const char *mac_from;
+       char hw_mac_addr[ETH_ALEN];
+       u32 id;
+       int features;
+       int phy_mode;
+       int priv_common_regs_num = 2;
+       int err, i;
+
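+       /* One netdev per port, with a fixed number of Tx and Rx queues */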
+       dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
+                                rxq_number);
+       if (!dev)
+               return -ENOMEM;
+
+       phy_node = of_parse_phandle(port_node, "phy", 0);
+       if (!phy_node) {
+               dev_err(&pdev->dev, "missing phy\n");
+               err = -ENODEV;
+               goto err_free_netdev;
+       }
+
+       phy_mode = of_get_phy_mode(port_node);
+       if (phy_mode < 0) {
+               dev_err(&pdev->dev, "incorrect phy mode\n");
+               err = phy_mode;
+               goto err_free_netdev;
+       }
+
+       if (of_property_read_u32(port_node, "port-id", &id)) {
+               err = -EINVAL;
+               dev_err(&pdev->dev, "missing port-id value\n");
+               goto err_free_netdev;
+       }
+
+       dev->tx_queue_len = MVPP2_MAX_TXD;
+       dev->watchdog_timeo = 5 * HZ;
+       dev->netdev_ops = &mvpp2_netdev_ops;
+       dev->ethtool_ops = &mvpp2_eth_tool_ops;
+
+       port = netdev_priv(dev);
+
+       port->irq = irq_of_parse_and_map(port_node, 0);
+       if (port->irq <= 0) {
+               err = -EINVAL;
+               goto err_free_netdev;
+       }
+
+       if (of_property_read_bool(port_node, "marvell,loopback"))
+               port->flags |= MVPP2_F_LOOPBACK;
+
+       port->priv = priv;
+       port->id = id;
+       port->first_rxq = *next_first_rxq;
+       port->phy_node = phy_node;
+       port->phy_interface = phy_mode;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM,
+                                   priv_common_regs_num + id);
+       port->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(port->base)) {
+               err = PTR_ERR(port->base);
+               goto err_free_irq;
+       }
+
+       /* Allocate per-CPU stats */
+       port->stats = netdev_alloc_pcpu_stats(struct mvpp2_pcpu_stats);
+       if (!port->stats) {
+               err = -ENOMEM;
+               goto err_free_irq;
+       }
+
+       dt_mac_addr = of_get_mac_address(port_node);
+       if (dt_mac_addr && is_valid_ether_addr(dt_mac_addr)) {
+               mac_from = "device tree";
+               ether_addr_copy(dev->dev_addr, dt_mac_addr);
+       } else {
+               mvpp2_get_mac_address(port, hw_mac_addr);
+               if (is_valid_ether_addr(hw_mac_addr)) {
+                       mac_from = "hardware";
+                       ether_addr_copy(dev->dev_addr, hw_mac_addr);
+               } else {
+                       mac_from = "random";
+                       eth_hw_addr_random(dev);
+               }
+       }
+
+       port->tx_ring_size = MVPP2_MAX_TXD;
+       port->rx_ring_size = MVPP2_MAX_RXD;
+       port->dev = dev;
+       SET_NETDEV_DEV(dev, &pdev->dev);
+
+       err = mvpp2_port_init(port);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to init port %d\n", id);
+               goto err_free_stats;
+       }
+       mvpp2_port_power_up(port);
+
+       netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
+       features = NETIF_F_SG | NETIF_F_IP_CSUM;
+       dev->features = features | NETIF_F_RXCSUM;
+       dev->hw_features |= features | NETIF_F_RXCSUM | NETIF_F_GRO;
+       dev->vlan_features |= features;
+
+       err = register_netdev(dev);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to register netdev\n");
+               goto err_free_txq_pcpu;
+       }
+       netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
+
+       /* Increment the first Rx queue number to be used by the next port */
+       *next_first_rxq += rxq_number;
+       priv->port_list[id] = port;
+       return 0;
+
+err_free_txq_pcpu:
+       for (i = 0; i < txq_number; i++)
+               free_percpu(port->txqs[i]->pcpu);
+err_free_stats:
+       free_percpu(port->stats);
+err_free_irq:
+       irq_dispose_mapping(port->irq);
+err_free_netdev:
+       free_netdev(dev);
+       return err;
+}
+
+/* Port removal routine */
+static void mvpp2_port_remove(struct mvpp2_port *port)
+{
+       int i;
+
+       unregister_netdev(port->dev);
+       free_percpu(port->stats);
+       for (i = 0; i < txq_number; i++)
+               free_percpu(port->txqs[i]->pcpu);
+       irq_dispose_mapping(port->irq);
+       free_netdev(port->dev);
+}
+
+/* Initialize decoding windows */
+static void mvpp2_conf_mbus_windows(const struct mbus_dram_target_info *dram,
+                                   struct mvpp2 *priv)
+{
+       u32 win_enable;
+       int i;
+
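+       /* Clear all six windows; only the first four have remap registers */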
+       for (i = 0; i < 6; i++) {
+               mvpp2_write(priv, MVPP2_WIN_BASE(i), 0);
+               mvpp2_write(priv, MVPP2_WIN_SIZE(i), 0);
+
+               if (i < 4)
+                       mvpp2_write(priv, MVPP2_WIN_REMAP(i), 0);
+       }
+
+       win_enable = 0;
+
+       for (i = 0; i < dram->num_cs; i++) {
+               const struct mbus_dram_window *cs = dram->cs + i;
+
+               mvpp2_write(priv, MVPP2_WIN_BASE(i),
+                           (cs->base & 0xffff0000) | (cs->mbus_attr << 8) |
+                           dram->mbus_dram_target_id);
+
+               mvpp2_write(priv, MVPP2_WIN_SIZE(i),
+                           (cs->size - 1) & 0xffff0000);
+
+               win_enable |= (1 << i);
+       }
+
+       mvpp2_write(priv, MVPP2_BASE_ADDR_ENABLE, win_enable);
+}
+
+/* Initialize Rx FIFOs */
+static void mvpp2_rx_fifo_init(struct mvpp2 *priv)
+{
+       int port;
+
+       for (port = 0; port < MVPP2_MAX_PORTS; port++) {
+               mvpp2_write(priv, MVPP2_RX_DATA_FIFO_SIZE_REG(port),
+                           MVPP2_RX_FIFO_PORT_DATA_SIZE);
+               mvpp2_write(priv, MVPP2_RX_ATTR_FIFO_SIZE_REG(port),
+                           MVPP2_RX_FIFO_PORT_ATTR_SIZE);
+       }
+
+       mvpp2_write(priv, MVPP2_RX_MIN_PKT_SIZE_REG,
+                   MVPP2_RX_FIFO_PORT_MIN_PKT);
+       mvpp2_write(priv, MVPP2_RX_FIFO_INIT_REG, 0x1);
+}
+
+/* Initialize network controller common part HW */
+static int mvpp2_init(struct platform_device *pdev, struct mvpp2 *priv)
+{
+       const struct mbus_dram_target_info *dram_target_info;
+       int err, i;
+       u32 val;
+
+       /* Check hardware constraints */
+       if (rxq_number % 4 || (rxq_number > MVPP2_MAX_RXQ) ||
+           (txq_number > MVPP2_MAX_TXQ)) {
+               dev_err(&pdev->dev, "invalid queue size parameter\n");
+               return -EINVAL;
+       }
+
+       /* MBUS windows configuration */
+       dram_target_info = mv_mbus_dram_info();
+       if (dram_target_info)
+               mvpp2_conf_mbus_windows(dram_target_info, priv);
+
+       /* Disable HW PHY polling */
+       val = readl(priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+       val |= MVPP2_PHY_AN_STOP_SMI0_MASK;
+       writel(val, priv->lms_base + MVPP2_PHY_AN_CFG0_REG);
+
+       /* Allocate and initialize aggregated TXQs */
+       priv->aggr_txqs = devm_kcalloc(&pdev->dev, num_present_cpus(),
+                                      sizeof(struct mvpp2_tx_queue),
+                                      GFP_KERNEL);
+       if (!priv->aggr_txqs)
+               return -ENOMEM;
+
+       for_each_present_cpu(i) {
+               priv->aggr_txqs[i].id = i;
+               priv->aggr_txqs[i].size = MVPP2_AGGR_TXQ_SIZE;
+               err = mvpp2_aggr_txq_init(pdev, &priv->aggr_txqs[i],
+                                         MVPP2_AGGR_TXQ_SIZE, i, priv);
+               if (err < 0)
+                       return err;
+       }
+
+       /* Rx FIFO init */
+       mvpp2_rx_fifo_init(priv);
+
+       /* Reset Rx queue group interrupt configuration */
+       for (i = 0; i < MVPP2_MAX_PORTS; i++)
+               mvpp2_write(priv, MVPP2_ISR_RXQ_GROUP_REG(i), rxq_number);
+
+       writel(MVPP2_EXT_GLOBAL_CTRL_DEFAULT,
+              priv->lms_base + MVPP2_MNG_EXTENDED_GLOBAL_CTRL_REG);
+
+       /* Allow cache snoop when transmitting packets */
+       mvpp2_write(priv, MVPP2_TX_SNOOP_REG, 0x1);
+
+       /* Buffer Manager initialization */
+       err = mvpp2_bm_init(pdev, priv);
+       if (err < 0)
+               return err;
+
+       /* Parser default initialization */
+       err = mvpp2_prs_default_init(pdev, priv);
+       if (err < 0)
+               return err;
+
+       /* Classifier default initialization */
+       mvpp2_cls_init(priv);
+
+       return 0;
+}
+
+static int mvpp2_probe(struct platform_device *pdev)
+{
+       struct device_node *dn = pdev->dev.of_node;
+       struct device_node *port_node;
+       struct mvpp2 *priv;
+       struct resource *res;
+       int port_count, first_rxq;
+       int err;
+
+       priv = devm_kzalloc(&pdev->dev, sizeof(struct mvpp2), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
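+       /* Map the packet processor and LMS register regions */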
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       priv->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->base))
+               return PTR_ERR(priv->base);
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       priv->lms_base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(priv->lms_base))
+               return PTR_ERR(priv->lms_base);
+
+       priv->pp_clk = devm_clk_get(&pdev->dev, "pp_clk");
+       if (IS_ERR(priv->pp_clk))
+               return PTR_ERR(priv->pp_clk);
+       err = clk_prepare_enable(priv->pp_clk);
+       if (err < 0)
+               return err;
+
+       priv->gop_clk = devm_clk_get(&pdev->dev, "gop_clk");
+       if (IS_ERR(priv->gop_clk)) {
+               err = PTR_ERR(priv->gop_clk);
+               goto err_pp_clk;
+       }
+       err = clk_prepare_enable(priv->gop_clk);
+       if (err < 0)
+               goto err_pp_clk;
+
+       /* Get system's tclk rate */
+       priv->tclk = clk_get_rate(priv->pp_clk);
+
+       /* Initialize network controller */
+       err = mvpp2_init(pdev, priv);
+       if (err < 0) {
+               dev_err(&pdev->dev, "failed to initialize controller\n");
+               goto err_gop_clk;
+       }
+
+       port_count = of_get_available_child_count(dn);
+       if (port_count == 0) {
+               dev_err(&pdev->dev, "no ports enabled\n");
+               err = -ENODEV;
+               goto err_gop_clk;
+       }
+
+       priv->port_list = devm_kcalloc(&pdev->dev, port_count,
+                                     sizeof(struct mvpp2_port *),
+                                     GFP_KERNEL);
+       if (!priv->port_list) {
+               err = -ENOMEM;
+               goto err_gop_clk;
+       }
+
+       /* Initialize ports */
+       first_rxq = 0;
+       for_each_available_child_of_node(dn, port_node) {
+               err = mvpp2_port_probe(pdev, port_node, priv, &first_rxq);
+               if (err < 0)
+                       goto err_gop_clk;
+       }
+
+       platform_set_drvdata(pdev, priv);
+       return 0;
+
+err_gop_clk:
+       clk_disable_unprepare(priv->gop_clk);
+err_pp_clk:
+       clk_disable_unprepare(priv->pp_clk);
+       return err;
+}
+
+static int mvpp2_remove(struct platform_device *pdev)
+{
+       struct mvpp2 *priv = platform_get_drvdata(pdev);
+       struct device_node *dn = pdev->dev.of_node;
+       struct device_node *port_node;
+       int i = 0;
+
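+       /* Remove the ports in the same device tree order they were probed */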
+       for_each_available_child_of_node(dn, port_node) {
+               if (priv->port_list[i])
+                       mvpp2_port_remove(priv->port_list[i]);
+               i++;
+       }
+
+       for (i = 0; i < MVPP2_BM_POOLS_NUM; i++) {
+               struct mvpp2_bm_pool *bm_pool = &priv->bm_pools[i];
+
+               mvpp2_bm_pool_destroy(pdev, priv, bm_pool);
+       }
+
+       for_each_present_cpu(i) {
+               struct mvpp2_tx_queue *aggr_txq = &priv->aggr_txqs[i];
+
+               dma_free_coherent(&pdev->dev,
+                                 MVPP2_AGGR_TXQ_SIZE * MVPP2_DESC_ALIGNED_SIZE,
+                                 aggr_txq->descs,
+                                 aggr_txq->descs_phys);
+       }
+
+       clk_disable_unprepare(priv->pp_clk);
+       clk_disable_unprepare(priv->gop_clk);
+
+       return 0;
+}
+
+static const struct of_device_id mvpp2_match[] = {
+       { .compatible = "marvell,armada-375-pp2" },
+       { }
+};
+MODULE_DEVICE_TABLE(of, mvpp2_match);
+
+static struct platform_driver mvpp2_driver = {
+       .probe = mvpp2_probe,
+       .remove = mvpp2_remove,
+       .driver = {
+               .name = MVPP2_DRIVER_NAME,
+               .of_match_table = mvpp2_match,
+       },
+};
+
+module_platform_driver(mvpp2_driver);
+
+MODULE_DESCRIPTION("Marvell PPv2 Ethernet Driver - www.marvell.com");
+MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>");
+MODULE_LICENSE("GPL v2");
index 7f81ae6..e912b68 100644 (file)
@@ -4199,6 +4199,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
                        DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
                },
        },
+       {
+               .ident = "FUJITSU SIEMENS A8NE-FM",
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
+                       DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
+               },
+       },
        {}
 };
 
index 80f7252..56022d6 100644 (file)
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
        init_completion(&cq->free);
 
        cq->irq = priv->eq_table.eq[cq->vector].irq;
-       cq->irq_affinity_change = false;
-
        return 0;
 
 err_radix:
index 4b21307..82322b1 100644 (file)
@@ -128,11 +128,16 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
                                        mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
                                                  name);
                                }
+
                        }
                } else {
                        cq->vector = (cq->ring + 1 + priv->port) %
                                mdev->dev->caps.num_comp_vectors;
                }
+
+               cq->irq_desc =
+                       irq_to_desc(mlx4_eq_get_irq(mdev->dev,
+                                                   cq->vector));
        } else {
                /* For TX we use the same irq per
                 * ring that we assigned for the RX */
@@ -187,8 +192,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
        mlx4_en_unmap_buffer(&cq->wqres.buf);
        mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
        if (priv->mdev->dev->caps.comp_pool && cq->vector) {
-               if (!cq->is_tx)
-                       irq_set_affinity_hint(cq->mcq.irq, NULL);
                mlx4_release_eq(priv->mdev->dev, cq->vector);
        }
        cq->vector = 0;
@@ -204,6 +207,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
        if (!cq->is_tx) {
                napi_hash_del(&cq->napi);
                synchronize_rcu();
+               irq_set_affinity_hint(cq->mcq.irq, NULL);
        }
        netif_napi_del(&cq->napi);
 
index fa1a069..e22f24f 100644 (file)
@@ -98,6 +98,10 @@ mlx4_en_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
        drvinfo->eedump_len = 0;
 }
 
+static const char mlx4_en_priv_flags[][ETH_GSTRING_LEN] = {
+       "blueflame",
+};
+
 static const char main_strings[][ETH_GSTRING_LEN] = {
        "rx_packets", "tx_packets", "rx_bytes", "tx_bytes", "rx_errors",
        "tx_errors", "rx_dropped", "tx_dropped", "multicast", "collisions",
@@ -235,6 +239,8 @@ static int mlx4_en_get_sset_count(struct net_device *dev, int sset)
        case ETH_SS_TEST:
                return MLX4_EN_NUM_SELF_TEST - !(priv->mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UC_LOOPBACK) * 2;
+       case ETH_SS_PRIV_FLAGS:
+               return ARRAY_SIZE(mlx4_en_priv_flags);
        default:
                return -EOPNOTSUPP;
        }
@@ -358,6 +364,12 @@ static void mlx4_en_get_strings(struct net_device *dev,
 #endif
                }
                break;
+       case ETH_SS_PRIV_FLAGS:
+               for (i = 0; i < ARRAY_SIZE(mlx4_en_priv_flags); i++)
+                       strcpy(data + i * ETH_GSTRING_LEN,
+                              mlx4_en_priv_flags[i]);
+               break;
+
        }
 }
 
@@ -417,6 +429,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
 
        coal->tx_coalesce_usecs = priv->tx_usecs;
        coal->tx_max_coalesced_frames = priv->tx_frames;
+       coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
+
        coal->rx_coalesce_usecs = priv->rx_usecs;
        coal->rx_max_coalesced_frames = priv->rx_frames;
 
@@ -426,6 +440,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
        coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
        coal->rate_sample_interval = priv->sample_interval;
        coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
+
        return 0;
 }
 
@@ -434,6 +449,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
 
+       if (!coal->tx_max_coalesced_frames_irq)
+               return -EINVAL;
+
        priv->rx_frames = (coal->rx_max_coalesced_frames ==
                           MLX4_EN_AUTO_CONF) ?
                                MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +475,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
        priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
        priv->sample_interval = coal->rate_sample_interval;
        priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
+       priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
 
        return mlx4_en_moderation_update(priv);
 }
@@ -1202,6 +1221,49 @@ static int mlx4_en_get_ts_info(struct net_device *dev,
        return ret;
 }
 
+static int mlx4_en_set_priv_flags(struct net_device *dev, u32 flags)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+       bool bf_enabled_new = !!(flags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+       bool bf_enabled_old = !!(priv->pflags & MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+       int i;
+
+       if (bf_enabled_new == bf_enabled_old)
+               return 0; /* Nothing to do */
+
+       if (bf_enabled_new) {
+               bool bf_supported = true;
+
+               for (i = 0; i < priv->tx_ring_num; i++)
+                       bf_supported &= priv->tx_ring[i]->bf_alloced;
+
+               if (!bf_supported) {
+                       en_err(priv, "BlueFlame is not supported\n");
+                       return -EINVAL;
+               }
+
+               priv->pflags |= MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+       } else {
+               priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+       }
+
+       for (i = 0; i < priv->tx_ring_num; i++)
+               priv->tx_ring[i]->bf_enabled = bf_enabled_new;
+
+       en_info(priv, "BlueFlame %s\n",
+               bf_enabled_new ?  "Enabled" : "Disabled");
+
+       return 0;
+}
+
+static u32 mlx4_en_get_priv_flags(struct net_device *dev)
+{
+       struct mlx4_en_priv *priv = netdev_priv(dev);
+
+       return priv->pflags;
+}
+
+
 const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_drvinfo = mlx4_en_get_drvinfo,
        .get_settings = mlx4_en_get_settings,
@@ -1229,6 +1291,8 @@ const struct ethtool_ops mlx4_en_ethtool_ops = {
        .get_channels = mlx4_en_get_channels,
        .set_channels = mlx4_en_set_channels,
        .get_ts_info = mlx4_en_get_ts_info,
+       .set_priv_flags = mlx4_en_set_priv_flags,
+       .get_priv_flags = mlx4_en_get_priv_flags,
 };
 
 
index f953c1d..3626fdf 100644 (file)
@@ -129,8 +129,10 @@ static int mlx4_en_get_profile(struct mlx4_en_dev *mdev)
        int i;
 
        params->udp_rss = udp_rss;
-       params->num_tx_rings_p_up = min_t(int, num_online_cpus(),
-                       MLX4_EN_MAX_TX_RING_P_UP);
+       params->num_tx_rings_p_up = mlx4_low_memory_profile() ?
+               MLX4_EN_MIN_TX_RING_P_UP :
+               min_t(int, num_online_cpus(), MLX4_EN_MAX_TX_RING_P_UP);
+
        if (params->udp_rss && !(mdev->dev->caps.flags
                                        & MLX4_DEV_CAP_FLAG_UDP_RSS)) {
                mlx4_warn(mdev, "UDP RSS is not supported on this device\n");
index 7d4fb7b..bb536aa 100644 (file)
@@ -644,6 +644,7 @@ static int mlx4_en_get_qp(struct mlx4_en_priv *priv)
                goto alloc_err;
        }
        memcpy(entry->mac, priv->dev->dev_addr, sizeof(entry->mac));
+       memcpy(priv->current_mac, entry->mac, sizeof(priv->current_mac));
        entry->reg_id = reg_id;
 
        hlist_add_head_rcu(&entry->hlist,
@@ -760,21 +761,22 @@ static int mlx4_en_replace_mac(struct mlx4_en_priv *priv, int qpn,
        return __mlx4_replace_mac(dev, priv->port, qpn, new_mac_u64);
 }
 
-static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv)
+static int mlx4_en_do_set_mac(struct mlx4_en_priv *priv,
+                             unsigned char new_mac[ETH_ALEN + 2])
 {
        int err = 0;
 
        if (priv->port_up) {
                /* Remove old MAC and insert the new one */
                err = mlx4_en_replace_mac(priv, priv->base_qpn,
-                                         priv->dev->dev_addr, priv->prev_mac);
+                                         new_mac, priv->current_mac);
                if (err)
                        en_err(priv, "Failed changing HW MAC address\n");
        } else
                en_dbg(HW, priv, "Port is down while registering mac, exiting...\n");
 
-       memcpy(priv->prev_mac, priv->dev->dev_addr,
-              sizeof(priv->prev_mac));
+       if (!err)
+               memcpy(priv->current_mac, new_mac, sizeof(priv->current_mac));
 
        return err;
 }
@@ -784,14 +786,17 @@ static int mlx4_en_set_mac(struct net_device *dev, void *addr)
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_en_dev *mdev = priv->mdev;
        struct sockaddr *saddr = addr;
+       unsigned char new_mac[ETH_ALEN + 2];
        int err;
 
        if (!is_valid_ether_addr(saddr->sa_data))
                return -EADDRNOTAVAIL;
 
        mutex_lock(&mdev->state_lock);
-       memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
-       err = mlx4_en_do_set_mac(priv);
+       memcpy(new_mac, saddr->sa_data, ETH_ALEN);
+       err = mlx4_en_do_set_mac(priv, new_mac);
+       if (!err)
+               memcpy(dev->dev_addr, saddr->sa_data, ETH_ALEN);
        mutex_unlock(&mdev->state_lock);
 
        return err;
@@ -940,11 +945,6 @@ static void mlx4_en_set_promisc_mode(struct mlx4_en_priv *priv,
                                          0, MLX4_MCAST_DISABLE);
                if (err)
                        en_err(priv, "Failed disabling multicast filter\n");
-
-               /* Disable port VLAN filter */
-               err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-               if (err)
-                       en_err(priv, "Failed disabling VLAN filter\n");
        }
 }
 
@@ -993,11 +993,6 @@ static void mlx4_en_clear_promisc_mode(struct mlx4_en_priv *priv,
                        en_err(priv, "Failed disabling promiscuous mode\n");
                break;
        }
-
-       /* Enable port VLAN filter */
-       err = mlx4_SET_VLAN_FLTR(mdev->dev, priv);
-       if (err)
-               en_err(priv, "Failed enabling VLAN filter\n");
 }
 
 static void mlx4_en_do_multicast(struct mlx4_en_priv *priv,
@@ -1166,7 +1161,8 @@ static void mlx4_en_do_uc_filter(struct mlx4_en_priv *priv,
                        }
 
                        /* MAC address of the port is not in uc list */
-                       if (ether_addr_equal_64bits(entry->mac, dev->dev_addr))
+                       if (ether_addr_equal_64bits(entry->mac,
+                                                   priv->current_mac))
                                found = true;
 
                        if (!found) {
@@ -1476,7 +1472,7 @@ static void mlx4_en_do_get_stats(struct work_struct *work)
                queue_delayed_work(mdev->workqueue, &priv->stats_task, STATS_DELAY);
        }
        if (mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port]) {
-               mlx4_en_do_set_mac(priv);
+               mlx4_en_do_set_mac(priv, priv->current_mac);
                mdev->mac_removed[MLX4_MAX_PORTS + 1 - priv->port] = 0;
        }
        mutex_unlock(&mdev->state_lock);
@@ -2336,7 +2332,7 @@ static void mlx4_en_add_vxlan_port(struct  net_device *dev,
        struct mlx4_en_priv *priv = netdev_priv(dev);
        __be16 current_port;
 
-       if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS))
+       if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
                return;
 
        if (sa_family == AF_INET6)
@@ -2469,10 +2465,12 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
        priv->port = port;
        priv->port_up = false;
        priv->flags = prof->flags;
+       priv->pflags = MLX4_EN_PRIV_FLAGS_BLUEFLAME;
        priv->ctrl_flags = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE |
                        MLX4_WQE_CTRL_SOLICITED);
        priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
        priv->tx_ring_num = prof->tx_ring_num;
+       priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
 
        priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
                                GFP_KERNEL);
@@ -2534,7 +2532,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
                }
        }
 
-       memcpy(priv->prev_mac, dev->dev_addr, sizeof(priv->prev_mac));
+       memcpy(priv->current_mac, dev->dev_addr, sizeof(priv->current_mac));
 
        priv->stride = roundup_pow_of_two(sizeof(struct mlx4_en_rx_desc) +
                                          DS_SIZE * MLX4_EN_MAX_RX_FRAGS);
index d2d4157..9c909d2 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/if_ether.h>
 #include <linux/if_vlan.h>
 #include <linux/vmalloc.h>
+#include <linux/irq.h>
 
 #include "mlx4_en.h"
 
@@ -334,8 +335,9 @@ void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev)
                                           dev->caps.comp_pool/
                                           dev->caps.num_ports) - 1;
 
-               num_rx_rings = min_t(int, num_of_eqs,
-                                    netif_get_num_default_rss_queues());
+               num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS :
+                       min_t(int, num_of_eqs,
+                             netif_get_num_default_rss_queues());
                mdev->profile.prof[i].rx_ring_num =
                        rounddown_pow_of_two(num_rx_rings);
        }
@@ -782,6 +784,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
                                                             PKT_HASH_TYPE_L3);
 
                                        skb_record_rx_queue(gro_skb, cq->ring);
+                                       skb_mark_napi_id(gro_skb, &cq->napi);
 
                                        if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
                                                timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +899,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
 
        /* If we used up all the quota - we're probably not done yet... */
        if (done == budget) {
+               int cpu_curr;
+               const struct cpumask *aff;
+
                INC_PERF_COUNTER(priv->pstats.napi_quota);
-               if (unlikely(cq->mcq.irq_affinity_change)) {
-                       cq->mcq.irq_affinity_change = false;
+
+               cpu_curr = smp_processor_id();
+               aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
+
+               if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
+                       /* The current CPU is not in the IRQ's affinity mask -
+                        * the affinity probably changed. Stop this NAPI poll
+                        * and restart it on the right CPU.
+                        */
                        napi_complete(napi);
                        mlx4_en_arm_cq(priv, cq);
                        return 0;
                }
        } else {
                /* Done for now */
-               cq->mcq.irq_affinity_change = false;
                napi_complete(napi);
                mlx4_en_arm_cq(priv, cq);
        }
@@ -922,7 +934,7 @@ static const int frag_sizes[] = {
 void mlx4_en_calc_rx_buf(struct net_device *dev)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN + ETH_LLC_SNAP_SIZE;
+       int eff_mtu = dev->mtu + ETH_HLEN + VLAN_HLEN;
        int buf_size = 0;
        int i = 0;
 
index 03e5f6a..49d5afc 100644 (file)
@@ -159,7 +159,8 @@ void mlx4_en_ex_selftest(struct net_device *dev, u32 *flags, u64 *buf)
                if (priv->mdev->dev->caps.flags &
                                        MLX4_DEV_CAP_FLAG_UC_LOOPBACK) {
                        buf[3] = mlx4_en_test_registers(priv);
-                       buf[4] = mlx4_en_test_loopback(priv);
+                       if (priv->port_up)
+                               buf[4] = mlx4_en_test_loopback(priv);
                }
 
                if (carrier_ok)
index 8be7483..dae3da6 100644 (file)
@@ -126,8 +126,13 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
                ring->bf.uar = &mdev->priv_uar;
                ring->bf.uar->map = mdev->uar_map;
                ring->bf_enabled = false;
-       } else
-               ring->bf_enabled = true;
+               ring->bf_alloced = false;
+               priv->pflags &= ~MLX4_EN_PRIV_FLAGS_BLUEFLAME;
+       } else {
+               ring->bf_alloced = true;
+               ring->bf_enabled = !!(priv->pflags &
+                                     MLX4_EN_PRIV_FLAGS_BLUEFLAME);
+       }
 
        ring->hwtstamp_tx_type = priv->hwtstamp_config.tx_type;
        ring->queue_index = queue_index;
@@ -161,7 +166,7 @@ void mlx4_en_destroy_tx_ring(struct mlx4_en_priv *priv,
        struct mlx4_en_tx_ring *ring = *pring;
        en_dbg(DRV, priv, "Destroying tx ring, qpn: %d\n", ring->qpn);
 
-       if (ring->bf_enabled)
+       if (ring->bf_alloced)
                mlx4_bf_free(mdev->dev, &ring->bf);
        mlx4_qp_remove(mdev->dev, &ring->qp);
        mlx4_qp_free(mdev->dev, &ring->qp);
@@ -195,7 +200,7 @@ int mlx4_en_activate_tx_ring(struct mlx4_en_priv *priv,
 
        mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
                                ring->cqn, user_prio, &ring->context);
-       if (ring->bf_enabled)
+       if (ring->bf_alloced)
                ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
 
        err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
@@ -351,9 +356,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
        return cnt;
 }
 
-static int mlx4_en_process_tx_cq(struct net_device *dev,
-                                struct mlx4_en_cq *cq,
-                                int budget)
+static bool mlx4_en_process_tx_cq(struct net_device *dev,
+                                struct mlx4_en_cq *cq)
 {
        struct mlx4_en_priv *priv = netdev_priv(dev);
        struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +376,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
        int factor = priv->cqe_factor;
        u64 timestamp = 0;
        int done = 0;
+       int budget = priv->tx_work_limit;
 
        if (!priv->port_up)
-               return 0;
+               return true;
 
        index = cons_index & size_mask;
        cqe = &buf[(index << factor) + factor];
@@ -447,7 +452,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
                netif_tx_wake_queue(ring->tx_queue);
                ring->wake_queue++;
        }
-       return done;
+       return done < budget;
 }
 
 void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +472,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
        struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
        struct net_device *dev = cq->dev;
        struct mlx4_en_priv *priv = netdev_priv(dev);
-       int done;
+       int clean_complete;
 
-       done = mlx4_en_process_tx_cq(dev, cq, budget);
+       clean_complete = mlx4_en_process_tx_cq(dev, cq);
+       if (!clean_complete)
+               return budget;
 
-       /* If we used up all the quota - we're probably not done yet... */
-       if (done < budget) {
-               /* Done for now */
-               cq->mcq.irq_affinity_change = false;
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-               return done;
-       } else if (unlikely(cq->mcq.irq_affinity_change)) {
-               cq->mcq.irq_affinity_change = false;
-               napi_complete(napi);
-               mlx4_en_arm_cq(priv, cq);
-               return 0;
-       }
-       return budget;
+       napi_complete(napi);
+       mlx4_en_arm_cq(priv, cq);
+
+       return 0;
 }
 
 static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
index d954ec1..2a004b3 100644 (file)
@@ -53,11 +53,6 @@ enum {
        MLX4_EQ_ENTRY_SIZE      = 0x20
 };
 
-struct mlx4_irq_notify {
-       void *arg;
-       struct irq_affinity_notify notify;
-};
-
 #define MLX4_EQ_STATUS_OK         ( 0 << 28)
 #define MLX4_EQ_STATUS_WRITE_FAIL  (10 << 28)
 #define MLX4_EQ_OWNER_SW          ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
        iounmap(priv->clr_base);
 }
 
-static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
-                                    const cpumask_t *mask)
-{
-       struct mlx4_irq_notify *n = container_of(notify,
-                                                struct mlx4_irq_notify,
-                                                notify);
-       struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
-       struct radix_tree_iter iter;
-       void **slot;
-
-       radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
-               struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
-
-               if (cq->irq == notify->irq)
-                       cq->irq_affinity_change = true;
-       }
-}
-
-static void mlx4_release_irq_notifier(struct kref *ref)
-{
-       struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
-                                                notify.kref);
-       kfree(n);
-}
-
-static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
-                                    struct mlx4_dev *dev, int irq)
-{
-       struct mlx4_irq_notify *irq_notifier = NULL;
-       int err = 0;
-
-       irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
-       if (!irq_notifier) {
-               mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
-                         irq);
-               return;
-       }
-
-       irq_notifier->notify.irq = irq;
-       irq_notifier->notify.notify = mlx4_irq_notifier_notify;
-       irq_notifier->notify.release = mlx4_release_irq_notifier;
-       irq_notifier->arg = priv;
-       err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
-       if (err) {
-               kfree(irq_notifier);
-               irq_notifier = NULL;
-               mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
-       }
-}
-
-
 int mlx4_alloc_eq_table(struct mlx4_dev *dev)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                                continue;
                                /* we don't want to break here */
                        }
-                       mlx4_assign_irq_notifier(priv, dev,
-                                                priv->eq_table.eq[vec].irq);
 
                        eq_set_ci(&priv->eq_table.eq[vec], 1);
                }
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
 }
 EXPORT_SYMBOL(mlx4_assign_eq);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
+{
+       struct mlx4_priv *priv = mlx4_priv(dev);
+
+       return priv->eq_table.eq[vec].irq;
+}
+EXPORT_SYMBOL(mlx4_eq_get_irq);
+
 void mlx4_release_eq(struct mlx4_dev *dev, int vec)
 {
        struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
                  Belonging to a legacy EQ*/
                mutex_lock(&priv->msix_ctl.pool_lock);
                if (priv->msix_ctl.pool_bm & 1ULL << i) {
-                       irq_set_affinity_notifier(
-                               priv->eq_table.eq[vec].irq,
-                               NULL);
                        free_irq(priv->eq_table.eq[vec].irq,
                                 &priv->eq_table.eq[vec]);
                        priv->msix_ctl.pool_bm &= ~(1ULL << i);
index 5f42f6d..80b8c5f 100644 (file)
@@ -120,6 +120,16 @@ static struct mlx4_profile default_profile = {
        .num_mtt        = 1 << 20, /* It is really num mtt segments */
 };
 
+static struct mlx4_profile low_mem_profile = {
+       .num_qp         = 1 << 17,
+       .num_srq        = 1 << 6,
+       .rdmarc_per_qp  = 1 << 4,
+       .num_cq         = 1 << 8,
+       .num_mcg        = 1 << 8,
+       .num_mpt        = 1 << 9,
+       .num_mtt        = 1 << 7,
+};
+
 static int log_num_mac = 7;
 module_param_named(log_num_mac, log_num_mac, int, 0444);
 MODULE_PARM_DESC(log_num_mac, "Log2 max number of MACs per ETH port (1-7)");
@@ -129,6 +139,8 @@ module_param_named(log_num_vlan, log_num_vlan, int, 0444);
 MODULE_PARM_DESC(log_num_vlan, "Log2 max number of VLANs per ETH port (0-7)");
 /* Log2 max number of VLANs per ETH port (0-7) */
 #define MLX4_LOG_NUM_VLANS 7
+#define MLX4_MIN_LOG_NUM_VLANS 0
+#define MLX4_MIN_LOG_NUM_MAC 1
 
 static bool use_prio;
 module_param_named(use_prio, use_prio, bool, 0444);
@@ -287,8 +299,13 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
        if (mlx4_is_mfunc(dev))
                dev->caps.flags &= ~MLX4_DEV_CAP_FLAG_SENSE_SUPPORT;
 
-       dev->caps.log_num_macs  = log_num_mac;
-       dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
+       if (mlx4_low_memory_profile()) {
+               dev->caps.log_num_macs  = MLX4_MIN_LOG_NUM_MAC;
+               dev->caps.log_num_vlans = MLX4_MIN_LOG_NUM_VLANS;
+       } else {
+               dev->caps.log_num_macs  = log_num_mac;
+               dev->caps.log_num_vlans = MLX4_LOG_NUM_VLANS;
+       }
 
        for (i = 1; i <= dev->caps.num_ports; ++i) {
                dev->caps.port_type[i] = MLX4_PORT_TYPE_NONE;
@@ -1587,7 +1604,12 @@ static int mlx4_init_hca(struct mlx4_dev *dev)
                if (mlx4_is_master(dev))
                        mlx4_parav_master_pf_caps(dev);
 
-               profile = default_profile;
+               if (mlx4_low_memory_profile()) {
+                       mlx4_info(dev, "Running from within kdump kernel. Using low memory profile\n");
+                       profile = low_mem_profile;
+               } else {
+                       profile = default_profile;
+               }
                if (dev->caps.steering_mode ==
                    MLX4_STEERING_MODE_DEVICE_MANAGED)
                        profile.num_mcg = MLX4_FS_NUM_MCG;
@@ -2439,7 +2461,8 @@ slave_start:
                            (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
                                mlx4_err(dev,
                                         "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
-                               goto err_close;
+                               err = -EINVAL;
+                               goto err_master_mfunc;
                        }
                        for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
                                unsigned j;
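mlx4_low_memory_profile() is not defined in this file; assuming the helper this series adds to <linux/mlx4/device.h>, it is just a kdump test, which is what both the MAC/VLAN capability clamp and the "Running from within kdump kernel" branch above key off:

	#include <linux/crash_dump.h>

	static inline int mlx4_low_memory_profile(void)
	{
		return is_kdump_kernel();	/* shrink HCA resources when dumping */
	}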
index 4c36def..d80e7a6 100644 (file)
@@ -270,7 +270,7 @@ static int existing_steering_entry(struct mlx4_dev *dev, u8 port,
         * we need to add it as a duplicate to this entry
         * for future references */
        list_for_each_entry(dqp, &entry->duplicates, list) {
-               if (qpn == pqp->qpn)
+               if (qpn == dqp->qpn)
                        return 0; /* qp is already duplicated */
        }
 
@@ -324,24 +324,22 @@ static bool check_duplicate_entry(struct mlx4_dev *dev, u8 port,
        return true;
 }
 
-/* I a steering entry contains only promisc QPs, it can be removed. */
-static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
-                                     enum mlx4_steer_type steer,
-                                     unsigned int index, u32 tqpn)
+/* Returns true if all the QPs != tqpn contained in this entry
+ * are Promisc QPs. Returns false otherwise.
+ */
+static bool promisc_steering_entry(struct mlx4_dev *dev, u8 port,
+                                  enum mlx4_steer_type steer,
+                                  unsigned int index, u32 tqpn,
+                                  u32 *members_count)
 {
-       struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry = NULL, *tmp_entry;
-       u32 qpn;
-       u32 members_count;
+       u32 m_count;
        bool ret = false;
        int i;
 
        if (port < 1 || port > dev->caps.num_ports)
-               return NULL;
-
-       s_steer = &mlx4_priv(dev)->steer[port - 1];
+               return false;
 
        mailbox = mlx4_alloc_cmd_mailbox(dev);
        if (IS_ERR(mailbox))
@@ -350,21 +348,61 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
 
        if (mlx4_READ_ENTRY(dev, index, mailbox))
                goto out;
-       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       for (i = 0;  i < members_count; i++) {
-               qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
+       m_count = be32_to_cpu(mgm->members_count) & 0xffffff;
+       if (members_count)
+               *members_count = m_count;
+
+       for (i = 0;  i < m_count; i++) {
+               u32 qpn = be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK;
                if (!get_promisc_qp(dev, port, steer, qpn) && qpn != tqpn) {
                        /* the qp is not promisc, the entry can't be removed */
                        goto out;
                }
        }
-        /* All the qps currently registered for this entry are promiscuous,
+       ret = true;
+out:
+       mlx4_free_cmd_mailbox(dev, mailbox);
+       return ret;
+}
+
+/* If a steering entry contains only promisc QPs, it can be removed. */
+static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
+                                     enum mlx4_steer_type steer,
+                                     unsigned int index, u32 tqpn)
+{
+       struct mlx4_steer *s_steer;
+       struct mlx4_steer_index *entry = NULL, *tmp_entry;
+       u32 members_count;
+       bool ret = false;
+
+       if (port < 1 || port > dev->caps.num_ports)
+               return false;
+
+       s_steer = &mlx4_priv(dev)->steer[port - 1];
+
+       if (!promisc_steering_entry(dev, port, steer, index,
+                                   tqpn, &members_count))
+               goto out;
+
+       /* All the QPs currently registered for this entry are promiscuous;
         * checking for duplicates */
        ret = true;
        list_for_each_entry_safe(entry, tmp_entry, &s_steer->steer_entries[steer], list) {
                if (entry->index == index) {
-                       if (list_empty(&entry->duplicates)) {
+                       if (list_empty(&entry->duplicates) ||
+                           members_count == 1) {
+                               struct mlx4_promisc_qp *pqp, *tmp_pqp;
+                               /* If there is only one duplicate entry, it is
+                                * the QP we want to delete; walk the list and
+                                * free each entry.
+                                */
                                list_del(&entry->list);
+                               list_for_each_entry_safe(pqp, tmp_pqp,
+                                                        &entry->duplicates,
+                                                        list) {
+                                       list_del(&pqp->list);
+                                       kfree(pqp);
+                               }
                                kfree(entry);
                        } else {
                                /* This entry contains duplicates so it shouldn't be removed */
@@ -375,7 +413,6 @@ static bool can_remove_steering_entry(struct mlx4_dev *dev, u8 port,
        }
 
 out:
-       mlx4_free_cmd_mailbox(dev, mailbox);
        return ret;
 }
 
@@ -421,42 +458,57 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
        }
        mgm = mailbox->buf;
 
-       /* the promisc qp needs to be added for each one of the steering
-        * entries, if it already exists, needs to be added as a duplicate
-        * for this entry */
-       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
-               err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
-               if (err)
-                       goto out_mailbox;
+       if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
+               /* The promisc QP needs to be added for each one of the steering
+                * entries. If it already exists, needs to be added as
+                * a duplicate for this entry.
+                */
+               list_for_each_entry(entry,
+                                   &s_steer->steer_entries[steer],
+                                   list) {
+                       err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
+                       if (err)
+                               goto out_mailbox;
 
-               members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-               prot = be32_to_cpu(mgm->members_count) >> 30;
-               found = false;
-               for (i = 0; i < members_count; i++) {
-                       if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn) {
-                               /* Entry already exists, add to duplicates */
-                               dqp = kmalloc(sizeof *dqp, GFP_KERNEL);
-                               if (!dqp) {
+                       members_count = be32_to_cpu(mgm->members_count) &
+                                       0xffffff;
+                       prot = be32_to_cpu(mgm->members_count) >> 30;
+                       found = false;
+                       for (i = 0; i < members_count; i++) {
+                               if ((be32_to_cpu(mgm->qp[i]) &
+                                    MGM_QPN_MASK) == qpn) {
+                                       /* Entry already exists.
+                                        * Add to duplicates.
+                                        */
+                                       dqp = kmalloc(sizeof(*dqp), GFP_KERNEL);
+                                       if (!dqp) {
+                                               err = -ENOMEM;
+                                               goto out_mailbox;
+                                       }
+                                       dqp->qpn = qpn;
+                                       list_add_tail(&dqp->list,
+                                                     &entry->duplicates);
+                                       found = true;
+                               }
+                       }
+                       if (!found) {
+                               /* Need to add the qpn to mgm */
+                               if (members_count ==
+                                   dev->caps.num_qp_per_mgm) {
+                                       /* entry is full */
                                        err = -ENOMEM;
                                        goto out_mailbox;
                                }
-                               dqp->qpn = qpn;
-                               list_add_tail(&dqp->list, &entry->duplicates);
-                               found = true;
-                       }
-               }
-               if (!found) {
-                       /* Need to add the qpn to mgm */
-                       if (members_count == dev->caps.num_qp_per_mgm) {
-                               /* entry is full */
-                               err = -ENOMEM;
-                               goto out_mailbox;
+                               mgm->qp[members_count++] =
+                                       cpu_to_be32(qpn & MGM_QPN_MASK);
+                               mgm->members_count =
+                                       cpu_to_be32(members_count |
+                                                   (prot << 30));
+                               err = mlx4_WRITE_ENTRY(dev, entry->index,
+                                                      mailbox);
+                               if (err)
+                                       goto out_mailbox;
                        }
-                       mgm->qp[members_count++] = cpu_to_be32(qpn & MGM_QPN_MASK);
-                       mgm->members_count = cpu_to_be32(members_count | (prot << 30));
-                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
-                       if (err)
-                               goto out_mailbox;
                }
        }
 
@@ -465,8 +517,14 @@ static int add_promisc_qp(struct mlx4_dev *dev, u8 port,
        /* now need to add all the promisc qps to default entry */
        memset(mgm, 0, sizeof *mgm);
        members_count = 0;
-       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list)
+       list_for_each_entry(dqp, &s_steer->promisc_qps[steer], list) {
+               if (members_count == dev->caps.num_qp_per_mgm) {
+                       /* entry is full */
+                       err = -ENOMEM;
+                       goto out_list;
+               }
                mgm->qp[members_count++] = cpu_to_be32(dqp->qpn & MGM_QPN_MASK);
+       }
        mgm->members_count = cpu_to_be32(members_count | MLX4_PROT_ETH << 30);
 
        err = mlx4_WRITE_PROMISC(dev, port, steer, mailbox);
@@ -495,13 +553,13 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
        struct mlx4_steer *s_steer;
        struct mlx4_cmd_mailbox *mailbox;
        struct mlx4_mgm *mgm;
-       struct mlx4_steer_index *entry;
+       struct mlx4_steer_index *entry, *tmp_entry;
        struct mlx4_promisc_qp *pqp;
        struct mlx4_promisc_qp *dqp;
        u32 members_count;
        bool found;
        bool back_to_list = false;
-       int loc, i;
+       int i;
        int err;
 
        if (port < 1 || port > dev->caps.num_ports)
@@ -538,39 +596,73 @@ static int remove_promisc_qp(struct mlx4_dev *dev, u8 port,
        if (err)
                goto out_mailbox;
 
-       /* remove the qp from all the steering entries*/
-       list_for_each_entry(entry, &s_steer->steer_entries[steer], list) {
-               found = false;
-               list_for_each_entry(dqp, &entry->duplicates, list) {
-                       if (dqp->qpn == qpn) {
-                               found = true;
-                               break;
+       if (!(mlx4_is_mfunc(dev) && steer == MLX4_UC_STEER)) {
+               /* Remove the QP from all the steering entries */
+               list_for_each_entry_safe(entry, tmp_entry,
+                                        &s_steer->steer_entries[steer],
+                                        list) {
+                       found = false;
+                       list_for_each_entry(dqp, &entry->duplicates, list) {
+                               if (dqp->qpn == qpn) {
+                                       found = true;
+                                       break;
+                               }
                        }
-               }
-               if (found) {
-                       /* a duplicate, no need to change the mgm,
-                        * only update the duplicates list */
-                       list_del(&dqp->list);
-                       kfree(dqp);
-               } else {
-                       err = mlx4_READ_ENTRY(dev, entry->index, mailbox);
-                               if (err)
-                                       goto out_mailbox;
-                       members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-                       for (loc = -1, i = 0; i < members_count; ++i)
-                               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qpn)
-                                       loc = i;
-
-                       mgm->members_count = cpu_to_be32(--members_count |
-                                                        (MLX4_PROT_ETH << 30));
-                       mgm->qp[loc] = mgm->qp[i - 1];
-                       mgm->qp[i - 1] = 0;
+                       if (found) {
+                               /* A duplicate, no need to change the MGM,
+                                * only update the duplicates list
+                                */
+                               list_del(&dqp->list);
+                               kfree(dqp);
+                       } else {
+                               int loc = -1;
+
+                               err = mlx4_READ_ENTRY(dev,
+                                                     entry->index,
+                                                     mailbox);
+                               if (err)
+                                       goto out_mailbox;
+                               members_count =
+                                       be32_to_cpu(mgm->members_count) &
+                                       0xffffff;
+                               if (!members_count) {
+                                       mlx4_warn(dev, "QP %06x wasn't found in entry %x mcount=0. deleting entry...\n",
+                                                 qpn, entry->index);
+                                       list_del(&entry->list);
+                                       kfree(entry);
+                                       continue;
+                               }
 
-                       err = mlx4_WRITE_ENTRY(dev, entry->index, mailbox);
-                               if (err)
+                               for (i = 0; i < members_count; ++i)
+                                       if ((be32_to_cpu(mgm->qp[i]) &
+                                            MGM_QPN_MASK) == qpn) {
+                                               loc = i;
+                                               break;
+                                       }
+
+                               if (loc < 0) {
+                                       mlx4_err(dev, "QP %06x wasn't found in entry %d\n",
+                                                qpn, entry->index);
+                                       err = -EINVAL;
                                        goto out_mailbox;
-               }
+                               }
 
+                               /* Copy the last QP in this MGM
+                                * over removed QP
+                                */
+                               mgm->qp[loc] = mgm->qp[members_count - 1];
+                               mgm->qp[members_count - 1] = 0;
+                               mgm->members_count =
+                                       cpu_to_be32(--members_count |
+                                                   (MLX4_PROT_ETH << 30));
+
+                               err = mlx4_WRITE_ENTRY(dev,
+                                                      entry->index,
+                                                      mailbox);
+                               if (err)
+                                       goto out_mailbox;
+                       }
+               }
        }
 
 out_mailbox:
@@ -1062,7 +1154,7 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
        struct mlx4_mgm *mgm;
        u32 members_count;
        int prev, index;
-       int i, loc;
+       int i, loc = -1;
        int err;
        u8 port = gid[5];
        bool removed_entry = false;
@@ -1085,15 +1177,20 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                goto out;
        }
 
-       /* if this pq is also a promisc qp, it shouldn't be removed */
+       /* If this QP is also a promisc QP, it shouldn't be removed if at
+        * least one non-promisc QP is also attached to this MCG
+        */
        if (prot == MLX4_PROT_ETH &&
-           check_duplicate_entry(dev, port, steer, index, qp->qpn))
-               goto out;
+           check_duplicate_entry(dev, port, steer, index, qp->qpn) &&
+           !promisc_steering_entry(dev, port, steer, index, qp->qpn, NULL))
+               goto out;
 
        members_count = be32_to_cpu(mgm->members_count) & 0xffffff;
-       for (loc = -1, i = 0; i < members_count; ++i)
-               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn)
+       for (i = 0; i < members_count; ++i)
+               if ((be32_to_cpu(mgm->qp[i]) & MGM_QPN_MASK) == qp->qpn) {
                        loc = i;
+                       break;
+               }
 
        if (loc == -1) {
                mlx4_err(dev, "QP %06x not found in MGM\n", qp->qpn);
@@ -1101,15 +1198,15 @@ int mlx4_qp_detach_common(struct mlx4_dev *dev, struct mlx4_qp *qp, u8 gid[16],
                goto out;
        }
 
-
+       /* copy the last QP in this MGM over removed QP */
+       mgm->qp[loc] = mgm->qp[members_count - 1];
+       mgm->qp[members_count - 1] = 0;
        mgm->members_count = cpu_to_be32(--members_count | (u32) prot << 30);
-       mgm->qp[loc]       = mgm->qp[i - 1];
-       mgm->qp[i - 1]     = 0;
 
        if (prot == MLX4_PROT_ETH)
                removed_entry = can_remove_steering_entry(dev, port, steer,
                                                                index, qp->qpn);
-       if (i != 1 && (prot != MLX4_PROT_ETH || !removed_entry)) {
+       if (members_count && (prot != MLX4_PROT_ETH || !removed_entry)) {
                err = mlx4_WRITE_ENTRY(dev, index, mailbox);
                goto out;
        }
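Both detach paths in mcg.c now share one constant-time removal idiom: find the slot, then copy the last member over it, rather than reusing a loop index that may have run past the match (the source of the old mgm->qp[i - 1] corruption). As a standalone sketch with hypothetical names:

	#include <linux/types.h>

	/* Remove slot 'loc' from a densely packed array of 'n' big-endian
	 * QPNs in O(1); member order is not preserved. */
	static void qp_array_swap_remove(__be32 *qp, u32 *n, u32 loc)
	{
		qp[loc] = qp[*n - 1];	/* last entry fills the hole */
		qp[*n - 1] = 0;		/* clear the vacated tail slot */
		(*n)--;
	}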
index 1d8af73..13fbcd0 100644 (file)
 
 #define INIT_HCA_TPT_MW_ENABLE          (1 << 7)
 
-#define MLX4_NUM_UP            8
-#define MLX4_NUM_TC            8
-#define MLX4_RATELIMIT_UNITS 3 /* 100 Mbps */
-#define MLX4_RATELIMIT_DEFAULT 0xffff
-
 struct mlx4_set_port_prio2tc_context {
        u8 prio2tc[4];
 };
index 0e15295..3de41be 100644 (file)
@@ -93,6 +93,8 @@
  * OS related constants and tunables
  */
 
+#define MLX4_EN_PRIV_FLAGS_BLUEFLAME 1
+
 #define MLX4_EN_WATCHDOG_TIMEOUT       (15 * HZ)
 
 /* Use the maximum between 16384 and a single page */
@@ -119,6 +121,7 @@ enum {
 #define MLX4_EN_MIN_TX_SIZE    (4096 / TXBB_SIZE)
 
 #define MLX4_EN_SMALL_PKT_SIZE         64
+#define MLX4_EN_MIN_TX_RING_P_UP       1
 #define MLX4_EN_MAX_TX_RING_P_UP       32
 #define MLX4_EN_NUM_UP                 8
 #define MLX4_EN_DEF_TX_RING_SIZE       512
@@ -126,6 +129,8 @@ enum {
 #define MAX_TX_RINGS                   (MLX4_EN_MAX_TX_RING_P_UP * \
                                         MLX4_EN_NUM_UP)
 
+#define MLX4_EN_DEFAULT_TX_WORK                256
+
 /* Target number of packets to coalesce with interrupt moderation */
 #define MLX4_EN_RX_COAL_TARGET 44
 #define MLX4_EN_RX_COAL_TIME   0x10
@@ -152,8 +157,6 @@ enum {
 #define MLX4_EN_TX_POLL_MODER  16
 #define MLX4_EN_TX_POLL_TIMEOUT        (HZ / 4)
 
-#define ETH_LLC_SNAP_SIZE      8
-
 #define SMALL_PACKET_SIZE      (256 - NET_IP_ALIGN)
 #define HEADER_COPY_SIZE       (128 - NET_IP_ALIGN)
 #define MLX4_LOOPBACK_TEST_PAYLOAD (HEADER_COPY_SIZE - ETH_HLEN)
@@ -278,6 +281,7 @@ struct mlx4_en_tx_ring {
        unsigned long wake_queue;
        struct mlx4_bf bf;
        bool bf_enabled;
+       bool bf_alloced;
        struct netdev_queue *tx_queue;
        int hwtstamp_tx_type;
        int inline_thold;
@@ -343,6 +347,7 @@ struct mlx4_en_cq {
 #define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
        spinlock_t poll_lock; /* protects from LLS/napi conflicts */
 #endif  /* CONFIG_NET_RX_BUSY_POLL */
+       struct irq_desc *irq_desc;
 };
 
 struct mlx4_en_port_profile {
@@ -532,7 +537,7 @@ struct mlx4_en_priv {
        int registered;
        int allocated;
        int stride;
-       unsigned char prev_mac[ETH_ALEN + 2];
+       unsigned char current_mac[ETH_ALEN + 2];
        int mac_index;
        unsigned max_mtu;
        int base_qpn;
@@ -542,6 +547,7 @@ struct mlx4_en_priv {
        __be32 ctrl_flags;
        u32 flags;
        u8 num_tx_rings_p_up;
+       u32 tx_work_limit;
        u32 tx_ring_num;
        u32 rx_ring_num;
        u32 rx_skb_size;
@@ -590,6 +596,8 @@ struct mlx4_en_priv {
 #endif
        u64 tunnel_reg_id;
        __be16 vxlan_port;
+
+       u32 pflags;
 };
 
 enum mlx4_en_wol {
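MLX4_EN_PRIV_FLAGS_BLUEFLAME together with the new pflags word suggests an ethtool private flag; a hedged sketch of the read side (the set side would presumably toggle BlueFlame use on each TX ring):

	static u32 mlx4_en_get_priv_flags(struct net_device *dev)
	{
		struct mlx4_en_priv *priv = netdev_priv(dev);

		return priv->pflags;	/* bit 0: BlueFlame doorbells */
	}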
index 7ab9717..9ba0c1c 100644 (file)
@@ -244,10 +244,16 @@ EXPORT_SYMBOL_GPL(mlx4_get_base_qpn);
 
 void __mlx4_unregister_mac(struct mlx4_dev *dev, u8 port, u64 mac)
 {
-       struct mlx4_port_info *info = &mlx4_priv(dev)->port[port];
-       struct mlx4_mac_table *table = &info->mac_table;
+       struct mlx4_port_info *info;
+       struct mlx4_mac_table *table;
        int index;
 
+       if (port < 1 || port > dev->caps.num_ports) {
+               mlx4_warn(dev, "invalid port number (%d), aborting...\n", port);
+               return;
+       }
+       info = &mlx4_priv(dev)->port[port];
+       table = &info->mac_table;
        mutex_lock(&table->mutex);
        index = find_index(dev, table, mac);
 
@@ -1051,14 +1057,26 @@ int mlx4_SET_PORT_SCHEDULER(struct mlx4_dev *dev, u8 port, u8 *tc_tx_bw,
 
        for (i = 0; i < MLX4_NUM_TC; i++) {
                struct mlx4_port_scheduler_tc_cfg_be *tc = &context->tc[i];
-               u16 r = ratelimit && ratelimit[i] ? ratelimit[i] :
-                       MLX4_RATELIMIT_DEFAULT;
+               u16 r;
+
+               if (ratelimit && ratelimit[i]) {
+                       if (ratelimit[i] <= MLX4_MAX_100M_UNITS_VAL) {
+                               r = ratelimit[i];
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_100M_UNITS);
+                       } else {
+                               r = ratelimit[i] / 10;
+                               tc->max_bw_units =
+                                       htons(MLX4_RATELIMIT_1G_UNITS);
+                       }
+                       tc->max_bw_value = htons(r);
+               } else {
+                       tc->max_bw_value = htons(MLX4_RATELIMIT_DEFAULT);
+                       tc->max_bw_units = htons(MLX4_RATELIMIT_1G_UNITS);
+               }
 
                tc->pg = htons(pg[i]);
                tc->bw_precentage = htons(tc_tx_bw[i]);
-
-               tc->max_bw_units = htons(MLX4_RATELIMIT_UNITS);
-               tc->max_bw_value = htons(r);
        }
 
        in_mod = MLX4_SET_PORT_SCHEDULER << 8 | port;
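The scheduler change swaps the single hard-coded 100 Mbps unit for a two-range encoding: a requested rate (kept in 100 Mbps granularity) that fits the small-unit field is programmed as-is, a larger one is divided by 10 and programmed in 1 Gbps units, and an unset rate falls back to the unlimited default in 1 Gbps units. A worked example, assuming MLX4_MAX_100M_UNITS_VAL caps the small encoding at 255:

	/* ratelimit[i] is in 100 Mbps units:
	 *   200 (20 Gbps) -> fits:    max_bw_value = 200, units = 100 Mbps
	 *   400 (40 Gbps) -> too big: max_bw_value = 400 / 10 = 40, units = 1 Gbps
	 *   0   (unset)   -> max_bw_value = MLX4_RATELIMIT_DEFAULT, units = 1 Gbps
	 */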
index b215742..56779c1 100644 (file)
@@ -56,7 +56,7 @@ int mlx5_buf_alloc(struct mlx5_core_dev *dev, int size, int max_direct,
        if (size <= max_direct) {
                buf->nbufs        = 1;
                buf->npages       = 1;
-               buf->page_shift   = get_order(size) + PAGE_SHIFT;
+               buf->page_shift   = (u8)get_order(size) + PAGE_SHIFT;
                buf->direct.buf   = dma_zalloc_coherent(&dev->pdev->dev,
                                                        size, &t, GFP_KERNEL);
                if (!buf->direct.buf)
index 87d1b01..4671747 100644 (file)
@@ -464,7 +464,7 @@ static void dump_command(struct mlx5_core_dev *dev,
        struct mlx5_cmd_msg *msg = input ? ent->in : ent->out;
        struct mlx5_cmd_mailbox *next = msg->next;
        int data_only;
-       int offset = 0;
+       u32 offset = 0;
        int dump_len;
 
        data_only = !!(mlx5_core_debug_mask & (1 << MLX5_CMD_DATA));
index 7f39ebc..4e8bd0b 100644 (file)
@@ -252,7 +252,9 @@ static int mlx5_eq_int(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
                        case MLX5_PORT_CHANGE_SUBTYPE_GUID:
                        case MLX5_PORT_CHANGE_SUBTYPE_CLIENT_REREG:
                        case MLX5_PORT_CHANGE_SUBTYPE_INITIALIZED:
-                               dev->event(dev, port_subtype_event(eqe->sub_type), &port);
+                               if (dev->event)
+                                       dev->event(dev, port_subtype_event(eqe->sub_type),
+                                                  (unsigned long)port);
                                break;
                        default:
                                mlx5_core_warn(dev, "Port event with unrecognized subtype: port %d, sub_type %d\n",
index 18d6fd5..fd80ecf 100644 (file)
@@ -37,7 +37,7 @@
 #include "mlx5_core.h"
 
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
-                     u16 opmod, int port)
+                     u16 opmod, u8 port)
 {
        struct mlx5_mad_ifc_mbox_in *in = NULL;
        struct mlx5_mad_ifc_mbox_out *out = NULL;
index ee24f13..f2716cc 100644 (file)
@@ -58,7 +58,100 @@ int mlx5_core_debug_mask;
 module_param_named(debug_mask, mlx5_core_debug_mask, int, 0644);
 MODULE_PARM_DESC(debug_mask, "debug mask: 1 = dump cmd data, 2 = dump cmd exec time, 3 = both. Default=0");
 
+#define MLX5_DEFAULT_PROF      2
+static int prof_sel = MLX5_DEFAULT_PROF;
+module_param_named(prof_sel, prof_sel, int, 0444);
+MODULE_PARM_DESC(prof_sel, "profile selector. Valid range 0 - 2");
+
 struct workqueue_struct *mlx5_core_wq;
+static LIST_HEAD(intf_list);
+static LIST_HEAD(dev_list);
+static DEFINE_MUTEX(intf_mutex);
+
+struct mlx5_device_context {
+       struct list_head        list;
+       struct mlx5_interface  *intf;
+       void                   *context;
+};
+
+static struct mlx5_profile profile[] = {
+       [0] = {
+               .mask           = 0,
+       },
+       [1] = {
+               .mask           = MLX5_PROF_MASK_QP_SIZE,
+               .log_max_qp     = 12,
+       },
+       [2] = {
+               .mask           = MLX5_PROF_MASK_QP_SIZE |
+                                 MLX5_PROF_MASK_MR_CACHE,
+               .log_max_qp     = 17,
+               .mr_cache[0]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[1]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[2]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[3]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[4]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[5]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[6]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[7]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[8]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[9]    = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[10]   = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[11]   = {
+                       .size   = 500,
+                       .limit  = 250
+               },
+               .mr_cache[12]   = {
+                       .size   = 64,
+                       .limit  = 32
+               },
+               .mr_cache[13]   = {
+                       .size   = 32,
+                       .limit  = 16
+               },
+               .mr_cache[14]   = {
+                       .size   = 16,
+                       .limit  = 8
+               },
+               .mr_cache[15]   = {
+                       .size   = 8,
+                       .limit  = 4
+               },
+       },
+};
 
 static int set_dma_caps(struct pci_dev *pdev)
 {
@@ -218,7 +311,7 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
 
        copy_rw_fields(&set_ctx->hca_cap, &query_out->hca_cap);
 
-       if (dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
+       if (dev->profile && dev->profile->mask & MLX5_PROF_MASK_QP_SIZE)
                set_ctx->hca_cap.log_max_qp = dev->profile->log_max_qp;
 
        flags = be64_to_cpu(query_out->hca_cap.flags);
@@ -299,7 +392,7 @@ static int mlx5_core_disable_hca(struct mlx5_core_dev *dev)
        return 0;
 }
 
-int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
+static int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev)
 {
        struct mlx5_priv *priv = &dev->priv;
        int err;
@@ -489,7 +582,7 @@ err_dbg:
 }
-EXPORT_SYMBOL(mlx5_dev_init);
 
-void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
+static void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
 {
        struct mlx5_priv *priv = &dev->priv;
 
@@ -516,7 +609,190 @@ void mlx5_dev_cleanup(struct mlx5_core_dev *dev)
        pci_disable_device(dev->pdev);
        debugfs_remove(priv->dbg_root);
 }
-EXPORT_SYMBOL(mlx5_dev_cleanup);
+
+static void mlx5_add_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+       struct mlx5_device_context *dev_ctx;
+       struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+       dev_ctx = kmalloc(sizeof(*dev_ctx), GFP_KERNEL);
+       if (!dev_ctx) {
+               pr_warn("mlx5_add_device: alloc context failed\n");
+               return;
+       }
+
+       dev_ctx->intf    = intf;
+       dev_ctx->context = intf->add(dev);
+
+       if (dev_ctx->context) {
+               spin_lock_irq(&priv->ctx_lock);
+               list_add_tail(&dev_ctx->list, &priv->ctx_list);
+               spin_unlock_irq(&priv->ctx_lock);
+       } else {
+               kfree(dev_ctx);
+       }
+}
+
+static void mlx5_remove_device(struct mlx5_interface *intf, struct mlx5_priv *priv)
+{
+       struct mlx5_device_context *dev_ctx;
+       struct mlx5_core_dev *dev = container_of(priv, struct mlx5_core_dev, priv);
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf == intf) {
+                       spin_lock_irq(&priv->ctx_lock);
+                       list_del(&dev_ctx->list);
+                       spin_unlock_irq(&priv->ctx_lock);
+
+                       intf->remove(dev, dev_ctx->context);
+                       kfree(dev_ctx);
+                       return;
+               }
+}
+
+static int mlx5_register_device(struct mlx5_core_dev *dev)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       struct mlx5_interface *intf;
+
+       mutex_lock(&intf_mutex);
+       list_add_tail(&priv->dev_list, &dev_list);
+       list_for_each_entry(intf, &intf_list, list)
+               mlx5_add_device(intf, priv);
+       mutex_unlock(&intf_mutex);
+
+       return 0;
+}
+
+static void mlx5_unregister_device(struct mlx5_core_dev *dev)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       struct mlx5_interface *intf;
+
+       mutex_lock(&intf_mutex);
+       list_for_each_entry(intf, &intf_list, list)
+               mlx5_remove_device(intf, priv);
+       list_del(&priv->dev_list);
+       mutex_unlock(&intf_mutex);
+}
+
+int mlx5_register_interface(struct mlx5_interface *intf)
+{
+       struct mlx5_priv *priv;
+
+       if (!intf->add || !intf->remove)
+               return -EINVAL;
+
+       mutex_lock(&intf_mutex);
+       list_add_tail(&intf->list, &intf_list);
+       list_for_each_entry(priv, &dev_list, dev_list)
+               mlx5_add_device(intf, priv);
+       mutex_unlock(&intf_mutex);
+
+       return 0;
+}
+EXPORT_SYMBOL(mlx5_register_interface);
+
+void mlx5_unregister_interface(struct mlx5_interface *intf)
+{
+       struct mlx5_priv *priv;
+
+       mutex_lock(&intf_mutex);
+       list_for_each_entry(priv, &dev_list, dev_list)
+               mlx5_remove_device(intf, priv);
+       list_del(&intf->list);
+       mutex_unlock(&intf_mutex);
+}
+EXPORT_SYMBOL(mlx5_unregister_interface);
+
+static void mlx5_core_event(struct mlx5_core_dev *dev, enum mlx5_dev_event event,
+                           unsigned long param)
+{
+       struct mlx5_priv *priv = &dev->priv;
+       struct mlx5_device_context *dev_ctx;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->ctx_lock, flags);
+
+       list_for_each_entry(dev_ctx, &priv->ctx_list, list)
+               if (dev_ctx->intf->event)
+                       dev_ctx->intf->event(dev, dev_ctx->context, event, param);
+
+       spin_unlock_irqrestore(&priv->ctx_lock, flags);
+}
+
+struct mlx5_core_event_handler {
+       void (*event)(struct mlx5_core_dev *dev,
+                     enum mlx5_dev_event event,
+                     void *data);
+};
+
+static int init_one(struct pci_dev *pdev,
+                   const struct pci_device_id *id)
+{
+       struct mlx5_core_dev *dev;
+       struct mlx5_priv *priv;
+       int err;
+
+       dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+       if (!dev) {
+               dev_err(&pdev->dev, "kzalloc failed\n");
+               return -ENOMEM;
+       }
+       priv = &dev->priv;
+
+       pci_set_drvdata(pdev, dev);
+
+       if (prof_sel < 0 || prof_sel >= ARRAY_SIZE(profile)) {
+               pr_warn("selected profile out of range, selecting default (%d)\n",
+                       MLX5_DEFAULT_PROF);
+               prof_sel = MLX5_DEFAULT_PROF;
+       }
+       dev->profile = &profile[prof_sel];
+       dev->event = mlx5_core_event;
+
+       err = mlx5_dev_init(dev, pdev);
+       if (err) {
+               dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
+               goto out;
+       }
+
+       INIT_LIST_HEAD(&priv->ctx_list);
+       spin_lock_init(&priv->ctx_lock);
+       err = mlx5_register_device(dev);
+       if (err) {
+               dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
+               goto out_init;
+       }
+
+       return 0;
+
+out_init:
+       mlx5_dev_cleanup(dev);
+out:
+       kfree(dev);
+       return err;
+}
+
+static void remove_one(struct pci_dev *pdev)
+{
+       struct mlx5_core_dev *dev  = pci_get_drvdata(pdev);
+
+       mlx5_unregister_device(dev);
+       mlx5_dev_cleanup(dev);
+       kfree(dev);
+}
+
+static const struct pci_device_id mlx5_core_pci_table[] = {
+       { PCI_VDEVICE(MELLANOX, 4113) }, /* MT4113 Connect-IB */
+       { 0, }
+};
+
+MODULE_DEVICE_TABLE(pci, mlx5_core_pci_table);
+
+static struct pci_driver mlx5_core_driver = {
+       .name           = DRIVER_NAME,
+       .id_table       = mlx5_core_pci_table,
+       .probe          = init_one,
+       .remove         = remove_one
+};
 
 static int __init init(void)
 {
@@ -530,8 +806,15 @@ static int __init init(void)
        }
        mlx5_health_init();
 
+       err = pci_register_driver(&mlx5_core_driver);
+       if (err)
+               goto err_health;
+
        return 0;
 
+err_health:
+       mlx5_health_cleanup();
+       destroy_workqueue(mlx5_core_wq);
 err_debug:
        mlx5_unregister_debugfs();
        return err;
@@ -539,6 +822,7 @@ err_debug:
 
 static void __exit cleanup(void)
 {
+       pci_unregister_driver(&mlx5_core_driver);
        mlx5_health_cleanup();
        destroy_workqueue(mlx5_core_wq);
        mlx5_unregister_debugfs();
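The register/unregister pair gives upper drivers (mlx5_ib being the obvious client) a per-device attach model: registering an interface walks every existing device under intf_mutex, and each newly probed device calls back into every registered interface. A hypothetical client, using only the callback shapes visible above (struct client_state and the header location are assumptions):

	#include <linux/slab.h>
	#include <linux/mlx5/driver.h>	/* struct mlx5_interface (assumed) */

	struct client_state { int placeholder; };

	static void *client_add(struct mlx5_core_dev *dev)
	{
		/* per-device state; returning NULL means "not attached" */
		return kzalloc(sizeof(struct client_state), GFP_KERNEL);
	}

	static void client_remove(struct mlx5_core_dev *dev, void *context)
	{
		kfree(context);
	}

	static void client_event(struct mlx5_core_dev *dev, void *context,
				 enum mlx5_dev_event event, unsigned long param)
	{
		/* e.g. the port events forwarded from eq.c above */
	}

	static struct mlx5_interface client_intf = {
		.add	= client_add,
		.remove	= client_remove,
		.event	= client_event,
	};

	/* at client module init: */
	/* err = mlx5_register_interface(&client_intf); */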
index ba0401d..184c361 100644 (file)
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
        write_lock_irq(&table->lock);
        err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
        write_unlock_irq(&table->lock);
+       if (err) {
+               mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
+                              mlx5_base_mkey(mr->key), err);
+               mlx5_core_destroy_mkey(dev, mr);
+       }
 
        return err;
 }
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        struct mlx5_mr_table *table = &dev->priv.mr_table;
        struct mlx5_destroy_mkey_mbox_in in;
        struct mlx5_destroy_mkey_mbox_out out;
+       struct mlx5_core_mr *deleted_mr;
        unsigned long flags;
        int err;
 
        memset(&in, 0, sizeof(in));
        memset(&out, 0, sizeof(out));
 
+       write_lock_irqsave(&table->lock, flags);
+       deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
+       write_unlock_irqrestore(&table->lock, flags);
+       if (!deleted_mr) {
+               mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
+                              mlx5_base_mkey(mr->key));
+               return -ENOENT;
+       }
+
        in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
        in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
        err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
        if (out.hdr.status)
                return mlx5_cmd_status_to_err(&out.hdr);
 
-       write_lock_irqsave(&table->lock, flags);
-       radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
-       write_unlock_irqrestore(&table->lock, flags);
-
        return err;
 }
 EXPORT_SYMBOL(mlx5_core_destroy_mkey);
index c2a953e..d476918 100644 (file)
@@ -51,7 +51,7 @@ enum {
 
 struct mlx5_pages_req {
        struct mlx5_core_dev *dev;
-       u32     func_id;
+       u16     func_id;
        s32     npages;
        struct work_struct work;
 };
index 8c9ac87..3139658 100644 (file)
@@ -86,7 +86,7 @@ struct mlx5_reg_pcap {
        __be32                  caps_31_0;
 };
 
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps)
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps)
 {
        struct mlx5_reg_pcap in;
        struct mlx5_reg_pcap out;
index c83d16d..0eb4764 100644 (file)
@@ -1519,7 +1519,8 @@ static int ks_hw_init(struct ks_net *ks)
        ks->all_mcast = 0;
        ks->mcast_lst_size = 0;
 
-       ks->frame_head_info = kmalloc(MHEADER_SIZE, GFP_KERNEL);
+       ks->frame_head_info = devm_kmalloc(&ks->pdev->dev, MHEADER_SIZE,
+                                          GFP_KERNEL);
        if (!ks->frame_head_info)
                return false;
 
@@ -1537,44 +1538,41 @@ MODULE_DEVICE_TABLE(of, ks8851_ml_dt_ids);
 
 static int ks8851_probe(struct platform_device *pdev)
 {
-       int err = -ENOMEM;
+       int err;
        struct resource *io_d, *io_c;
        struct net_device *netdev;
        struct ks_net *ks;
        u16 id, data;
        const char *mac;
 
-       io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
-       io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
-
-       if (!request_mem_region(io_d->start, resource_size(io_d), DRV_NAME))
-               goto err_mem_region;
-
-       if (!request_mem_region(io_c->start, resource_size(io_c), DRV_NAME))
-               goto err_mem_region1;
-
        netdev = alloc_etherdev(sizeof(struct ks_net));
        if (!netdev)
-               goto err_alloc_etherdev;
+               return -ENOMEM;
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
        ks = netdev_priv(netdev);
        ks->netdev = netdev;
-       ks->hw_addr = ioremap(io_d->start, resource_size(io_d));
 
-       if (!ks->hw_addr)
-               goto err_ioremap;
+       io_d = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       ks->hw_addr = devm_ioremap_resource(&pdev->dev, io_d);
+       if (IS_ERR(ks->hw_addr)) {
+               err = PTR_ERR(ks->hw_addr);
+               goto err_free;
+       }
 
-       ks->hw_addr_cmd = ioremap(io_c->start, resource_size(io_c));
-       if (!ks->hw_addr_cmd)
-               goto err_ioremap1;
+       io_c = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+       ks->hw_addr_cmd = devm_ioremap_resource(&pdev->dev, io_c);
+       if (IS_ERR(ks->hw_addr_cmd)) {
+               err = PTR_ERR(ks->hw_addr_cmd);
+               goto err_free;
+       }
 
        netdev->irq = platform_get_irq(pdev, 0);
 
        if ((int)netdev->irq < 0) {
                err = netdev->irq;
-               goto err_get_irq;
+               goto err_free;
        }
 
        ks->pdev = pdev;
@@ -1604,18 +1602,18 @@ static int ks8851_probe(struct platform_device *pdev)
        if ((ks_rdreg16(ks, KS_CIDER) & ~CIDER_REV_MASK) != CIDER_ID) {
                netdev_err(netdev, "failed to read device ID\n");
                err = -ENODEV;
-               goto err_register;
+               goto err_free;
        }
 
        if (ks_read_selftest(ks)) {
                netdev_err(netdev, "failed to read device ID\n");
                err = -ENODEV;
-               goto err_register;
+               goto err_free;
        }
 
        err = register_netdev(netdev);
        if (err)
-               goto err_register;
+               goto err_free;
 
        platform_set_drvdata(pdev, netdev);
 
@@ -1663,32 +1661,17 @@ static int ks8851_probe(struct platform_device *pdev)
 
 err_pdata:
        unregister_netdev(netdev);
-err_register:
-err_get_irq:
-       iounmap(ks->hw_addr_cmd);
-err_ioremap1:
-       iounmap(ks->hw_addr);
-err_ioremap:
+err_free:
        free_netdev(netdev);
-err_alloc_etherdev:
-       release_mem_region(io_c->start, resource_size(io_c));
-err_mem_region1:
-       release_mem_region(io_d->start, resource_size(io_d));
-err_mem_region:
        return err;
 }
 
 static int ks8851_remove(struct platform_device *pdev)
 {
        struct net_device *netdev = platform_get_drvdata(pdev);
-       struct ks_net *ks = netdev_priv(netdev);
-       struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
-       kfree(ks->frame_head_info);
        unregister_netdev(netdev);
-       iounmap(ks->hw_addr);
        free_netdev(netdev);
-       release_mem_region(iomem->start, resource_size(iomem));
        return 0;
 
 }
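The ks8851_probe() rework leans on managed resources: devm_ioremap_resource() both claims and maps the region, and devres unwinds it on probe failure or unbind, which is why the iounmap()/release_mem_region() error ladder and most of ks8851_remove() can go. A hedged sketch of what the helper does internally (dev, res and addr elided):

	/* roughly what devm_ioremap_resource(dev, res) performs */
	if (!devm_request_mem_region(dev, res->start, resource_size(res),
				     dev_name(dev)))
		return IOMEM_ERR_PTR(-EBUSY);
	addr = devm_ioremap(dev, res->start, resource_size(res));
	if (!addr)
		return IOMEM_ERR_PTR(-ENOMEM);
	return addr;	/* devres releases both pieces automatically */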
index 7a0dead..2eda153 100644 (file)
@@ -503,7 +503,6 @@ vxge_rx_1b_compl(struct __vxge_hw_ring *ringh, void *dtr,
 
                        skb_hwts = skb_hwtstamps(skb);
                        skb_hwts->hwtstamp = ns_to_ktime(ns);
-                       skb_hwts->syststamp.tv64 = 0;
                }
 
                /* rth_hash_type and rth_it_hit are non-zero regardless of
index 7dc3e9b..979c698 100644 (file)
@@ -247,28 +247,6 @@ static void octeon_mgmt_rx_fill_ring(struct net_device *netdev)
        }
 }
 
-static ktime_t ptp_to_ktime(u64 ptptime)
-{
-       ktime_t ktimebase;
-       u64 ptpbase;
-       unsigned long flags;
-
-       local_irq_save(flags);
-       /* Fill the icache with the code */
-       ktime_get_real();
-       /* Flush all pending operations */
-       mb();
-       /* Read the time and PTP clock as close together as
-        * possible. It is important that this sequence take the same
-        * amount of time to reduce jitter
-        */
-       ktimebase = ktime_get_real();
-       ptpbase = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_HI);
-       local_irq_restore(flags);
-
-       return ktime_sub_ns(ktimebase, ptpbase - ptptime);
-}
-
 static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
 {
        union cvmx_mixx_orcnt mix_orcnt;
@@ -312,12 +290,12 @@ static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p)
                /* Read the hardware TX timestamp if one was recorded */
                if (unlikely(re.s.tstamp)) {
                        struct skb_shared_hwtstamps ts;
+                       memset(&ts, 0, sizeof(ts));
                        /* Read the timestamp */
                        u64 ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port));
                        /* Remove the timestamp from the FIFO */
                        cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0);
                        /* Tell the kernel about the timestamp */
-                       ts.syststamp = ptp_to_ktime(ns);
                        ts.hwtstamp = ns_to_ktime(ns);
                        skb_tstamp_tx(skb, &ts);
                }
@@ -429,7 +407,6 @@ good:
                        struct skb_shared_hwtstamps *ts;
                        ts = skb_hwtstamps(skb);
                        ts->hwtstamp = ns_to_ktime(ns);
-                       ts->syststamp = ptp_to_ktime(ns);
                        __skb_pull(skb, 8);
                }
                skb->protocol = eth_type_trans(skb, netdev);
index be618b9..16039d1 100644 (file)
@@ -39,8 +39,8 @@
 
 #define _QLCNIC_LINUX_MAJOR 5
 #define _QLCNIC_LINUX_MINOR 3
-#define _QLCNIC_LINUX_SUBVERSION 60
-#define QLCNIC_LINUX_VERSIONID  "5.3.60"
+#define _QLCNIC_LINUX_SUBVERSION 61
+#define QLCNIC_LINUX_VERSIONID  "5.3.61"
 #define QLCNIC_DRV_IDC_VER  0x01
 #define QLCNIC_DRIVER_VERSION  ((_QLCNIC_LINUX_MAJOR << 16) |\
                 (_QLCNIC_LINUX_MINOR << 8) | (_QLCNIC_LINUX_SUBVERSION))
index 561cb11..a72bcdd 100644 (file)
@@ -926,7 +926,7 @@ static int qlcnic_dcb_get_num_tcs(struct net_device *netdev, int attr, u8 *num)
        }
 }
 
-static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
+static int qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
        struct dcb_app app = {
@@ -935,7 +935,7 @@ static u8 qlcnic_dcb_get_app(struct net_device *netdev, u8 idtype, u16 id)
                             };
 
        if (!test_bit(QLCNIC_DCB_STATE, &adapter->dcb->state))
-               return 0;
+               return -EINVAL;
 
        return dcb_getapp(netdev, &app);
 }
index 4fc1867..0fdbcc8 100644 (file)
@@ -427,16 +427,17 @@ static int qlcnic_fdb_add(struct ndmsg *ndm, struct nlattr *tb[],
 }
 
 static int qlcnic_fdb_dump(struct sk_buff *skb, struct netlink_callback *ncb,
-                       struct net_device *netdev, int idx)
+                       struct net_device *netdev,
+                       struct net_device *filter_dev, int idx)
 {
        struct qlcnic_adapter *adapter = netdev_priv(netdev);
 
        if (!adapter->fdb_mac_learn)
-               return ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+               return ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
 
        if ((adapter->flags & QLCNIC_ESWITCH_ENABLED) ||
            qlcnic_sriov_check(adapter))
-               idx = ndo_dflt_fdb_dump(skb, ncb, netdev, idx);
+               idx = ndo_dflt_fdb_dump(skb, ncb, netdev, filter_dev, idx);
 
        return idx;
 }
@@ -2980,17 +2981,43 @@ static inline void dump_tx_ring_desc(struct qlcnic_host_tx_ring *tx_ring)
        }
 }
 
-static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
+static void qlcnic_dump_rings(struct qlcnic_adapter *adapter)
 {
+       struct qlcnic_recv_context *recv_ctx = adapter->recv_ctx;
        struct net_device *netdev = adapter->netdev;
+       struct qlcnic_host_rds_ring *rds_ring;
+       struct qlcnic_host_sds_ring *sds_ring;
        struct qlcnic_host_tx_ring *tx_ring;
        int ring;
 
        if (!netdev || !netif_running(netdev))
                return;
 
+       for (ring = 0; ring < adapter->max_rds_rings; ring++) {
+               rds_ring = &recv_ctx->rds_rings[ring];
+               if (!rds_ring)
+                       continue;
+               netdev_info(netdev,
+                           "rds_ring=%d crb_rcv_producer=%d producer=%u num_desc=%u\n",
+                            ring, readl(rds_ring->crb_rcv_producer),
+                            rds_ring->producer, rds_ring->num_desc);
+       }
+
+       for (ring = 0; ring < adapter->drv_sds_rings; ring++) {
+               sds_ring = &(recv_ctx->sds_rings[ring]);
+               if (!sds_ring)
+                       continue;
+               netdev_info(netdev,
+                           "sds_ring=%d crb_sts_consumer=%d consumer=%u crb_intr_mask=%d num_desc=%u\n",
+                           ring, readl(sds_ring->crb_sts_consumer),
+                           sds_ring->consumer, readl(sds_ring->crb_intr_mask),
+                           sds_ring->num_desc);
+       }
+
        for (ring = 0; ring < adapter->drv_tx_rings; ring++) {
                tx_ring = &adapter->tx_ring[ring];
+               if (!tx_ring)
+                       continue;
                netdev_info(netdev, "Tx ring=%d Context Id=0x%x\n",
                            ring, tx_ring->ctx_id);
                netdev_info(netdev,
@@ -3013,9 +3040,10 @@ static void qlcnic_dump_tx_rings(struct qlcnic_adapter *adapter)
                netdev_info(netdev, "Total desc=%d, Available desc=%d\n",
                            tx_ring->num_desc, qlcnic_tx_avail(tx_ring));
 
-               if (netif_msg_tx_done(adapter->ahw))
+               if (netif_msg_tx_err(adapter->ahw))
                        dump_tx_ring_desc(tx_ring);
        }
 }
 
 static void qlcnic_tx_timeout(struct net_device *netdev)
@@ -3025,16 +3053,18 @@ static void qlcnic_tx_timeout(struct net_device *netdev)
        if (test_bit(__QLCNIC_RESETTING, &adapter->state))
                return;
 
-       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS) {
-               netdev_info(netdev, "Tx timeout, reset the adapter.\n");
+       qlcnic_dump_rings(adapter);
+
+       if (++adapter->tx_timeo_cnt >= QLCNIC_MAX_TX_TIMEOUTS ||
+           netif_msg_tx_err(adapter->ahw)) {
+               netdev_err(netdev, "Tx timeout, reset the adapter.\n");
                if (qlcnic_82xx_check(adapter))
                        adapter->need_fw_reset = 1;
                else if (qlcnic_83xx_check(adapter))
                        qlcnic_83xx_idc_request_reset(adapter,
                                                      QLCNIC_FORCE_FW_DUMP_KEY);
        } else {
-               netdev_info(netdev, "Tx timeout, reset adapter context.\n");
-               qlcnic_dump_tx_rings(adapter);
+               netdev_err(netdev, "Tx timeout, reset adapter context.\n");
                adapter->ahw->reset_context = 1;
        }
 }
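qlcnic's timeout handler now dumps every RX/status/TX ring, and forces an immediate adapter reset only when the TX-error message level is set or the timeout count overflows, so the verbose path is opt-in at runtime (e.g. ethtool -s ethX msglvl 0x80, assuming the stock NETIF_MSG_TX_ERR bit). The gate itself is the standard helper:

	/* from <linux/netdevice.h> */
	#define netif_msg_tx_err(p)	((p)->msg_enable & NETIF_MSG_TX_ERR)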
index 2bc728e..75b1693 100644 (file)
@@ -382,13 +382,6 @@ static int cp_get_eeprom(struct net_device *dev,
 static int cp_set_eeprom(struct net_device *dev,
                         struct ethtool_eeprom *eeprom, u8 *data);
 
-static DEFINE_PCI_DEVICE_TABLE(cp_pci_tbl) = {
-       { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
-       { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
-       { },
-};
-MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
-
 static struct {
        const char str[ETH_GSTRING_LEN];
 } ethtool_stats_keys[] = {
@@ -1887,11 +1880,7 @@ static int cp_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
        resource_size_t pciaddr;
        unsigned int addr_len, i, pci_using_dac;
 
-#ifndef MODULE
-       static int version_printed;
-       if (version_printed++ == 0)
-               pr_info("%s", version);
-#endif
+       pr_info_once("%s", version);
 
        if (pdev->vendor == PCI_VENDOR_ID_REALTEK &&
            pdev->device == PCI_DEVICE_ID_REALTEK_8139 && pdev->revision < 0x20) {
@@ -2110,6 +2099,13 @@ static int cp_resume (struct pci_dev *pdev)
 }
 #endif /* CONFIG_PM */
 
+static const struct pci_device_id cp_pci_tbl[] = {
+       { PCI_DEVICE(PCI_VENDOR_ID_REALTEK,     PCI_DEVICE_ID_REALTEK_8139), },
+       { PCI_DEVICE(PCI_VENDOR_ID_TTTECH,      PCI_DEVICE_ID_TTTECH_MC322), },
+       { },
+};
+MODULE_DEVICE_TABLE(pci, cp_pci_tbl);
+
 static struct pci_driver cp_driver = {
        .name         = DRV_NAME,
        .id_table     = cp_pci_tbl,
@@ -2121,18 +2117,4 @@ static struct pci_driver cp_driver = {
 #endif
 };
 
-static int __init cp_init (void)
-{
-#ifdef MODULE
-       pr_info("%s", version);
-#endif
-       return pci_register_driver(&cp_driver);
-}
-
-static void __exit cp_exit (void)
-{
-       pci_unregister_driver (&cp_driver);
-}
-
-module_init(cp_init);
-module_exit(cp_exit);
+module_pci_driver(cp_driver);
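module_pci_driver() emits exactly the init/exit boilerplate deleted above; in <linux/pci.h> it is a thin wrapper:

	#define module_pci_driver(__pci_driver) \
		module_driver(__pci_driver, pci_register_driver, \
			      pci_unregister_driver)

One behavioral nuance survives the conversion: the version banner now comes from pr_info_once() in cp_init_one(), so it prints on first probe rather than unconditionally at module load.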
index be425ad..9887bcb 100644 (file)
@@ -27,6 +27,8 @@
 #include <linux/firmware.h>
 #include <linux/pci-aspm.h>
 #include <linux/prefetch.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
 
 #include <asm/io.h>
 #include <asm/irq.h>
@@ -538,6 +540,7 @@ enum rtl_register_content {
        MagicPacket     = (1 << 5),     /* Wake up when receives a Magic Packet */
        LinkUp          = (1 << 4),     /* Wake up when the cable connection is re-established */
        Jumbo_En0       = (1 << 2),     /* 8168 only. Reserved in the 8168b */
+       Rdy_to_L23      = (1 << 1),     /* L23 Enable */
        Beacon_en       = (1 << 0),     /* 8168 only. Reserved in the 8168b */
 
        /* Config4 register */
@@ -626,39 +629,22 @@ enum rtl_tx_desc_bit_0 {
 
 /* 8102e, 8168c and beyond. */
 enum rtl_tx_desc_bit_1 {
+       /* First doubleword. */
+       TD1_GTSENV4     = (1 << 26),            /* Giant Send for IPv4 */
+       TD1_GTSENV6     = (1 << 25),            /* Giant Send for IPv6 */
+#define GTTCPHO_SHIFT                  18
+#define GTTCPHO_MAX                    0x7fU
+
        /* Second doubleword. */
+#define TCPHO_SHIFT                    18
+#define TCPHO_MAX                      0x3ffU
 #define TD1_MSS_SHIFT                  18      /* MSS position (11 bits) */
-       TD1_IP_CS       = (1 << 29),            /* Calculate IP checksum */
+       TD1_IPv6_CS     = (1 << 28),            /* Calculate IPv6 checksum */
+       TD1_IPv4_CS     = (1 << 29),            /* Calculate IPv4 checksum */
        TD1_TCP_CS      = (1 << 30),            /* Calculate TCP/IP checksum */
        TD1_UDP_CS      = (1 << 31),            /* Calculate UDP/IP checksum */
 };
 
-static const struct rtl_tx_desc_info {
-       struct {
-               u32 udp;
-               u32 tcp;
-       } checksum;
-       u16 mss_shift;
-       u16 opts_offset;
-} tx_desc_info [] = {
-       [RTL_TD_0] = {
-               .checksum = {
-                       .udp    = TD0_IP_CS | TD0_UDP_CS,
-                       .tcp    = TD0_IP_CS | TD0_TCP_CS
-               },
-               .mss_shift      = TD0_MSS_SHIFT,
-               .opts_offset    = 0
-       },
-       [RTL_TD_1] = {
-               .checksum = {
-                       .udp    = TD1_IP_CS | TD1_UDP_CS,
-                       .tcp    = TD1_IP_CS | TD1_TCP_CS
-               },
-               .mss_shift      = TD1_MSS_SHIFT,
-               .opts_offset    = 1
-       }
-};
-
 enum rtl_rx_desc_bit {
        /* Rx private */
        PID1            = (1 << 18), /* Protocol ID bit 1/2 */
@@ -782,6 +768,7 @@ struct rtl8169_private {
        unsigned int (*phy_reset_pending)(struct rtl8169_private *tp);
        unsigned int (*link_ok)(void __iomem *);
        int (*do_ioctl)(struct rtl8169_private *tp, struct mii_ioctl_data *data, int cmd);
+       bool (*tso_csum)(struct rtl8169_private *, struct sk_buff *, u32 *);
 
        struct {
                DECLARE_BITMAP(flags, RTL_FLAG_MAX);
@@ -4239,6 +4226,8 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
                RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST);
                break;
        case RTL_GIGA_MAC_VER_40:
+               RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
+               break;
        case RTL_GIGA_MAC_VER_41:
        case RTL_GIGA_MAC_VER_42:
        case RTL_GIGA_MAC_VER_43:
@@ -4897,6 +4886,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
                                 PCI_EXP_LNKCTL_CLKREQ_EN);
 }
 
+static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
+{
+       void __iomem *ioaddr = tp->mmio_addr;
+       u8 data;
+
+       data = RTL_R8(Config3);
+
+       if (enable)
+               data |= Rdy_to_L23;
+       else
+               data &= ~Rdy_to_L23;
+
+       RTL_W8(Config3, data);
+}
+
 #define R8168_CPCMD_QUIRK_MASK (\
        EnableBist | \
        Mac_dbgo_oe | \
@@ -5246,6 +5250,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
        };
 
        rtl_hw_start_8168f(tp);
+       rtl_pcie_state_l2l3_enable(tp, false);
 
        rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
 
@@ -5284,6 +5289,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
 
        rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5543,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
        RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
 
        rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5580,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
        rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
        rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5594,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
        RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
        RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
        RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
+
+       rtl_pcie_state_l2l3_enable(tp, false);
 }
 
 static void rtl_hw_start_8101(struct net_device *dev)
@@ -5941,32 +5954,179 @@ static bool rtl_test_hw_pad_bug(struct rtl8169_private *tp, struct sk_buff *skb)
        return skb->len < ETH_ZLEN && tp->mac_version == RTL_GIGA_MAC_VER_34;
 }
 
-static inline bool rtl8169_tso_csum(struct rtl8169_private *tp,
-                                   struct sk_buff *skb, u32 *opts)
+static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+                                     struct net_device *dev);
+/* r8169_csum_workaround()
+ * The hw limits the value of the transport offset. When the offset is out of
+ * range, calculate the checksum in sw.
+ */
+static void r8169_csum_workaround(struct rtl8169_private *tp,
+                                 struct sk_buff *skb)
+{
+       if (skb_shinfo(skb)->gso_size) {
+               netdev_features_t features = tp->dev->features;
+               struct sk_buff *segs, *nskb;
+
+               features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
+               segs = skb_gso_segment(skb, features);
+               if (IS_ERR(segs) || !segs)
+                       goto drop;
+
+               do {
+                       nskb = segs;
+                       segs = segs->next;
+                       nskb->next = NULL;
+                       rtl8169_start_xmit(nskb, tp->dev);
+               } while (segs);
+
+               dev_kfree_skb(skb);
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               if (skb_checksum_help(skb) < 0)
+                       goto drop;
+
+               rtl8169_start_xmit(skb, tp->dev);
+       } else {
+               struct net_device_stats *stats;
+
+drop:
+               stats = &tp->dev->stats;
+               stats->tx_dropped++;
+               dev_kfree_skb(skb);
+       }
+}
+
+/* msdn_giant_send_check()
+ * According to Microsoft's documentation, the TCP pseudo header excludes the
+ * packet length for IPv6 TCP large packets.
+ */
+static int msdn_giant_send_check(struct sk_buff *skb)
+{
+       const struct ipv6hdr *ipv6h;
+       struct tcphdr *th;
+       int ret;
+
+       ret = skb_cow_head(skb, 0);
+       if (ret)
+               return ret;
+
+       ipv6h = ipv6_hdr(skb);
+       th = tcp_hdr(skb);
+
+       th->check = 0;
+       th->check = ~tcp_v6_check(0, &ipv6h->saddr, &ipv6h->daddr, 0);
+
+       return ret;
+}
+
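
The zero length passed to tcp_v6_check() is the point: for large sends the hardware fills in each segment's length, so the stack seeds th->check with a length-free pseudo-header sum. A hedged sketch of the IPv4 counterpart, as used by other TSO-capable drivers (not part of this patch; foo_* is hypothetical):

    static int foo_giant_send_check_v4(struct sk_buff *skb)
    {
            const struct iphdr *iph = ip_hdr(skb);
            struct tcphdr *th = tcp_hdr(skb);

            th->check = 0;
            th->check = ~tcp_v4_check(0, iph->saddr, iph->daddr, 0);
            return 0;
    }
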
+static inline __be16 get_protocol(struct sk_buff *skb)
+{
+       __be16 protocol;
+
+       if (skb->protocol == htons(ETH_P_8021Q))
+               protocol = vlan_eth_hdr(skb)->h_vlan_encapsulated_proto;
+       else
+               protocol = skb->protocol;
+
+       return protocol;
+}
+
+static bool rtl8169_tso_csum_v1(struct rtl8169_private *tp,
+                               struct sk_buff *skb, u32 *opts)
 {
-       const struct rtl_tx_desc_info *info = tx_desc_info + tp->txd_version;
        u32 mss = skb_shinfo(skb)->gso_size;
-       int offset = info->opts_offset;
 
        if (mss) {
                opts[0] |= TD_LSO;
-               opts[offset] |= min(mss, TD_MSS_MAX) << info->mss_shift;
+               opts[0] |= min(mss, TD_MSS_MAX) << TD0_MSS_SHIFT;
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                const struct iphdr *ip = ip_hdr(skb);
 
+               if (ip->protocol == IPPROTO_TCP)
+                       opts[0] |= TD0_IP_CS | TD0_TCP_CS;
+               else if (ip->protocol == IPPROTO_UDP)
+                       opts[0] |= TD0_IP_CS | TD0_UDP_CS;
+               else
+                       WARN_ON_ONCE(1);
+       }
+
+       return true;
+}
+
+static bool rtl8169_tso_csum_v2(struct rtl8169_private *tp,
+                               struct sk_buff *skb, u32 *opts)
+{
+       u32 transport_offset = (u32)skb_transport_offset(skb);
+       u32 mss = skb_shinfo(skb)->gso_size;
+
+       if (mss) {
+               if (transport_offset > GTTCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->dev,
+                                  "Invalid transport offset 0x%x for TSO\n",
+                                  transport_offset);
+                       return false;
+               }
+
+               switch (get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       opts[0] |= TD1_GTSENV4;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       if (msdn_giant_send_check(skb))
+                               return false;
+
+                       opts[0] |= TD1_GTSENV6;
+                       break;
+
+               default:
+                       WARN_ON_ONCE(1);
+                       break;
+               }
+
+               opts[0] |= transport_offset << GTTCPHO_SHIFT;
+               opts[1] |= min(mss, TD_MSS_MAX) << TD1_MSS_SHIFT;
+       } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+               u8 ip_protocol;
+
                if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
                        return skb_checksum_help(skb) == 0 && rtl_skb_pad(skb);
 
-               if (ip->protocol == IPPROTO_TCP)
-                       opts[offset] |= info->checksum.tcp;
-               else if (ip->protocol == IPPROTO_UDP)
-                       opts[offset] |= info->checksum.udp;
+               if (transport_offset > TCPHO_MAX) {
+                       netif_warn(tp, tx_err, tp->dev,
+                                  "Invalid transport offset 0x%x\n",
+                                  transport_offset);
+                       return false;
+               }
+
+               switch (get_protocol(skb)) {
+               case htons(ETH_P_IP):
+                       opts[1] |= TD1_IPv4_CS;
+                       ip_protocol = ip_hdr(skb)->protocol;
+                       break;
+
+               case htons(ETH_P_IPV6):
+                       opts[1] |= TD1_IPv6_CS;
+                       ip_protocol = ipv6_hdr(skb)->nexthdr;
+                       break;
+
+               default:
+                       ip_protocol = IPPROTO_RAW;
+                       break;
+               }
+
+               if (ip_protocol == IPPROTO_TCP)
+                       opts[1] |= TD1_TCP_CS;
+               else if (ip_protocol == IPPROTO_UDP)
+                       opts[1] |= TD1_UDP_CS;
                else
                        WARN_ON_ONCE(1);
+
+               opts[1] |= transport_offset << TCPHO_SHIFT;
        } else {
                if (unlikely(rtl_test_hw_pad_bug(tp, skb)))
                        return rtl_skb_pad(skb);
        }
+
        return true;
 }
 
@@ -5994,8 +6154,10 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
        opts[1] = cpu_to_le32(rtl8169_tx_vlan_tag(skb));
        opts[0] = DescOwn;
 
-       if (!rtl8169_tso_csum(tp, skb, opts))
-               goto err_update_stats;
+       if (!tp->tso_csum(tp, skb, opts)) {
+               r8169_csum_workaround(tp, skb);
+               return NETDEV_TX_OK;
+       }
 
        len = skb_headlen(skb);
        mapping = dma_map_single(d, skb->data, len, DMA_TO_DEVICE);
@@ -6060,7 +6222,6 @@ err_dma_1:
        rtl8169_unmap_tx_skb(d, tp->tx_skb + entry, txd);
 err_dma_0:
        dev_kfree_skb_any(skb);
-err_update_stats:
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
 
@@ -7145,6 +7306,14 @@ rtl_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                /* 8110SCd requires hardware Rx VLAN - disallow toggling */
                dev->hw_features &= ~NETIF_F_HW_VLAN_CTAG_RX;
 
+       if (tp->txd_version == RTL_TD_0)
+               tp->tso_csum = rtl8169_tso_csum_v1;
+       else if (tp->txd_version == RTL_TD_1) {
+               tp->tso_csum = rtl8169_tso_csum_v2;
+               dev->hw_features |= NETIF_F_IPV6_CSUM | NETIF_F_TSO6;
+       } else
+               WARN_ON_ONCE(1);
+
        dev->hw_features |= NETIF_F_RXALL;
        dev->hw_features |= NETIF_F_RXFCS;
 
index 7622213..60e9c2c 100644 (file)
@@ -1094,20 +1094,16 @@ static void sh_eth_ring_free(struct net_device *ndev)
 
        /* Free Rx skb ringbuffer */
        if (mdp->rx_skbuff) {
-               for (i = 0; i < mdp->num_rx_ring; i++) {
-                       if (mdp->rx_skbuff[i])
-                               dev_kfree_skb(mdp->rx_skbuff[i]);
-               }
+               for (i = 0; i < mdp->num_rx_ring; i++)
+                       dev_kfree_skb(mdp->rx_skbuff[i]);
        }
        kfree(mdp->rx_skbuff);
        mdp->rx_skbuff = NULL;
 
        /* Free Tx skb ringbuffer */
        if (mdp->tx_skbuff) {
-               for (i = 0; i < mdp->num_tx_ring; i++) {
-                       if (mdp->tx_skbuff[i])
-                               dev_kfree_skb(mdp->tx_skbuff[i]);
-               }
+               for (i = 0; i < mdp->num_tx_ring; i++)
+                       dev_kfree_skb(mdp->tx_skbuff[i]);
        }
        kfree(mdp->tx_skbuff);
        mdp->tx_skbuff = NULL;
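
These guards can go because dev_kfree_skb(), like kfree_skb(), returns immediately when handed a NULL pointer, so freeing an unpopulated ring slot is already a no-op:

    /* Before: */
    if (mdp->rx_skbuff[i])
            dev_kfree_skb(mdp->rx_skbuff[i]);

    /* After - equivalent, since dev_kfree_skb(NULL) does nothing: */
    dev_kfree_skb(mdp->rx_skbuff[i]);
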
@@ -2077,13 +2073,11 @@ static void sh_eth_tx_timeout(struct net_device *ndev)
                rxdesc = &mdp->rx_ring[i];
                rxdesc->status = 0;
                rxdesc->addr = 0xBADF00D0;
-               if (mdp->rx_skbuff[i])
-                       dev_kfree_skb(mdp->rx_skbuff[i]);
+               dev_kfree_skb(mdp->rx_skbuff[i]);
                mdp->rx_skbuff[i] = NULL;
        }
        for (i = 0; i < mdp->num_tx_ring; i++) {
-               if (mdp->tx_skbuff[i])
-                       dev_kfree_skb(mdp->tx_skbuff[i]);
+               dev_kfree_skb(mdp->tx_skbuff[i]);
                mdp->tx_skbuff[i] = NULL;
        }
 
@@ -2752,6 +2746,7 @@ static const struct of_device_id sh_eth_match_table[] = {
        { .compatible = "renesas,ether-r8a7779", .data = &r8a777x_data },
        { .compatible = "renesas,ether-r8a7790", .data = &r8a779x_data },
        { .compatible = "renesas,ether-r8a7791", .data = &r8a779x_data },
+       { .compatible = "renesas,ether-r8a7794", .data = &r8a779x_data },
        { .compatible = "renesas,ether-r7s72100", .data = &r7s72100_data },
        { }
 };
@@ -2978,6 +2973,7 @@ static struct platform_device_id sh_eth_id_table[] = {
        { "r8a777x-ether", (kernel_ulong_t)&r8a777x_data },
        { "r8a7790-ether", (kernel_ulong_t)&r8a779x_data },
        { "r8a7791-ether", (kernel_ulong_t)&r8a779x_data },
+       { "r8a7794-ether", (kernel_ulong_t)&r8a779x_data },
        { }
 };
 MODULE_DEVICE_TABLE(platform, sh_eth_id_table);
index b5ed30a..002d4cd 100644 (file)
@@ -755,6 +755,8 @@ static int efx_ef10_reset(struct efx_nic *efx, enum reset_type reset_type)
        { NULL, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 #define EF10_OTHER_STAT(ext_name)                              \
        [EF10_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name)                              \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(tx_bytes, TX_BYTES),
@@ -798,6 +800,8 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
        EF10_DMA_STAT(rx_align_error, RX_ALIGN_ERROR_PKTS),
        EF10_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        EF10_DMA_STAT(rx_nodesc_drops, RX_NODESC_DROPS),
+       GENERIC_SW_STAT(rx_nodesc_trunc),
+       GENERIC_SW_STAT(rx_noskb_drops),
        EF10_DMA_STAT(rx_pm_trunc_bb_overflow, PM_TRUNC_BB_OVERFLOW),
        EF10_DMA_STAT(rx_pm_discard_bb_overflow, PM_DISCARD_BB_OVERFLOW),
        EF10_DMA_STAT(rx_pm_trunc_vfifo_full, PM_TRUNC_VFIFO_FULL),
@@ -841,7 +845,9 @@ static const struct efx_hw_stat_desc efx_ef10_stat_desc[EF10_STAT_COUNT] = {
                               (1ULL << EF10_STAT_rx_gtjumbo) |         \
                               (1ULL << EF10_STAT_rx_bad_gtjumbo) |     \
                               (1ULL << EF10_STAT_rx_overflow) |        \
-                              (1ULL << EF10_STAT_rx_nodesc_drops))
+                              (1ULL << EF10_STAT_rx_nodesc_drops) |    \
+                              (1ULL << GENERIC_STAT_rx_nodesc_trunc) | \
+                              (1ULL << GENERIC_STAT_rx_noskb_drops))
 
 /* These statistics are only provided by the 10G MAC.  For a 10G/40G
  * switchable port we do not expose these because they might not
@@ -951,7 +957,7 @@ static int efx_ef10_try_update_nic_stats(struct efx_nic *efx)
                stats[EF10_STAT_rx_bytes_minus_good_bytes];
        efx_update_diff_stat(&stats[EF10_STAT_rx_bad_bytes],
                             stats[EF10_STAT_rx_bytes_minus_good_bytes]);
-
+       efx_update_sw_stats(efx, stats);
        return 0;
 }
 
@@ -990,7 +996,9 @@ static size_t efx_ef10_update_stats(struct efx_nic *efx, u64 *full_stats,
                core_stats->tx_packets = stats[EF10_STAT_tx_packets];
                core_stats->rx_bytes = stats[EF10_STAT_rx_bytes];
                core_stats->tx_bytes = stats[EF10_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops];
+               core_stats->rx_dropped = stats[EF10_STAT_rx_nodesc_drops] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[EF10_STAT_rx_multicast];
                core_stats->rx_length_errors =
                        stats[EF10_STAT_rx_gtjumbo] +
index 1e27404..4cebe9d 100644 (file)
@@ -272,6 +272,9 @@ static int efx_poll(struct napi_struct *napi, int budget)
        struct efx_nic *efx = channel->efx;
        int spent;
 
+       if (!efx_channel_lock_napi(channel))
+               return budget;
+
        netif_vdbg(efx, intr, efx->net_dev,
                   "channel %d NAPI poll executing on CPU %d\n",
                   channel->channel, raw_smp_processor_id());
@@ -311,6 +314,7 @@ static int efx_poll(struct napi_struct *napi, int budget)
                efx_nic_eventq_read_ack(channel);
        }
 
+       efx_channel_unlock_napi(channel);
        return spent;
 }
 
@@ -357,7 +361,7 @@ static int efx_init_eventq(struct efx_channel *channel)
 }
 
 /* Enable event queue processing and NAPI */
-static void efx_start_eventq(struct efx_channel *channel)
+void efx_start_eventq(struct efx_channel *channel)
 {
        netif_dbg(channel->efx, ifup, channel->efx->net_dev,
                  "chan %d start event queue\n", channel->channel);
@@ -366,17 +370,20 @@ static void efx_start_eventq(struct efx_channel *channel)
        channel->enabled = true;
        smp_wmb();
 
+       efx_channel_enable(channel);
        napi_enable(&channel->napi_str);
        efx_nic_eventq_read_ack(channel);
 }
 
 /* Disable event queue processing and NAPI */
-static void efx_stop_eventq(struct efx_channel *channel)
+void efx_stop_eventq(struct efx_channel *channel)
 {
        if (!channel->enabled)
                return;
 
        napi_disable(&channel->napi_str);
+       while (!efx_channel_disable(channel))
+               usleep_range(1000, 20000);
        channel->enabled = false;
 }
 
@@ -1960,6 +1967,8 @@ static void efx_init_napi_channel(struct efx_channel *channel)
        channel->napi_dev = efx->net_dev;
        netif_napi_add(channel->napi_dev, &channel->napi_str,
                       efx_poll, napi_weight);
+       napi_hash_add(&channel->napi_str);
+       efx_channel_init_lock(channel);
 }
 
 static void efx_init_napi(struct efx_nic *efx)
@@ -1972,8 +1981,10 @@ static void efx_init_napi(struct efx_nic *efx)
 
 static void efx_fini_napi_channel(struct efx_channel *channel)
 {
-       if (channel->napi_dev)
+       if (channel->napi_dev) {
                netif_napi_del(&channel->napi_str);
+               napi_hash_del(&channel->napi_str);
+       }
        channel->napi_dev = NULL;
 }
 
@@ -2008,6 +2019,37 @@ static void efx_netpoll(struct net_device *net_dev)
 
 #endif
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static int efx_busy_poll(struct napi_struct *napi)
+{
+       struct efx_channel *channel =
+               container_of(napi, struct efx_channel, napi_str);
+       struct efx_nic *efx = channel->efx;
+       int budget = 4;
+       int old_rx_packets, rx_packets;
+
+       if (!netif_running(efx->net_dev))
+               return LL_FLUSH_FAILED;
+
+       if (!efx_channel_lock_poll(channel))
+               return LL_FLUSH_BUSY;
+
+       old_rx_packets = channel->rx_queue.rx_packets;
+       efx_process_channel(channel, budget);
+
+       rx_packets = channel->rx_queue.rx_packets - old_rx_packets;
+
+       /* There is no race condition with NAPI here.
+        * NAPI will automatically be rescheduled if it yielded during busy
+        * polling, because it was not able to take the lock and thus returned
+        * the full budget.
+        */
+       efx_channel_unlock_poll(channel);
+
+       return rx_packets;
+}
+#endif
+
 /**************************************************************************
  *
  * Kernel net device interface
@@ -2177,6 +2219,9 @@ static const struct net_device_ops efx_farch_netdev_ops = {
        .ndo_poll_controller = efx_netpoll,
 #endif
        .ndo_setup_tc           = efx_setup_tc,
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = efx_busy_poll,
+#endif
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
 #endif
@@ -2197,6 +2242,9 @@ static const struct net_device_ops efx_ef10_netdev_ops = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = efx_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = efx_busy_poll,
+#endif
 #ifdef CONFIG_RFS_ACCEL
        .ndo_rx_flow_steer      = efx_filter_rfs,
 #endif
@@ -2607,6 +2655,8 @@ static DEFINE_PCI_DEVICE_TABLE(efx_pci_table) = {
         .driver_data = (unsigned long) &siena_a0_nic_type},
        {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0903),  /* SFC9120 PF */
         .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
+       {PCI_DEVICE(PCI_VENDOR_ID_SOLARFLARE, 0x0923),  /* SFC9140 PF */
+        .driver_data = (unsigned long) &efx_hunt_a0_nic_type},
        {0}                     /* end of list */
 };
 
@@ -2722,6 +2772,17 @@ static void efx_fini_struct(struct efx_nic *efx)
        }
 }
 
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats)
+{
+       u64 n_rx_nodesc_trunc = 0;
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx)
+               n_rx_nodesc_trunc += channel->n_rx_nodesc_trunc;
+       stats[GENERIC_STAT_rx_nodesc_trunc] = n_rx_nodesc_trunc;
+       stats[GENERIC_STAT_rx_noskb_drops] = atomic_read(&efx->n_rx_noskb_drops);
+}
+
 /**************************************************************************
  *
  * PCI interface
index 9903258..2587c58 100644 (file)
@@ -194,11 +194,16 @@ int efx_init_irq_moderation(struct efx_nic *efx, unsigned int tx_usecs,
                            bool rx_may_override_tx);
 void efx_get_irq_moderation(struct efx_nic *efx, unsigned int *tx_usecs,
                            unsigned int *rx_usecs, bool *rx_adaptive);
+void efx_stop_eventq(struct efx_channel *channel);
+void efx_start_eventq(struct efx_channel *channel);
 
 /* Dummy PHY ops for PHY drivers */
 int efx_port_dummy_op_int(struct efx_nic *efx);
 void efx_port_dummy_op_void(struct efx_nic *efx);
 
+/* Update the generic software stats in the passed stats array */
+void efx_update_sw_stats(struct efx_nic *efx, u64 *stats);
+
 /* MTD */
 #ifdef CONFIG_SFC_MTD
 int efx_mtd_add(struct efx_nic *efx, struct efx_mtd_partition *parts,
index 74739c4..cad258a 100644 (file)
@@ -77,7 +77,6 @@ static const struct efx_sw_stat_desc efx_sw_stat_desc[] = {
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_tcp_udp_chksum_err),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_mcast_mismatch),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_frm_trunc),
-       EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_nodesc_trunc),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_events),
        EFX_ETHTOOL_UINT_CHANNEL_STAT(rx_merge_packets),
 };
@@ -360,6 +359,37 @@ static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
        return n;
 }
 
+static size_t efx_describe_per_queue_stats(struct efx_nic *efx, u8 *strings)
+{
+       size_t n_stats = 0;
+       struct efx_channel *channel;
+
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_tx_queues(channel)) {
+                       n_stats++;
+                       if (strings != NULL) {
+                               snprintf(strings, ETH_GSTRING_LEN,
+                                        "tx-%u.tx_packets",
+                                        channel->tx_queue[0].queue /
+                                        EFX_TXQ_TYPES);
+
+                               strings += ETH_GSTRING_LEN;
+                       }
+               }
+       }
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_rx_queue(channel)) {
+                       n_stats++;
+                       if (strings != NULL) {
+                               snprintf(strings, ETH_GSTRING_LEN,
+                                        "rx-%d.rx_packets", channel->channel);
+                               strings += ETH_GSTRING_LEN;
+                       }
+               }
+       }
+       return n_stats;
+}
+
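
efx_describe_per_queue_stats() follows the usual ethtool two-pass convention: called with strings == NULL it only counts entries (for get_sset_count), and called with a buffer it also writes the names. A minimal sketch of the convention (FOO_N_QUEUES and the name format are hypothetical):

    static size_t foo_describe_stats(u8 *strings)
    {
            size_t n = 0;
            int i;

            for (i = 0; i < FOO_N_QUEUES; i++) {
                    n++;
                    if (strings) {
                            snprintf(strings, ETH_GSTRING_LEN, "q-%d.packets", i);
                            strings += ETH_GSTRING_LEN;
                    }
            }
            return n;
    }
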
 static int efx_ethtool_get_sset_count(struct net_device *net_dev,
                                      int string_set)
 {
@@ -368,8 +398,9 @@ static int efx_ethtool_get_sset_count(struct net_device *net_dev,
        switch (string_set) {
        case ETH_SS_STATS:
                return efx->type->describe_stats(efx, NULL) +
-                       EFX_ETHTOOL_SW_STAT_COUNT +
-                       efx_ptp_describe_stats(efx, NULL);
+                      EFX_ETHTOOL_SW_STAT_COUNT +
+                      efx_describe_per_queue_stats(efx, NULL) +
+                      efx_ptp_describe_stats(efx, NULL);
        case ETH_SS_TEST:
                return efx_ethtool_fill_self_tests(efx, NULL, NULL, NULL);
        default:
@@ -391,6 +422,8 @@ static void efx_ethtool_get_strings(struct net_device *net_dev,
                        strlcpy(strings + i * ETH_GSTRING_LEN,
                                efx_sw_stat_desc[i].name, ETH_GSTRING_LEN);
                strings += EFX_ETHTOOL_SW_STAT_COUNT * ETH_GSTRING_LEN;
+               strings += (efx_describe_per_queue_stats(efx, strings) *
+                           ETH_GSTRING_LEN);
                efx_ptp_describe_stats(efx, strings);
                break;
        case ETH_SS_TEST:
@@ -410,6 +443,7 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
        const struct efx_sw_stat_desc *stat;
        struct efx_channel *channel;
        struct efx_tx_queue *tx_queue;
+       struct efx_rx_queue *rx_queue;
        int i;
 
        spin_lock_bh(&efx->stats_lock);
@@ -445,6 +479,25 @@ static void efx_ethtool_get_stats(struct net_device *net_dev,
 
        spin_unlock_bh(&efx->stats_lock);
 
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_tx_queues(channel)) {
+                       *data = 0;
+                       efx_for_each_channel_tx_queue(tx_queue, channel) {
+                               *data += tx_queue->tx_packets;
+                       }
+                       data++;
+               }
+       }
+       efx_for_each_channel(channel, efx) {
+               if (efx_channel_has_rx_queue(channel)) {
+                       *data = 0;
+                       efx_for_each_channel_rx_queue(rx_queue, channel) {
+                               *data += rx_queue->rx_packets;
+                       }
+                       data++;
+               }
+       }
+
        efx_ptp_update_stats(efx, data);
 }
 
index fae25a4..1570375 100644 (file)
          hw_name ## _ ## offset }
 #define FALCON_OTHER_STAT(ext_name)                                    \
        [FALCON_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name)                              \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
        FALCON_DMA_STAT(tx_bytes, XgTxOctets),
@@ -191,6 +193,8 @@ static const struct efx_hw_stat_desc falcon_stat_desc[FALCON_STAT_COUNT] = {
        FALCON_DMA_STAT(rx_length_error, XgRxLengthError),
        FALCON_DMA_STAT(rx_internal_error, XgRxInternalMACError),
        FALCON_OTHER_STAT(rx_nodesc_drop_cnt),
+       GENERIC_SW_STAT(rx_nodesc_trunc),
+       GENERIC_SW_STAT(rx_noskb_drops),
 };
 static const unsigned long falcon_stat_mask[] = {
        [0 ... BITS_TO_LONGS(FALCON_STAT_COUNT) - 1] = ~0UL,
@@ -2574,6 +2578,7 @@ static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
                                     stats[FALCON_STAT_rx_bytes] -
                                     stats[FALCON_STAT_rx_good_bytes] -
                                     stats[FALCON_STAT_rx_control] * 64);
+               efx_update_sw_stats(efx, stats);
        }
 
        if (full_stats)
@@ -2584,7 +2589,9 @@ static size_t falcon_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
                core_stats->tx_packets = stats[FALCON_STAT_tx_packets];
                core_stats->rx_bytes = stats[FALCON_STAT_rx_bytes];
                core_stats->tx_bytes = stats[FALCON_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt];
+               core_stats->rx_dropped = stats[FALCON_STAT_rx_nodesc_drop_cnt] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[FALCON_STAT_rx_multicast];
                core_stats->rx_length_errors =
                        stats[FALCON_STAT_rx_gtjumbo] +
index e5fc4e1..fb19b70 100644 (file)
@@ -183,6 +183,8 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
                        result |= SUPPORTED_1000baseKX_Full;
                if (cap & (1 << MC_CMD_PHY_CAP_10000FDX_LBN))
                        result |= SUPPORTED_10000baseKX4_Full;
+               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+                       result |= SUPPORTED_40000baseKR4_Full;
                break;
 
        case MC_CMD_MEDIA_XFP:
@@ -190,6 +192,12 @@ static u32 mcdi_to_ethtool_cap(u32 media, u32 cap)
                result |= SUPPORTED_FIBRE;
                break;
 
+       case MC_CMD_MEDIA_QSFP_PLUS:
+               result |= SUPPORTED_FIBRE;
+               if (cap & (1 << MC_CMD_PHY_CAP_40000FDX_LBN))
+                       result |= SUPPORTED_40000baseCR4_Full;
+               break;
+
        case MC_CMD_MEDIA_BASE_T:
                result |= SUPPORTED_TP;
                if (cap & (1 << MC_CMD_PHY_CAP_10HDX_LBN))
@@ -237,6 +245,8 @@ static u32 ethtool_to_mcdi_cap(u32 cap)
                result |= (1 << MC_CMD_PHY_CAP_1000FDX_LBN);
        if (cap & (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full))
                result |= (1 << MC_CMD_PHY_CAP_10000FDX_LBN);
+       if (cap & (SUPPORTED_40000baseCR4_Full | SUPPORTED_40000baseKR4_Full))
+               result |= (1 << MC_CMD_PHY_CAP_40000FDX_LBN);
        if (cap & SUPPORTED_Pause)
                result |= (1 << MC_CMD_PHY_CAP_PAUSE_LBN);
        if (cap & SUPPORTED_Asym_Pause)
@@ -285,6 +295,7 @@ static u32 mcdi_to_ethtool_media(u32 media)
 
        case MC_CMD_MEDIA_XFP:
        case MC_CMD_MEDIA_SFP_PLUS:
+       case MC_CMD_MEDIA_QSFP_PLUS:
                return PORT_FIBRE;
 
        case MC_CMD_MEDIA_BASE_T:
index 5bdae8e..9ede320 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/vmalloc.h>
 #include <linux/i2c.h>
 #include <linux/mtd/mtd.h>
+#include <net/busy_poll.h>
 
 #include "enum.h"
 #include "bitfield.h"
@@ -249,6 +250,8 @@ struct efx_tx_queue {
        unsigned int tso_packets;
        unsigned int pushes;
        unsigned int pio_packets;
+       /* Statistics to supplement MAC stats */
+       unsigned long tx_packets;
 
        /* Members shared between paths and sometimes updated */
        unsigned int empty_read_count ____cacheline_aligned_in_smp;
@@ -358,6 +361,8 @@ struct efx_rx_queue {
        unsigned int recycle_count;
        struct timer_list slow_fill;
        unsigned int slow_fill_count;
+       /* Statistics to supplement MAC stats */
+       unsigned long rx_packets;
 };
 
 enum efx_sync_events_state {
@@ -383,6 +388,8 @@ enum efx_sync_events_state {
  * @irq_moderation: IRQ moderation value (in hardware ticks)
  * @napi_dev: Net device used with NAPI
  * @napi_str: NAPI control structure
+ * @state: state for NAPI vs busy polling
+ * @state_lock: lock protecting @state
  * @eventq: Event queue buffer
  * @eventq_mask: Event queue pointer mask
  * @eventq_read_ptr: Event queue read pointer
@@ -420,6 +427,22 @@ struct efx_channel {
        unsigned int irq_moderation;
        struct net_device *napi_dev;
        struct napi_struct napi_str;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       unsigned int state;
+       spinlock_t state_lock;
+#define EFX_CHANNEL_STATE_IDLE         0
+#define EFX_CHANNEL_STATE_NAPI         (1 << 0)  /* NAPI owns this channel */
+#define EFX_CHANNEL_STATE_POLL         (1 << 1)  /* poll owns this channel */
+#define EFX_CHANNEL_STATE_DISABLED     (1 << 2)  /* channel is disabled */
+#define EFX_CHANNEL_STATE_NAPI_YIELD   (1 << 3)  /* NAPI yielded this channel */
+#define EFX_CHANNEL_STATE_POLL_YIELD   (1 << 4)  /* poll yielded this channel */
+#define EFX_CHANNEL_OWNED \
+       (EFX_CHANNEL_STATE_NAPI | EFX_CHANNEL_STATE_POLL)
+#define EFX_CHANNEL_LOCKED \
+       (EFX_CHANNEL_OWNED | EFX_CHANNEL_STATE_DISABLED)
+#define EFX_CHANNEL_USER_PEND \
+       (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_POLL_YIELD)
+#endif /* CONFIG_NET_RX_BUSY_POLL */
        struct efx_special_buffer eventq;
        unsigned int eventq_mask;
        unsigned int eventq_read_ptr;
@@ -453,6 +476,135 @@ struct efx_channel {
        u32 sync_timestamp_minor;
 };
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+       spin_lock_init(&channel->state_lock);
+}
+
+/* Called from the device poll routine to get ownership of a channel. */
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+       bool rc = true;
+
+       spin_lock_bh(&channel->state_lock);
+       if (channel->state & EFX_CHANNEL_LOCKED) {
+               WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+               channel->state |= EFX_CHANNEL_STATE_NAPI_YIELD;
+               rc = false;
+       } else {
+               /* we don't care if someone yielded */
+               channel->state = EFX_CHANNEL_STATE_NAPI;
+       }
+       spin_unlock_bh(&channel->state_lock);
+       return rc;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+       spin_lock_bh(&channel->state_lock);
+       WARN_ON(channel->state &
+               (EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_YIELD));
+
+       channel->state &= EFX_CHANNEL_STATE_DISABLED;
+       spin_unlock_bh(&channel->state_lock);
+}
+
+/* Called from efx_busy_poll(). */
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+       bool rc = true;
+
+       spin_lock_bh(&channel->state_lock);
+       if ((channel->state & EFX_CHANNEL_LOCKED)) {
+               channel->state |= EFX_CHANNEL_STATE_POLL_YIELD;
+               rc = false;
+       } else {
+               /* preserve yield marks */
+               channel->state |= EFX_CHANNEL_STATE_POLL;
+       }
+       spin_unlock_bh(&channel->state_lock);
+       return rc;
+}
+
+/* Called from efx_busy_poll() to release the channel. */
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+       spin_lock_bh(&channel->state_lock);
+       WARN_ON(channel->state & EFX_CHANNEL_STATE_NAPI);
+
+       /* will reset state to idle, unless channel is disabled */
+       channel->state &= EFX_CHANNEL_STATE_DISABLED;
+       spin_unlock_bh(&channel->state_lock);
+}
+
+/* True if a socket is polling, even if it did not get the lock. */
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+       WARN_ON(!(channel->state & EFX_CHANNEL_OWNED));
+       return channel->state & EFX_CHANNEL_USER_PEND;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+       spin_lock_bh(&channel->state_lock);
+       channel->state = EFX_CHANNEL_STATE_IDLE;
+       spin_unlock_bh(&channel->state_lock);
+}
+
+/* False if the channel is currently owned. */
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+       bool rc = true;
+
+       spin_lock_bh(&channel->state_lock);
+       if (channel->state & EFX_CHANNEL_OWNED)
+               rc = false;
+       channel->state |= EFX_CHANNEL_STATE_DISABLED;
+       spin_unlock_bh(&channel->state_lock);
+
+       return rc;
+}
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+
+static inline void efx_channel_init_lock(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_napi(struct efx_channel *channel)
+{
+       return true;
+}
+
+static inline void efx_channel_unlock_napi(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_lock_poll(struct efx_channel *channel)
+{
+       return false;
+}
+
+static inline void efx_channel_unlock_poll(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_busy_polling(struct efx_channel *channel)
+{
+       return false;
+}
+
+static inline void efx_channel_enable(struct efx_channel *channel)
+{
+}
+
+static inline bool efx_channel_disable(struct efx_channel *channel)
+{
+       return true;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
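
The channel->state word is a small hand-rolled ownership lock between the NAPI handler and busy-polling sockets. The calling discipline, condensed from efx_poll() and efx_busy_poll() above:

    /* NAPI path: */
    if (!efx_channel_lock_napi(channel))
            return budget;          /* a busy-polling socket owns the channel */
    /* ... process event queue ... */
    efx_channel_unlock_napi(channel);

    /* Busy-poll path: */
    if (!efx_channel_lock_poll(channel))
            return LL_FLUSH_BUSY;   /* NAPI owns the channel */
    /* ... poll a bounded budget ... */
    efx_channel_unlock_poll(channel);
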
+
 /**
  * struct efx_msi_context - Context for each MSI
  * @efx: The associated NIC
@@ -777,6 +929,7 @@ struct vfdi_status;
  *     interrupt has occurred.
  * @stats_lock: Statistics update lock. Must be held when calling
  *     efx_nic_type::{update,start,stop}_stats.
+ * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
  *
  * This is stored in the private area of the &struct net_device.
  */
@@ -930,6 +1083,7 @@ struct efx_nic {
        spinlock_t biu_lock;
        int last_irq_cpu;
        spinlock_t stats_lock;
+       atomic_t n_rx_noskb_drops;
 };
 
 static inline int efx_dev_registered(struct efx_nic *efx)
index d3ad8ed..60f8514 100644 (file)
@@ -135,6 +135,13 @@ enum {
 /* Size and alignment of buffer table entries (same) */
 #define EFX_BUF_SIZE   EFX_PAGE_SIZE
 
+/* NIC-generic software stats */
+enum {
+       GENERIC_STAT_rx_noskb_drops,
+       GENERIC_STAT_rx_nodesc_trunc,
+       GENERIC_STAT_COUNT
+};
+
 /**
  * struct falcon_board_type - board operations and type information
  * @id: Board type id, as found in NVRAM
@@ -205,7 +212,7 @@ static inline bool falcon_spi_present(const struct falcon_spi_device *spi)
 }
 
 enum {
-       FALCON_STAT_tx_bytes,
+       FALCON_STAT_tx_bytes = GENERIC_STAT_COUNT,
        FALCON_STAT_tx_packets,
        FALCON_STAT_tx_pause,
        FALCON_STAT_tx_control,
@@ -290,7 +297,7 @@ static inline struct falcon_board *falcon_board(struct efx_nic *efx)
 }
 
 enum {
-       SIENA_STAT_tx_bytes,
+       SIENA_STAT_tx_bytes = GENERIC_STAT_COUNT,
        SIENA_STAT_tx_good_bytes,
        SIENA_STAT_tx_bad_bytes,
        SIENA_STAT_tx_packets,
@@ -361,7 +368,7 @@ struct siena_nic_data {
 };
 
 enum {
-       EF10_STAT_tx_bytes,
+       EF10_STAT_tx_bytes = GENERIC_STAT_COUNT,
        EF10_STAT_tx_packets,
        EF10_STAT_tx_pause,
        EF10_STAT_tx_control,
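
Starting each per-NIC enum at GENERIC_STAT_COUNT reserves the first slots of every stats array for the shared software counters, so efx_update_sw_stats() can index any NIC's array uniformly. The layout trick in miniature (names hypothetical):

    enum { GEN_STAT_a, GEN_STAT_b, GEN_STAT_COUNT };
    enum { NICX_STAT_first = GEN_STAT_COUNT, NICX_STAT_second, NICX_STAT_COUNT };

    u64 stats[NICX_STAT_COUNT];     /* stats[0..GEN_STAT_COUNT-1] are generic */
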
index 48588dd..c0ad95d 100644 (file)
@@ -462,6 +462,7 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 
        skb_record_rx_queue(skb, channel->rx_queue.core_index);
 
+       skb_mark_napi_id(skb, &channel->napi_str);
        gro_result = napi_gro_frags(napi);
        if (gro_result != GRO_DROP)
                channel->irq_mod_score += 2;
@@ -480,8 +481,10 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
        skb = netdev_alloc_skb(efx->net_dev,
                               efx->rx_ip_align + efx->rx_prefix_size +
                               hdr_len);
-       if (unlikely(skb == NULL))
+       if (unlikely(skb == NULL)) {
+               atomic_inc(&efx->n_rx_noskb_drops);
                return NULL;
+       }
 
        EFX_BUG_ON_PARANOID(rx_buf->len < hdr_len);
 
@@ -518,6 +521,8 @@ static struct sk_buff *efx_rx_mk_skb(struct efx_channel *channel,
        /* Move past the ethernet header */
        skb->protocol = eth_type_trans(skb, efx->net_dev);
 
+       skb_mark_napi_id(skb, &channel->napi_str);
+
        return skb;
 }
 
@@ -528,6 +533,8 @@ void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
        struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
        struct efx_rx_buffer *rx_buf;
 
+       rx_queue->rx_packets++;
+
        rx_buf = efx_rx_buffer(rx_queue, index);
        rx_buf->flags |= flags;
 
@@ -662,7 +669,8 @@ void __efx_rx_packet(struct efx_channel *channel)
        if (unlikely(!(efx->net_dev->features & NETIF_F_RXCSUM)))
                rx_buf->flags &= ~EFX_RX_PKT_CSUMMED;
 
-       if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb)
+       if ((rx_buf->flags & EFX_RX_PKT_TCP) && !channel->type->receive_skb &&
+           !efx_channel_busy_polling(channel))
                efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh);
        else
                efx_rx_deliver(channel, eh, rx_buf, channel->rx_pkt_n_frags);
index 0fc5bae..10b6173 100644 (file)
@@ -188,7 +188,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx,
                schedule_timeout_uninterruptible(wait);
 
                efx_for_each_channel(channel, efx) {
-                       napi_disable(&channel->napi_str);
+                       efx_stop_eventq(channel);
                        if (channel->eventq_read_ptr !=
                            read_ptr[channel->channel]) {
                                set_bit(channel->channel, &napi_ran);
@@ -200,8 +200,7 @@ static int efx_test_eventq_irq(struct efx_nic *efx,
                                if (efx_nic_event_test_irq_cpu(channel) >= 0)
                                        clear_bit(channel->channel, &int_pend);
                        }
-                       napi_enable(&channel->napi_str);
-                       efx_nic_eventq_read_ack(channel);
+                       efx_start_eventq(channel);
                }
 
                wait *= 2;
index 50ffefe..ae69685 100644 (file)
@@ -424,6 +424,8 @@ static void siena_remove_nic(struct efx_nic *efx)
        { #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }
 #define SIENA_OTHER_STAT(ext_name)                             \
        [SIENA_STAT_ ## ext_name] = { #ext_name, 0, 0 }
+#define GENERIC_SW_STAT(ext_name)                              \
+       [GENERIC_STAT_ ## ext_name] = { #ext_name, 0, 0 }
 
 static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
        SIENA_DMA_STAT(tx_bytes, TX_BYTES),
@@ -483,6 +485,8 @@ static const struct efx_hw_stat_desc siena_stat_desc[SIENA_STAT_COUNT] = {
        SIENA_DMA_STAT(rx_length_error, RX_LENGTH_ERROR_PKTS),
        SIENA_DMA_STAT(rx_internal_error, RX_INTERNAL_ERROR_PKTS),
        SIENA_DMA_STAT(rx_nodesc_drop_cnt, RX_NODESC_DROPS),
+       GENERIC_SW_STAT(rx_nodesc_trunc),
+       GENERIC_SW_STAT(rx_noskb_drops),
 };
 static const unsigned long siena_stat_mask[] = {
        [0 ... BITS_TO_LONGS(SIENA_STAT_COUNT) - 1] = ~0UL,
@@ -528,6 +532,7 @@ static int siena_try_update_nic_stats(struct efx_nic *efx)
        efx_update_diff_stat(&stats[SIENA_STAT_rx_good_bytes],
                             stats[SIENA_STAT_rx_bytes] -
                             stats[SIENA_STAT_rx_bad_bytes]);
+       efx_update_sw_stats(efx, stats);
        return 0;
 }
 
@@ -554,7 +559,9 @@ static size_t siena_update_nic_stats(struct efx_nic *efx, u64 *full_stats,
                core_stats->tx_packets = stats[SIENA_STAT_tx_packets];
                core_stats->rx_bytes = stats[SIENA_STAT_rx_bytes];
                core_stats->tx_bytes = stats[SIENA_STAT_tx_bytes];
-               core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt];
+               core_stats->rx_dropped = stats[SIENA_STAT_rx_nodesc_drop_cnt] +
+                                        stats[GENERIC_STAT_rx_nodesc_trunc] +
+                                        stats[GENERIC_STAT_rx_noskb_drops];
                core_stats->multicast = stats[SIENA_STAT_rx_multicast];
                core_stats->collisions = stats[SIENA_STAT_tx_collision];
                core_stats->rx_length_errors =
index ede8dcc..65c220f 100644 (file)
@@ -189,18 +189,6 @@ struct efx_short_copy_buffer {
        u8 buf[L1_CACHE_BYTES];
 };
 
-/* Copy in explicit 64-bit writes. */
-static void efx_memcpy_64(void __iomem *dest, void *src, size_t len)
-{
-       u64 *src64 = src;
-       u64 __iomem *dest64 = dest;
-       size_t l64 = len / 8;
-       size_t i;
-
-       for (i = 0; i < l64; i++)
-               writeq(src64[i], &dest64[i]);
-}
-
 /* Copy to PIO, respecting that writes to PIO buffers must be dword aligned.
  * Advances piobuf pointer. Leaves additional data in the copy buffer.
  */
@@ -210,7 +198,7 @@ static void efx_memcpy_toio_aligned(struct efx_nic *efx, u8 __iomem **piobuf,
 {
        int block_len = len & ~(sizeof(copy_buf->buf) - 1);
 
-       efx_memcpy_64(*piobuf, data, block_len);
+       __iowrite64_copy(*piobuf, data, block_len >> 3);
        *piobuf += block_len;
        len -= block_len;
 
@@ -242,7 +230,8 @@ static void efx_memcpy_toio_aligned_cb(struct efx_nic *efx, u8 __iomem **piobuf,
                if (copy_buf->used < sizeof(copy_buf->buf))
                        return;
 
-               efx_memcpy_64(*piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+               __iowrite64_copy(*piobuf, copy_buf->buf,
+                                sizeof(copy_buf->buf) >> 3);
                *piobuf += sizeof(copy_buf->buf);
                data += copy_to_buf;
                len -= copy_to_buf;
@@ -257,7 +246,8 @@ static void efx_flush_copy_buffer(struct efx_nic *efx, u8 __iomem *piobuf,
 {
        /* if there's anything in it, write the whole buffer, including junk */
        if (copy_buf->used)
-               efx_memcpy_64(piobuf, copy_buf->buf, sizeof(copy_buf->buf));
+               __iowrite64_copy(piobuf, copy_buf->buf,
+                                sizeof(copy_buf->buf) >> 3);
 }
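
__iowrite64_copy() is the stock replacement for the deleted efx_memcpy_64(): it copies to MMIO space in 64-bit units and takes its count in 8-byte words, which is why every converted call site shifts the byte length right by 3:

    /* Write a 64-byte copy buffer as eight qword MMIO stores: */
    __iowrite64_copy(piobuf, copy_buf->buf, sizeof(copy_buf->buf) >> 3);
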
 
 /* Traverse skb structure and copy fragments in to PIO buffer.
@@ -316,8 +306,8 @@ efx_enqueue_skb_pio(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
                 */
                BUILD_BUG_ON(L1_CACHE_BYTES >
                             SKB_DATA_ALIGN(sizeof(struct skb_shared_info)));
-               efx_memcpy_64(tx_queue->piobuf, skb->data,
-                             ALIGN(skb->len, L1_CACHE_BYTES));
+               __iowrite64_copy(tx_queue->piobuf, skb->data,
+                                ALIGN(skb->len, L1_CACHE_BYTES) >> 3);
        }
 
        EFX_POPULATE_QWORD_5(buffer->option,
@@ -452,6 +442,8 @@ finish_packet:
        /* Pass off to hardware */
        efx_nic_push_buffers(tx_queue);
 
+       tx_queue->tx_packets++;
+
        efx_tx_maybe_stop_queue(tx_queue);
 
        return NETDEV_TX_OK;
@@ -1245,6 +1237,8 @@ static int tso_start_new_packet(struct efx_tx_queue *tx_queue,
 
        ++tx_queue->tso_packets;
 
+       ++tx_queue->tx_packets;
+
        return 0;
 }
 
index 6072f09..7bea17c 100644 (file)
@@ -2258,7 +2258,6 @@ static int sis900_set_config(struct net_device *dev, struct ifmap *map)
                case IF_PORT_100BASEFX: /* 100BaseFx */
                        /* These Modes are not supported (are they?)*/
                        return -EOPNOTSUPP;
-                       break;
 
                default:
                        return -EINVAL;
index 1c44e67..9778cba 100644 (file)
@@ -730,7 +730,7 @@ static void smc911x_phy_detect(struct net_device *dev)
                        lp->phy_type = id1 << 16 | id2;
        }
 
-       DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%d\n",
+       DBG(SMC_DEBUG_MISC, dev, "phy_id1=0x%x, phy_id2=0x%x phyaddr=0x%x\n",
            id1, id2, lp->mii.phy_id);
 }
 
index 74610f3..de507c3 100644 (file)
@@ -368,34 +368,36 @@ struct stmmac_dma_ops {
        void (*rx_watchdog) (void __iomem *ioaddr, u32 riwt);
 };
 
+struct mac_device_info;
+
 struct stmmac_ops {
        /* MAC core initialization */
-       void (*core_init) (void __iomem *ioaddr, int mtu);
+       void (*core_init)(struct mac_device_info *hw, int mtu);
        /* Enable and verify that the IPC module is supported */
-       int (*rx_ipc) (void __iomem *ioaddr);
+       int (*rx_ipc)(struct mac_device_info *hw);
        /* Dump MAC registers */
-       void (*dump_regs) (void __iomem *ioaddr);
+       void (*dump_regs)(struct mac_device_info *hw);
        /* Handle extra events on specific interrupts hw dependent */
-       int (*host_irq_status) (void __iomem *ioaddr,
-                               struct stmmac_extra_stats *x);
+       int (*host_irq_status)(struct mac_device_info *hw,
+                              struct stmmac_extra_stats *x);
        /* Multicast filter setting */
-       void (*set_filter) (struct net_device *dev, int id);
+       void (*set_filter)(struct mac_device_info *hw, struct net_device *dev);
        /* Flow control setting */
-       void (*flow_ctrl) (void __iomem *ioaddr, unsigned int duplex,
-                          unsigned int fc, unsigned int pause_time);
+       void (*flow_ctrl)(struct mac_device_info *hw, unsigned int duplex,
+                         unsigned int fc, unsigned int pause_time);
        /* Set power management mode (e.g. magic frame) */
-       void (*pmt) (void __iomem *ioaddr, unsigned long mode);
+       void (*pmt)(struct mac_device_info *hw, unsigned long mode);
        /* Set/Get Unicast MAC addresses */
-       void (*set_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
-                              unsigned int reg_n);
-       void (*get_umac_addr) (void __iomem *ioaddr, unsigned char *addr,
-                              unsigned int reg_n);
-       void (*set_eee_mode) (void __iomem *ioaddr);
-       void (*reset_eee_mode) (void __iomem *ioaddr);
-       void (*set_eee_timer) (void __iomem *ioaddr, int ls, int tw);
-       void (*set_eee_pls) (void __iomem *ioaddr, int link);
-       void (*ctrl_ane) (void __iomem *ioaddr, bool restart);
-       void (*get_adv) (void __iomem *ioaddr, struct rgmii_adv *adv);
+       void (*set_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+                             unsigned int reg_n);
+       void (*get_umac_addr)(struct mac_device_info *hw, unsigned char *addr,
+                             unsigned int reg_n);
+       void (*set_eee_mode)(struct mac_device_info *hw);
+       void (*reset_eee_mode)(struct mac_device_info *hw);
+       void (*set_eee_timer)(struct mac_device_info *hw, int ls, int tw);
+       void (*set_eee_pls)(struct mac_device_info *hw, int link);
+       void (*ctrl_ane)(struct mac_device_info *hw, bool restart);
+       void (*get_adv)(struct mac_device_info *hw, struct rgmii_adv *adv);
 };
 
 struct stmmac_hwtimestamp {
@@ -439,9 +441,14 @@ struct mac_device_info {
        struct mii_regs mii;    /* MII register Addresses */
        struct mac_link link;
        unsigned int synopsys_uid;
+       void __iomem *pcsr;     /* pointer to device CSRs */
+       int multicast_filter_bins;
+       int unicast_filter_entries;
+       int mcast_bits_log2;
 };
 
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr);
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
+                                       int perfect_uc_entries);
 struct mac_device_info *dwmac100_setup(void __iomem *ioaddr);
 
 void stmmac_set_mac_addr(void __iomem *ioaddr, u8 addr[6],
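
Passing struct mac_device_info into every callback gives each implementation both its register base (hw->pcsr) and the per-instance filter sizing that dwmac1000_setup() now records. A sketch of the resulting callback shape (FOO_* register names are hypothetical; the dwmac1000 hunks below show the real conversions):

    static void foo_core_init(struct mac_device_info *hw, int mtu)
    {
            void __iomem *ioaddr = hw->pcsr;
            u32 value = readl(ioaddr + FOO_CONTROL);

            writel(value | FOO_CORE_INIT, ioaddr + FOO_CONTROL);
    }
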
index fd8a217..ec632e6 100644 (file)
@@ -20,7 +20,9 @@
 #include <linux/of_net.h>
 #include <linux/phy.h>
 #include <linux/regmap.h>
+#include <linux/reset.h>
 #include <linux/stmmac.h>
+#include "stmmac.h"
 
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_GMII_MII 0x0
 #define SYSMGR_EMACGRP_CTRL_PHYSEL_ENUM_RGMII 0x1
@@ -34,6 +36,7 @@ struct socfpga_dwmac {
        u32     reg_shift;
        struct  device *dev;
        struct regmap *sys_mgr_base_addr;
+       struct reset_control *stmmac_rst;
 };
 
 static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *dev)
@@ -43,6 +46,13 @@ static int socfpga_dwmac_parse_data(struct socfpga_dwmac *dwmac, struct device *
        u32 reg_offset, reg_shift;
        int ret;
 
+       dwmac->stmmac_rst = devm_reset_control_get(dev,
+                                                 STMMAC_RESOURCE_NAME);
+       if (IS_ERR(dwmac->stmmac_rst)) {
+               dev_info(dev, "Could not get reset control!\n");
+               return -EINVAL;
+       }
+
        dwmac->interface = of_get_phy_mode(np);
 
        sys_mgr_base_addr = syscon_regmap_lookup_by_phandle(np, "altr,sysmgr-syscon");
@@ -125,6 +135,65 @@ static void *socfpga_dwmac_probe(struct platform_device *pdev)
        return dwmac;
 }
 
+static void socfpga_dwmac_exit(struct platform_device *pdev, void *priv)
+{
+       struct socfpga_dwmac    *dwmac = priv;
+
+       /* On socfpga platform exit, assert and hold reset to the
+        * enet controller - the default state after a hard reset.
+        */
+       if (dwmac->stmmac_rst)
+               reset_control_assert(dwmac->stmmac_rst);
+}
+
+static int socfpga_dwmac_init(struct platform_device *pdev, void *priv)
+{
+       struct socfpga_dwmac    *dwmac = priv;
+       struct net_device *ndev = platform_get_drvdata(pdev);
+       struct stmmac_priv *stpriv = NULL;
+       int ret = 0;
+
+       if (ndev)
+               stpriv = netdev_priv(ndev);
+
+       /* Assert reset to the enet controller before changing the phy mode */
+       if (dwmac->stmmac_rst)
+               reset_control_assert(dwmac->stmmac_rst);
+
+       /* Setup the phy mode in the system manager registers according to
+        * devicetree configuration
+        */
+       ret = socfpga_dwmac_setup(dwmac);
+
+       /* Deassert reset for the phy configuration to be sampled by
+        * the enet controller, and operation to start in requested mode
+        */
+       if (dwmac->stmmac_rst)
+               reset_control_deassert(dwmac->stmmac_rst);
+
+       /* Before the enet controller is suspended, the phy is suspended.
+        * This causes the phy clock to be gated. The enet controller is
+        * resumed before the phy, so the clock is still gated "off" when
+        * the enet controller is resumed. This code makes sure the phy
+        * is "resumed" before reinitializing the enet controller since
+        * the enet controller depends on an active phy clock to complete
+        * a DMA reset. A DMA reset will "time out" if executed
+        * with no phy clock input on the Synopsys enet controller.
+        * Verified through Synopsys Case #8000711656.
+        *
+        * Note that the phy clock is also gated when the phy is isolated.
+        * Phy "suspend" and "isolate" controls are located in phy basic
+        * control register 0, and can be modified by the phy driver
+        * framework.
+        */
+       if (stpriv && stpriv->phydev)
+               phy_resume(stpriv->phydev);
+
+       return ret;
+}
+
 const struct stmmac_of_data socfpga_gmac_data = {
        .setup = socfpga_dwmac_probe,
+       .init = socfpga_dwmac_init,
+       .exit = socfpga_dwmac_exit,
 };
index f37d90f..71b5419 100644 (file)
@@ -87,7 +87,7 @@ enum power_event {
                                (reg * 8))
 #define GMAC_ADDR_LOW(reg)     (((reg > 15) ? 0x00000804 : 0x00000044) + \
                                (reg * 8))
-#define GMAC_MAX_PERFECT_ADDRESSES     32
+#define GMAC_MAX_PERFECT_ADDRESSES     1
 
 /* PCS registers (AN/TBI/SGMII/RGMII) offset */
 #define GMAC_AN_CTRL   0x000000c0      /* AN control */
@@ -261,6 +261,7 @@ enum rtc_control {
 #define GMAC_MMC_RX_INTR   0x104
 #define GMAC_MMC_TX_INTR   0x108
 #define GMAC_MMC_RX_CSUM_OFFLOAD   0x208
+#define GMAC_EXTHASH_BASE  0x500
 
 extern const struct stmmac_dma_ops dwmac1000_dma_ops;
 #endif /* __DWMAC1000_H__ */
index b3e148e..d8ef187 100644 (file)
@@ -32,8 +32,9 @@
 #include <asm/io.h>
 #include "dwmac1000.h"
 
-static void dwmac1000_core_init(void __iomem *ioaddr, int mtu)
+static void dwmac1000_core_init(struct mac_device_info *hw, int mtu)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
        value |= GMAC_CORE_INIT;
        if (mtu > 1500)
@@ -52,8 +53,9 @@ static void dwmac1000_core_init(void __iomem *ioaddr, int mtu)
 #endif
 }
 
-static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
+static int dwmac1000_rx_ipc_enable(struct mac_device_info *hw)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_CONTROL);
 
        value |= GMAC_CONTROL_IPC;
@@ -64,8 +66,9 @@ static int dwmac1000_rx_ipc_enable(void __iomem *ioaddr)
        return !!(value & GMAC_CONTROL_IPC);
 }
 
-static void dwmac1000_dump_regs(void __iomem *ioaddr)
+static void dwmac1000_dump_regs(struct mac_device_info *hw)
 {
+       void __iomem *ioaddr = hw->pcsr;
        int i;
        pr_info("\tDWMAC1000 regs (base addr = 0x%p)\n", ioaddr);
 
@@ -76,69 +79,98 @@ static void dwmac1000_dump_regs(void __iomem *ioaddr)
        }
 }
 
-static void dwmac1000_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void dwmac1000_set_umac_addr(struct mac_device_info *hw,
+                                   unsigned char *addr,
                                    unsigned int reg_n)
 {
+       void __iomem *ioaddr = hw->pcsr;
        stmmac_set_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                            GMAC_ADDR_LOW(reg_n));
 }
 
-static void dwmac1000_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void dwmac1000_get_umac_addr(struct mac_device_info *hw,
+                                   unsigned char *addr,
                                    unsigned int reg_n)
 {
+       void __iomem *ioaddr = hw->pcsr;
        stmmac_get_mac_addr(ioaddr, addr, GMAC_ADDR_HIGH(reg_n),
                            GMAC_ADDR_LOW(reg_n));
 }
 
-static void dwmac1000_set_filter(struct net_device *dev, int id)
+static void dwmac1000_set_mchash(void __iomem *ioaddr, u32 *mcfilterbits,
+                                int mcbitslog2)
+{
+       int numhashregs, regs;
+
+       switch (mcbitslog2) {
+       case 6:
+               writel(mcfilterbits[0], ioaddr + GMAC_HASH_LOW);
+               writel(mcfilterbits[1], ioaddr + GMAC_HASH_HIGH);
+               return;
+       case 7:
+               numhashregs = 4;
+               break;
+       case 8:
+               numhashregs = 8;
+               break;
+       default:
+               pr_debug("STMMAC: err in setting multicast filter\n");
+               return;
+       }
+       for (regs = 0; regs < numhashregs; regs++)
+               writel(mcfilterbits[regs],
+                      ioaddr + GMAC_EXTHASH_BASE + regs * 4);
+}
+
+static void dwmac1000_set_filter(struct mac_device_info *hw,
+                                struct net_device *dev)
 {
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        unsigned int value = 0;
-       unsigned int perfect_addr_number;
+       unsigned int perfect_addr_number = hw->unicast_filter_entries;
+       u32 mc_filter[8];
+       int mcbitslog2 = hw->mcast_bits_log2;
 
        pr_debug("%s: # mcasts %d, # unicast %d\n", __func__,
                 netdev_mc_count(dev), netdev_uc_count(dev));
 
-       if (dev->flags & IFF_PROMISC)
+       memset(mc_filter, 0, sizeof(mc_filter));
+
+       if (dev->flags & IFF_PROMISC) {
                value = GMAC_FRAME_FILTER_PR;
-       else if ((netdev_mc_count(dev) > HASH_TABLE_SIZE)
-                || (dev->flags & IFF_ALLMULTI)) {
+       } else if (dev->flags & IFF_ALLMULTI) {
                value = GMAC_FRAME_FILTER_PM;   /* pass all multi */
-               writel(0xffffffff, ioaddr + GMAC_HASH_HIGH);
-               writel(0xffffffff, ioaddr + GMAC_HASH_LOW);
        } else if (!netdev_mc_empty(dev)) {
-               u32 mc_filter[2];
                struct netdev_hw_addr *ha;
 
                /* Hash filter for multicast */
                value = GMAC_FRAME_FILTER_HMC;
 
-               memset(mc_filter, 0, sizeof(mc_filter));
                netdev_for_each_mc_addr(ha, dev) {
-                       /* The upper 6 bits of the calculated CRC are used to
-                        * index the contens of the hash table
+                       /* The upper n bits of the calculated CRC are used to
+                        * index the contents of the hash table. The number of
+                        * bits used depends on the hardware configuration
+                        * selected at core configuration time.
                         */
-                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr, 6)) >> 26;
+                       int bit_nr = bitrev32(~crc32_le(~0, ha->addr,
+                                             ETH_ALEN)) >>
+                                             (32 - mcbitslog2);
                        /* The upper bits of bit_nr select the hash filter
                         * register to write, while the lower 5 bits select
                         * the bit within that register.
                         */
                        mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
                }
-               writel(mc_filter[0], ioaddr + GMAC_HASH_LOW);
-               writel(mc_filter[1], ioaddr + GMAC_HASH_HIGH);
        }
 
-       /* Extra 16 regs are available in cores newer than the 3.40. */
-       if (id > DWMAC_CORE_3_40)
-               perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES;
-       else
-               perfect_addr_number = GMAC_MAX_PERFECT_ADDRESSES / 2;
+       dwmac1000_set_mchash(ioaddr, mc_filter, mcbitslog2);
 
        /* Handle multiple unicast addresses (perfect filtering) */
        if (netdev_uc_count(dev) > perfect_addr_number)
-               /* Switch to promiscuous mode if more than 16 addrs
-                * are required
+               /* Switch to promiscuous mode if more unicast addresses
+                * are requested than the hardware supports.
                 */
                value |= GMAC_FRAME_FILTER_PR;
        else {
@@ -146,7 +178,9 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
                struct netdev_hw_addr *ha;
 
                netdev_for_each_uc_addr(ha, dev) {
-                       dwmac1000_set_umac_addr(ioaddr, ha->addr, reg);
+                       stmmac_set_mac_addr(ioaddr, ha->addr,
+                                           GMAC_ADDR_HIGH(reg),
+                                           GMAC_ADDR_LOW(reg));
                        reg++;
                }
        }
@@ -156,15 +190,13 @@ static void dwmac1000_set_filter(struct net_device *dev, int id)
        value |= GMAC_FRAME_FILTER_RA;
 #endif
        writel(value, ioaddr + GMAC_FRAME_FILTER);
-
-       pr_debug("\tFilter: 0x%08x\n\tHash: HI 0x%08x, LO 0x%08x\n",
-                readl(ioaddr + GMAC_FRAME_FILTER),
-                readl(ioaddr + GMAC_HASH_HIGH), readl(ioaddr + GMAC_HASH_LOW));
 }
 
-static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+static void dwmac1000_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
                                unsigned int fc, unsigned int pause_time)
 {
+       void __iomem *ioaddr = hw->pcsr;
        unsigned int flow = 0;
 
        pr_debug("GMAC Flow-Control:\n");
@@ -185,8 +217,9 @@ static void dwmac1000_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
        writel(flow, ioaddr + GMAC_FLOW_CTRL);
 }
 
-static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
+static void dwmac1000_pmt(struct mac_device_info *hw, unsigned long mode)
 {
+       void __iomem *ioaddr = hw->pcsr;
        unsigned int pmt = 0;
 
        if (mode & WAKE_MAGIC) {
@@ -201,9 +234,10 @@ static void dwmac1000_pmt(void __iomem *ioaddr, unsigned long mode)
        writel(pmt, ioaddr + GMAC_PMT);
 }
 
-static int dwmac1000_irq_status(void __iomem *ioaddr,
+static int dwmac1000_irq_status(struct mac_device_info *hw,
                                struct stmmac_extra_stats *x)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 intr_status = readl(ioaddr + GMAC_INT_STATUS);
        int ret = 0;
 
@@ -268,8 +302,9 @@ static int dwmac1000_irq_status(void __iomem *ioaddr,
        return ret;
 }
 
-static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
+static void dwmac1000_set_eee_mode(struct mac_device_info *hw)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value;
 
        /* Enable the link status receive on RGMII, SGMII or SMII
@@ -281,8 +316,9 @@ static void dwmac1000_set_eee_mode(void __iomem *ioaddr)
        writel(value, ioaddr + LPI_CTRL_STATUS);
 }
 
-static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
+static void dwmac1000_reset_eee_mode(struct mac_device_info *hw)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value;
 
        value = readl(ioaddr + LPI_CTRL_STATUS);
@@ -290,8 +326,9 @@ static void dwmac1000_reset_eee_mode(void __iomem *ioaddr)
        writel(value, ioaddr + LPI_CTRL_STATUS);
 }
 
-static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
+static void dwmac1000_set_eee_pls(struct mac_device_info *hw, int link)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value;
 
        value = readl(ioaddr + LPI_CTRL_STATUS);
@@ -304,8 +341,9 @@ static void dwmac1000_set_eee_pls(void __iomem *ioaddr, int link)
        writel(value, ioaddr + LPI_CTRL_STATUS);
 }
 
-static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
+static void dwmac1000_set_eee_timer(struct mac_device_info *hw, int ls, int tw)
 {
+       void __iomem *ioaddr = hw->pcsr;
        int value = ((tw & 0xffff)) | ((ls & 0x7ff) << 16);
 
        /* Program the timers in the LPI timer control register:
@@ -318,13 +356,11 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
        writel(value, ioaddr + LPI_TIMER_CTRL);
 }
 
-static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
+static void dwmac1000_ctrl_ane(struct mac_device_info *hw, bool restart)
 {
-       u32 value;
-
-       value = readl(ioaddr + GMAC_AN_CTRL);
+       void __iomem *ioaddr = hw->pcsr;
        /* auto negotiation enable and External Loopback enable */
-       value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
+       u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
 
        if (restart)
                value |= GMAC_AN_CTRL_RAN;
@@ -332,8 +368,9 @@ static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
        writel(value, ioaddr + GMAC_AN_CTRL);
 }
 
-static void dwmac1000_get_adv(void __iomem *ioaddr, struct rgmii_adv *adv)
+static void dwmac1000_get_adv(struct mac_device_info *hw, struct rgmii_adv *adv)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + GMAC_ANE_ADV);
 
        if (value & GMAC_ANE_FD)
@@ -371,7 +408,8 @@ static const struct stmmac_ops dwmac1000_ops = {
        .get_adv = dwmac1000_get_adv,
 };
 
-struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
+struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr, int mcbins,
+                                       int perfect_uc_entries)
 {
        struct mac_device_info *mac;
        u32 hwid = readl(ioaddr + GMAC_VERSION);
@@ -380,6 +418,14 @@ struct mac_device_info *dwmac1000_setup(void __iomem *ioaddr)
        if (!mac)
                return NULL;
 
+       mac->pcsr = ioaddr;
+       mac->multicast_filter_bins = mcbins;
+       mac->unicast_filter_entries = perfect_uc_entries;
+       mac->mcast_bits_log2 = 0;
+
+       if (mac->multicast_filter_bins)
+               mac->mcast_bits_log2 = ilog2(mac->multicast_filter_bins);
+
        mac->mac = &dwmac1000_ops;
        mac->dma = &dwmac1000_dma_ops;
 
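
The bin index used by dwmac1000_set_filter() above is the top mcbitslog2 bits of a bit-reversed, inverted little-endian CRC-32 of the six-byte (ETH_ALEN) MAC address. A standalone worked example, with local re-implementations of the kernel's crc32_le() and bitrev32() helpers; the sample address and the 256-bin configuration are arbitrary choices for illustration:

#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's crc32_le() and bitrev32(). */
static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
{
	int i, j;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (j = 0; j < 8; j++)
			crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1));
	}
	return crc;
}

static uint32_t bitrev32(uint32_t x)
{
	uint32_t r = 0;
	int i;

	for (i = 0; i < 32; i++)
		r |= ((x >> i) & 1) << (31 - i);
	return r;
}

int main(void)
{
	/* Sample input only: the IPv4 all-hosts multicast MAC. */
	const uint8_t addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
	int mcbitslog2 = 8;	/* the 256-bin configuration */
	int bit_nr = bitrev32(~crc32_le(~0u, addr, 6)) >> (32 - mcbitslog2);

	/* Same mapping as the loop above: word index, then bit in word. */
	printf("bin %d -> mc_filter[%d], bit %d\n",
	       bit_nr, bit_nr >> 5, bit_nr & 31);
	return 0;
}
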
index 2ff767b..f8dd773 100644
@@ -32,8 +32,9 @@
 #include <asm/io.h>
 #include "dwmac100.h"
 
-static void dwmac100_core_init(void __iomem *ioaddr, int mtu)
+static void dwmac100_core_init(struct mac_device_info *hw, int mtu)
 {
+       void __iomem *ioaddr = hw->pcsr;
        u32 value = readl(ioaddr + MAC_CONTROL);
 
        writel((value | MAC_CORE_INIT), ioaddr + MAC_CONTROL);
@@ -43,8 +44,9 @@ static void dwmac100_core_init(void __iomem *ioaddr, int mtu)
 #endif
 }
 
-static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
+static void dwmac100_dump_mac_regs(struct mac_device_info *hw)
 {
+       void __iomem *ioaddr = hw->pcsr;
        pr_info("\t----------------------------------------------\n"
                "\t  DWMAC 100 CSR (base addr = 0x%p)\n"
                "\t----------------------------------------------\n", ioaddr);
@@ -66,30 +68,35 @@ static void dwmac100_dump_mac_regs(void __iomem *ioaddr)
                readl(ioaddr + MAC_VLAN2));
 }
 
-static int dwmac100_rx_ipc_enable(void __iomem *ioaddr)
+static int dwmac100_rx_ipc_enable(struct mac_device_info *hw)
 {
        return 0;
 }
 
-static int dwmac100_irq_status(void __iomem *ioaddr,
+static int dwmac100_irq_status(struct mac_device_info *hw,
                               struct stmmac_extra_stats *x)
 {
        return 0;
 }
 
-static void dwmac100_set_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void dwmac100_set_umac_addr(struct mac_device_info *hw,
+                                  unsigned char *addr,
                                   unsigned int reg_n)
 {
+       void __iomem *ioaddr = hw->pcsr;
        stmmac_set_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
 }
 
-static void dwmac100_get_umac_addr(void __iomem *ioaddr, unsigned char *addr,
+static void dwmac100_get_umac_addr(struct mac_device_info *hw,
+                                  unsigned char *addr,
                                   unsigned int reg_n)
 {
+       void __iomem *ioaddr = hw->pcsr;
        stmmac_get_mac_addr(ioaddr, addr, MAC_ADDR_HIGH, MAC_ADDR_LOW);
 }
 
-static void dwmac100_set_filter(struct net_device *dev, int id)
+static void dwmac100_set_filter(struct mac_device_info *hw,
+                               struct net_device *dev)
 {
        void __iomem *ioaddr = (void __iomem *)dev->base_addr;
        u32 value = readl(ioaddr + MAC_CONTROL);
@@ -137,9 +144,10 @@ static void dwmac100_set_filter(struct net_device *dev, int id)
        writel(value, ioaddr + MAC_CONTROL);
 }
 
-static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
+static void dwmac100_flow_ctrl(struct mac_device_info *hw, unsigned int duplex,
                               unsigned int fc, unsigned int pause_time)
 {
+       void __iomem *ioaddr = hw->pcsr;
        unsigned int flow = MAC_FLOW_CTRL_ENABLE;
 
        if (duplex)
@@ -148,7 +156,7 @@ static void dwmac100_flow_ctrl(void __iomem *ioaddr, unsigned int duplex,
 }
 
 /* No PMT module supported on ST boards with this Eth chip. */
-static void dwmac100_pmt(void __iomem *ioaddr, unsigned long mode)
+static void dwmac100_pmt(struct mac_device_info *hw, unsigned long mode)
 {
        return;
 }
@@ -175,6 +183,7 @@ struct mac_device_info *dwmac100_setup(void __iomem *ioaddr)
 
        pr_info("\tDWMAC100\n");
 
+       mac->pcsr = ioaddr;
        mac->mac = &dwmac100_ops;
        mac->dma = &dwmac100_dma_ops;
 
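
Both cores above receive the same mechanical conversion: every stmmac_ops callback now takes the mac_device_info and recovers the register base from hw->pcsr, so per-device state such as the filter geometry travels with a single pointer instead of a bare ioaddr. A toy rendering of the pattern (all names invented for the sketch):

#include <stdio.h>

struct mac_dev {
	void *regs;			/* was the bare "ioaddr" argument */
	int unicast_filter_entries;	/* state the old API could not see */
};

static void core_init(struct mac_dev *hw)
{
	void *ioaddr = hw->regs;	/* same move as "ioaddr = hw->pcsr" */

	printf("init regs=%p, %d unicast entries\n",
	       ioaddr, hw->unicast_filter_entries);
}

int main(void)
{
	static char fake_regs[16];	/* placeholder register window */
	struct mac_dev d = { .regs = fake_regs, .unicast_filter_entries = 1 };

	core_init(&d);
	return 0;
}
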
index 7e6628a..1e2bcf5 100644
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
                        x->rx_msg_type_delay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
                        x->rx_msg_type_delay_resp++;
-               else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ)
+               else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
                        x->rx_msg_type_pdelay_req++;
                else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
                        x->rx_msg_type_pdelay_resp++;
index c62e67f..9af50ba 100644
@@ -262,7 +262,7 @@ static int stmmac_ethtool_getsettings(struct net_device *dev,
 
                /* Get and convert ADV/LP_ADV from the HW AN registers */
                if (priv->hw->mac->get_adv)
-                       priv->hw->mac->get_adv(priv->ioaddr, &adv);
+                       priv->hw->mac->get_adv(priv->hw, &adv);
                else
                        return -EOPNOTSUPP;     /* should never happen indeed */
 
@@ -350,7 +350,7 @@ static int stmmac_ethtool_setsettings(struct net_device *dev,
 
                        spin_lock(&priv->lock);
                        if (priv->hw->mac->ctrl_ane)
-                               priv->hw->mac->ctrl_ane(priv->ioaddr, 1);
+                               priv->hw->mac->ctrl_ane(priv->hw, 1);
                        spin_unlock(&priv->lock);
                }
 
@@ -464,7 +464,7 @@ stmmac_set_pauseparam(struct net_device *netdev,
                if (netif_running(netdev))
                        ret = phy_start_aneg(phy);
        } else
-               priv->hw->mac->flow_ctrl(priv->ioaddr, phy->duplex,
+               priv->hw->mac->flow_ctrl(priv->hw, phy->duplex,
                                         priv->flow_ctrl, priv->pause);
        return ret;
 }
index 057a120..08addd6 100644
@@ -233,7 +233,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
        /* Check and enter in LPI mode */
        if ((priv->dirty_tx == priv->cur_tx) &&
            (priv->tx_path_in_lpi_mode == false))
-               priv->hw->mac->set_eee_mode(priv->ioaddr);
+               priv->hw->mac->set_eee_mode(priv->hw);
 }
 
 /**
@@ -244,7 +244,7 @@ static void stmmac_enable_eee_mode(struct stmmac_priv *priv)
  */
 void stmmac_disable_eee_mode(struct stmmac_priv *priv)
 {
-       priv->hw->mac->reset_eee_mode(priv->ioaddr);
+       priv->hw->mac->reset_eee_mode(priv->hw);
        del_timer_sync(&priv->eee_ctrl_timer);
        priv->tx_path_in_lpi_mode = false;
 }
@@ -298,7 +298,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        if (priv->eee_active) {
                                pr_debug("stmmac: disable EEE\n");
                                del_timer_sync(&priv->eee_ctrl_timer);
-                               priv->hw->mac->set_eee_timer(priv->ioaddr, 0,
+                               priv->hw->mac->set_eee_timer(priv->hw, 0,
                                                             tx_lpi_timer);
                        }
                        priv->eee_active = 0;
@@ -313,12 +313,12 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
                        priv->eee_ctrl_timer.expires = STMMAC_LPI_T(eee_timer);
                        add_timer(&priv->eee_ctrl_timer);
 
-                       priv->hw->mac->set_eee_timer(priv->ioaddr,
+                       priv->hw->mac->set_eee_timer(priv->hw,
                                                     STMMAC_DEFAULT_LIT_LS,
                                                     tx_lpi_timer);
                } else
                        /* Set HW EEE according to the speed */
-                       priv->hw->mac->set_eee_pls(priv->ioaddr,
+                       priv->hw->mac->set_eee_pls(priv->hw,
                                                   priv->phydev->link);
 
                pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
@@ -693,7 +693,7 @@ static void stmmac_adjust_link(struct net_device *dev)
                }
                /* Flow Control operation */
                if (phydev->pause)
-                       priv->hw->mac->flow_ctrl(priv->ioaddr, phydev->duplex,
+                       priv->hw->mac->flow_ctrl(priv->hw, phydev->duplex,
                                                 fc, pause_time);
 
                if (phydev->speed != priv->speed) {
@@ -1531,8 +1531,7 @@ static int stmmac_get_hw_features(struct stmmac_priv *priv)
 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 {
        if (!is_valid_ether_addr(priv->dev->dev_addr)) {
-               priv->hw->mac->get_umac_addr((void __iomem *)
-                                            priv->dev->base_addr,
+               priv->hw->mac->get_umac_addr(priv->hw,
                                             priv->dev->dev_addr, 0);
                if (!is_valid_ether_addr(priv->dev->dev_addr))
                        eth_hw_addr_random(priv->dev);
@@ -1629,14 +1628,14 @@ static int stmmac_hw_setup(struct net_device *dev)
        }
 
        /* Copy the MAC addr into the HW  */
-       priv->hw->mac->set_umac_addr(priv->ioaddr, dev->dev_addr, 0);
+       priv->hw->mac->set_umac_addr(priv->hw, dev->dev_addr, 0);
 
        /* If required, perform hw setup of the bus. */
        if (priv->plat->bus_setup)
                priv->plat->bus_setup(priv->ioaddr);
 
        /* Initialize the MAC Core */
-       priv->hw->mac->core_init(priv->ioaddr, dev->mtu);
+       priv->hw->mac->core_init(priv->hw, dev->mtu);
 
        /* Enable the MAC Rx/Tx */
        stmmac_set_mac(priv->ioaddr, true);
@@ -1662,7 +1661,7 @@ static int stmmac_hw_setup(struct net_device *dev)
 
        /* Dump DMA/MAC registers */
        if (netif_msg_hw(priv)) {
-               priv->hw->mac->dump_regs(priv->ioaddr);
+               priv->hw->mac->dump_regs(priv->hw);
                priv->hw->dma->dump_regs(priv->ioaddr);
        }
        priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
@@ -1677,7 +1676,7 @@ static int stmmac_hw_setup(struct net_device *dev)
        }
 
        if (priv->pcs && priv->hw->mac->ctrl_ane)
-               priv->hw->mac->ctrl_ane(priv->ioaddr, 0);
+               priv->hw->mac->ctrl_ane(priv->hw, 0);
 
        return 0;
 }
@@ -2226,7 +2225,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
        struct stmmac_priv *priv = netdev_priv(dev);
 
        spin_lock(&priv->lock);
-       priv->hw->mac->set_filter(dev, priv->synopsys_id);
+       priv->hw->mac->set_filter(priv->hw, dev);
        spin_unlock(&priv->lock);
 }
 
@@ -2316,8 +2315,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id)
 
        /* To handle GMAC own interrupts */
        if (priv->plat->has_gmac) {
-               int status = priv->hw->mac->host_irq_status((void __iomem *)
-                                                           dev->base_addr,
+               int status = priv->hw->mac->host_irq_status(priv->hw,
                                                            &priv->xstats);
                if (unlikely(status)) {
                        /* For LPI we need to save the tx status */
@@ -2600,7 +2598,9 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
        /* Identify the MAC HW device */
        if (priv->plat->has_gmac) {
                priv->dev->priv_flags |= IFF_UNICAST_FLT;
-               mac = dwmac1000_setup(priv->ioaddr);
+               mac = dwmac1000_setup(priv->ioaddr,
+                                     priv->plat->multicast_filter_bins,
+                                     priv->plat->unicast_filter_entries);
        } else {
                mac = dwmac100_setup(priv->ioaddr);
        }
@@ -2649,7 +2649,7 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
        /* To use alternate (extended) or normal descriptor structures */
        stmmac_selec_desc_mode(priv);
 
-       ret = priv->hw->mac->rx_ipc(priv->ioaddr);
+       ret = priv->hw->mac->rx_ipc(priv->hw);
        if (!ret) {
                pr_warn(" RX IPC Checksum Offload not configured.\n");
                priv->plat->rx_coe = STMMAC_RX_COE_NONE;
@@ -2869,7 +2869,7 @@ int stmmac_suspend(struct net_device *ndev)
 
        /* Enable Power down mode by programming the PMT regs */
        if (device_may_wakeup(priv->device)) {
-               priv->hw->mac->pmt(priv->ioaddr, priv->wolopts);
+               priv->hw->mac->pmt(priv->hw, priv->wolopts);
                priv->irq_wake = 1;
        } else {
                stmmac_set_mac(priv->ioaddr, false);
@@ -2878,6 +2878,10 @@ int stmmac_suspend(struct net_device *ndev)
                clk_disable_unprepare(priv->stmmac_clk);
        }
        spin_unlock_irqrestore(&priv->lock, flags);
+
+       priv->oldlink = 0;
+       priv->speed = 0;
+       priv->oldduplex = -1;
        return 0;
 }
 
@@ -2898,7 +2902,7 @@ int stmmac_resume(struct net_device *ndev)
         * from another devices (e.g. serial console).
         */
        if (device_may_wakeup(priv->device)) {
-               priv->hw->mac->pmt(priv->ioaddr, 0);
+               priv->hw->mac->pmt(priv->hw, 0);
                priv->irq_wake = 0;
        } else {
                pinctrl_pm_select_default_state(priv->device);
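
The three lines added to stmmac_suspend() above clear the cached link state so that the first PHY report after resume looks like a change and the MAC gets reprogrammed. A small standalone model of that cache-and-compare pattern (names invented; the real comparison lives in stmmac_adjust_link()):

#include <stdio.h>

struct link_cache { int oldlink, speed, oldduplex; };

static void adjust_link(struct link_cache *c, int link, int speed, int duplex)
{
	/* Only touch the MAC when something differs from the cache. */
	if (link != c->oldlink || speed != c->speed || duplex != c->oldduplex) {
		printf("reprogram MAC: link=%d speed=%d duplex=%d\n",
		       link, speed, duplex);
		c->oldlink = link;
		c->speed = speed;
		c->oldduplex = duplex;
	}
}

int main(void)
{
	struct link_cache c = { .oldlink = 1, .speed = 1000, .oldduplex = 1 };

	/* Without the reset, an identical post-resume state is ignored: */
	adjust_link(&c, 1, 1000, 1);	/* no output, MAC never reprogrammed */

	/* The suspend path now clears the cache, so resume reprograms: */
	c.oldlink = 0;
	c.speed = 0;
	c.oldduplex = -1;
	adjust_link(&c, 1, 1000, 1);	/* prints, MAC reprogrammed */
	return 0;
}
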
index ea7a65b..bb524a9 100644
@@ -52,6 +52,59 @@ static const struct of_device_id stmmac_dt_ids[] = {
 MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
 
 #ifdef CONFIG_OF
+
+/* This function validates the number of Multicast filtering bins specified
+ * by the configuration through the device tree. The Synopsys GMAC supports
+ * 64, 128, or 256 bins. "Bins" refers to the division of the CRC number
+ * space: 64 bins correspond to 6 bits of the CRC, 128 bins to 7 bits,
+ * and 256 bins to 8 bits of the CRC. Any other setting is invalid and
+ * will cause the filtering algorithm to fall back to Multicast
+ * promiscuous mode.
+ */
+static int dwmac1000_validate_mcast_bins(int mcast_bins)
+{
+       int x = mcast_bins;
+
+       switch (x) {
+       case HASH_TABLE_SIZE:
+       case 128:
+       case 256:
+               break;
+       default:
+               x = 0;
+               pr_info("Hash table entries set to unexpected value %d\n",
+                       mcast_bins);
+               break;
+       }
+       return x;
+}
+
+/* This function validates the number of Unicast address entries supported
+ * by a particular Synopsys 10/100/1000 controller. The Synopsys controller
+ * supports 1, 32, 64, or 128 Unicast filter entries in its Unicast filter
+ * logic. This function checks that a valid, supported configuration is
+ * selected, and defaults to 1 Unicast address if an unsupported
+ * configuration is requested.
+ */
+static int dwmac1000_validate_ucast_entries(int ucast_entries)
+{
+       int x = ucast_entries;
+
+       switch (x) {
+       case 1:
+       case 32:
+       case 64:
+       case 128:
+               break;
+       default:
+               x = 1;
+               pr_info("Unicast table entries set to unexpected value %d\n",
+                       ucast_entries);
+               break;
+       }
+       return x;
+}
+
 static int stmmac_probe_config_dt(struct platform_device *pdev,
                                  struct plat_stmmacenet_data *plat,
                                  const char **mac)
@@ -115,6 +168,12 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
         */
        plat->maxmtu = JUMBO_LEN;
 
+       /* Set default value for multicast hash bins */
+       plat->multicast_filter_bins = HASH_TABLE_SIZE;
+
+       /* Set default value for unicast filter entries */
+       plat->unicast_filter_entries = 1;
+
        /*
         * Currently only the properties needed on SPEAr600
         * are provided. All other properties should be added
@@ -131,6 +190,14 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
                 * are clearly MTUs
                 */
                of_property_read_u32(np, "max-frame-size", &plat->maxmtu);
+               of_property_read_u32(np, "snps,multicast-filter-bins",
+                                    &plat->multicast_filter_bins);
+               of_property_read_u32(np, "snps,perfect-filter-entries",
+                                    &plat->unicast_filter_entries);
+               plat->unicast_filter_entries = dwmac1000_validate_ucast_entries(
+                                              plat->unicast_filter_entries);
+               plat->multicast_filter_bins = dwmac1000_validate_mcast_bins(
+                                             plat->multicast_filter_bins);
                plat->has_gmac = 1;
                plat->pmt = 1;
        }
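
Putting the two validators together: the defaults are HASH_TABLE_SIZE bins and one unicast entry, the optional snps,* properties override them, and unsupported values fall back to the defaults instead of failing the probe. A standalone rendering of that clamping behaviour (HASH_TABLE_SIZE is assumed to be 64 here):

#include <stdio.h>

#define HASH_TABLE_SIZE 64	/* assumed value of the kernel macro */

/* Same decisions as the two validators above: unsupported values fall
 * back to a safe default rather than being rejected outright.
 */
static int validate_mcast_bins(int bins)
{
	return (bins == HASH_TABLE_SIZE || bins == 128 || bins == 256) ?
	       bins : 0;
}

static int validate_ucast_entries(int entries)
{
	return (entries == 1 || entries == 32 || entries == 64 ||
		entries == 128) ? entries : 1;
}

int main(void)
{
	/* e.g. snps,multicast-filter-bins = <256> and a bogus unicast value */
	printf("bins=%d entries=%d\n",
	       validate_mcast_bins(256), validate_ucast_entries(48));
	return 0;
}
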
index 79606f4..db8ffde 100644
@@ -2584,7 +2584,6 @@ static int niu_determine_phy_disposition(struct niu *np)
                                break;
                        default:
                                return -EINVAL;
-                               break;
                        }
                        phy_addr_off = niu_atca_port_num[np->port];
                        break;
index 1c24a8f..d813bfb 100644
@@ -610,6 +610,13 @@ static int __vnet_tx_trigger(struct vnet_port *port)
        return err;
 }
 
+static inline bool port_is_up(struct vnet_port *vnet)
+{
+       struct vio_driver_state *vio = &vnet->vio;
+
+       return !!(vio->hs_state & VIO_HS_COMPLETE);
+}
+
 struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
 {
        unsigned int hash = vnet_hashfn(skb->data);
@@ -617,14 +624,19 @@ struct vnet_port *__tx_port_find(struct vnet *vp, struct sk_buff *skb)
        struct vnet_port *port;
 
        hlist_for_each_entry(port, hp, hash) {
+               if (!port_is_up(port))
+                       continue;
                if (ether_addr_equal(port->raddr, skb->data))
                        return port;
        }
-       port = NULL;
-       if (!list_empty(&vp->port_list))
-               port = list_entry(vp->port_list.next, struct vnet_port, list);
-
-       return port;
+       list_for_each_entry(port, &vp->port_list, list) {
+               if (!port->switch_port)
+                       continue;
+               if (!port_is_up(port))
+                       continue;
+               return port;
+       }
+       return NULL;
 }
 
 struct vnet_port *tx_port_find(struct vnet *vp, struct sk_buff *skb)
@@ -1083,6 +1095,24 @@ static struct vnet *vnet_find_or_create(const u64 *local_mac)
        return vp;
 }
 
+static void vnet_cleanup(void)
+{
+       struct vnet *vp;
+       struct net_device *dev;
+
+       mutex_lock(&vnet_list_mutex);
+       while (!list_empty(&vnet_list)) {
+               vp = list_first_entry(&vnet_list, struct vnet, list);
+               list_del(&vp->list);
+               dev = vp->dev;
+               /* vio_unregister_driver() should have cleaned up port_list */
+               BUG_ON(!list_empty(&vp->port_list));
+               unregister_netdev(dev);
+               free_netdev(dev);
+       }
+       mutex_unlock(&vnet_list_mutex);
+}
+
 static const char *local_mac_prop = "local-mac-address";
 
 static struct vnet *vnet_find_parent(struct mdesc_handle *hp,
@@ -1240,7 +1270,6 @@ static int vnet_port_remove(struct vio_dev *vdev)
 
                kfree(port);
 
-               unregister_netdev(vp->dev);
        }
        return 0;
 }
@@ -1268,6 +1297,7 @@ static int __init vnet_init(void)
 static void __exit vnet_exit(void)
 {
        vio_unregister_driver(&vnet_port_driver);
+       vnet_cleanup();
 }
 
 module_init(vnet_init);
index 53150c2..1769700 100644
@@ -5,7 +5,7 @@
 config NET_VENDOR_TI
        bool "Texas Instruments (TI) devices"
        default y
-       depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX))
+       depends on PCI || EISA || AR7 || (ARM && (ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE))
        ---help---
          If you have a network (Ethernet) card belonging to this class, say Y
          and read the Ethernet-HOWTO, available from
@@ -32,7 +32,7 @@ config TI_DAVINCI_EMAC
 
 config TI_DAVINCI_MDIO
        tristate "TI DaVinci MDIO Support"
-       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX )
+       depends on ARM && ( ARCH_DAVINCI || ARCH_OMAP3 || SOC_AM33XX || ARCH_KEYSTONE )
        select PHYLIB
        ---help---
          This driver supports TI's DaVinci MDIO module.
index 7399a52..3809f4e 100644
@@ -67,42 +67,42 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 #define CPMAC_RX_CONTROL               0x0014
 #define CPMAC_RX_TEARDOWN              0x0018
 #define CPMAC_MBP                      0x0100
-# define MBP_RXPASSCRC                 0x40000000
-# define MBP_RXQOS                     0x20000000
-# define MBP_RXNOCHAIN                 0x10000000
-# define MBP_RXCMF                     0x01000000
-# define MBP_RXSHORT                   0x00800000
-# define MBP_RXCEF                     0x00400000
-# define MBP_RXPROMISC                 0x00200000
-# define MBP_PROMISCCHAN(channel)      (((channel) & 0x7) << 16)
-# define MBP_RXBCAST                   0x00002000
-# define MBP_BCASTCHAN(channel)                (((channel) & 0x7) << 8)
-# define MBP_RXMCAST                   0x00000020
-# define MBP_MCASTCHAN(channel)                ((channel) & 0x7)
+#define MBP_RXPASSCRC                  0x40000000
+#define MBP_RXQOS                      0x20000000
+#define MBP_RXNOCHAIN                  0x10000000
+#define MBP_RXCMF                      0x01000000
+#define MBP_RXSHORT                    0x00800000
+#define MBP_RXCEF                      0x00400000
+#define MBP_RXPROMISC                  0x00200000
+#define MBP_PROMISCCHAN(channel)       (((channel) & 0x7) << 16)
+#define MBP_RXBCAST                    0x00002000
+#define MBP_BCASTCHAN(channel)         (((channel) & 0x7) << 8)
+#define MBP_RXMCAST                    0x00000020
+#define MBP_MCASTCHAN(channel)         ((channel) & 0x7)
 #define CPMAC_UNICAST_ENABLE           0x0104
 #define CPMAC_UNICAST_CLEAR            0x0108
 #define CPMAC_MAX_LENGTH               0x010c
 #define CPMAC_BUFFER_OFFSET            0x0110
 #define CPMAC_MAC_CONTROL              0x0160
-# define MAC_TXPTYPE                   0x00000200
-# define MAC_TXPACE                    0x00000040
-# define MAC_MII                       0x00000020
-# define MAC_TXFLOW                    0x00000010
-# define MAC_RXFLOW                    0x00000008
-# define MAC_MTEST                     0x00000004
-# define MAC_LOOPBACK                  0x00000002
-# define MAC_FDX                       0x00000001
+#define MAC_TXPTYPE                    0x00000200
+#define MAC_TXPACE                     0x00000040
+#define MAC_MII                                0x00000020
+#define MAC_TXFLOW                     0x00000010
+#define MAC_RXFLOW                     0x00000008
+#define MAC_MTEST                      0x00000004
+#define MAC_LOOPBACK                   0x00000002
+#define MAC_FDX                                0x00000001
 #define CPMAC_MAC_STATUS               0x0164
-# define MAC_STATUS_QOS                        0x00000004
-# define MAC_STATUS_RXFLOW             0x00000002
-# define MAC_STATUS_TXFLOW             0x00000001
+#define MAC_STATUS_QOS                 0x00000004
+#define MAC_STATUS_RXFLOW              0x00000002
+#define MAC_STATUS_TXFLOW              0x00000001
 #define CPMAC_TX_INT_ENABLE            0x0178
 #define CPMAC_TX_INT_CLEAR             0x017c
 #define CPMAC_MAC_INT_VECTOR           0x0180
-# define MAC_INT_STATUS                        0x00080000
-# define MAC_INT_HOST                  0x00040000
-# define MAC_INT_RX                    0x00020000
-# define MAC_INT_TX                    0x00010000
+#define MAC_INT_STATUS                 0x00080000
+#define MAC_INT_HOST                   0x00040000
+#define MAC_INT_RX                     0x00020000
+#define MAC_INT_TX                     0x00010000
 #define CPMAC_MAC_EOI_VECTOR           0x0184
 #define CPMAC_RX_INT_ENABLE            0x0198
 #define CPMAC_RX_INT_CLEAR             0x019c
@@ -118,8 +118,8 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 #define CPMAC_TX_ACK(channel)          (0x0640 + (channel) * 4)
 #define CPMAC_RX_ACK(channel)          (0x0660 + (channel) * 4)
 #define CPMAC_REG_END                  0x0680
-/*
- * Rx/Tx statistics
+
+/* Rx/Tx statistics
  * TODO: use some of them to fill stats in cpmac_stats()
  */
 #define CPMAC_STATS_RX_GOOD            0x0200
@@ -157,24 +157,24 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
 /* MDIO bus */
 #define CPMAC_MDIO_VERSION             0x0000
 #define CPMAC_MDIO_CONTROL             0x0004
-# define MDIOC_IDLE                    0x80000000
-# define MDIOC_ENABLE                  0x40000000
-# define MDIOC_PREAMBLE                        0x00100000
-# define MDIOC_FAULT                   0x00080000
-# define MDIOC_FAULTDETECT             0x00040000
-# define MDIOC_INTTEST                 0x00020000
-# define MDIOC_CLKDIV(div)             ((div) & 0xff)
+#define MDIOC_IDLE                     0x80000000
+#define MDIOC_ENABLE                   0x40000000
+#define MDIOC_PREAMBLE                 0x00100000
+#define MDIOC_FAULT                    0x00080000
+#define MDIOC_FAULTDETECT              0x00040000
+#define MDIOC_INTTEST                  0x00020000
+#define MDIOC_CLKDIV(div)              ((div) & 0xff)
 #define CPMAC_MDIO_ALIVE               0x0008
 #define CPMAC_MDIO_LINK                        0x000c
 #define CPMAC_MDIO_ACCESS(channel)     (0x0080 + (channel) * 8)
-# define MDIO_BUSY                     0x80000000
-# define MDIO_WRITE                    0x40000000
-# define MDIO_REG(reg)                 (((reg) & 0x1f) << 21)
-# define MDIO_PHY(phy)                 (((phy) & 0x1f) << 16)
-# define MDIO_DATA(data)               ((data) & 0xffff)
+#define MDIO_BUSY                      0x80000000
+#define MDIO_WRITE                     0x40000000
+#define MDIO_REG(reg)                  (((reg) & 0x1f) << 21)
+#define MDIO_PHY(phy)                  (((phy) & 0x1f) << 16)
+#define MDIO_DATA(data)                        ((data) & 0xffff)
 #define CPMAC_MDIO_PHYSEL(channel)     (0x0084 + (channel) * 8)
-# define PHYSEL_LINKSEL                        0x00000040
-# define PHYSEL_LINKINT                        0x00000020
+#define PHYSEL_LINKSEL                 0x00000040
+#define PHYSEL_LINKINT                 0x00000020
 
 struct cpmac_desc {
        u32 hw_next;
@@ -224,12 +224,12 @@ static void cpmac_dump_regs(struct net_device *dev)
 {
        int i;
        struct cpmac_priv *priv = netdev_priv(dev);
+
        for (i = 0; i < CPMAC_REG_END; i += 4) {
                if (i % 16 == 0) {
                        if (i)
-                               pr_cont("\n");
-                       printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
-                              priv->regs + i);
+                               printk("\n");
+                       printk("%s: reg[%p]:", dev->name, priv->regs + i);
                }
                printk(" %08x", cpmac_read(priv->regs, i));
        }
@@ -239,7 +239,8 @@ static void cpmac_dump_regs(struct net_device *dev)
 static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
 {
        int i;
-       printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
+
+       printk("%s: desc[%p]:", dev->name, desc);
        for (i = 0; i < sizeof(*desc) / 4; i++)
                printk(" %08x", ((u32 *)desc)[i]);
        printk("\n");
@@ -249,6 +250,7 @@ static void cpmac_dump_all_desc(struct net_device *dev)
 {
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *dump = priv->rx_head;
+
        do {
                cpmac_dump_desc(dev, dump);
                dump = dump->next;
@@ -258,13 +260,13 @@ static void cpmac_dump_all_desc(struct net_device *dev)
 static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
 {
        int i;
-       printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
+
+       printk("%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
        for (i = 0; i < skb->len; i++) {
                if (i % 16 == 0) {
                        if (i)
-                               pr_cont("\n");
-                       printk(KERN_DEBUG "%s: data[%p]:", dev->name,
-                              skb->data + i);
+                               printk("\n");
+                       printk("%s: data[%p]:", dev->name, skb->data + i);
                }
                printk(" %02x", ((u8 *)skb->data)[i]);
        }
@@ -281,6 +283,7 @@ static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
                    MDIO_PHY(phy_id));
        while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
                cpu_relax();
+
        return MDIO_DATA(val);
 }
 
@@ -291,6 +294,7 @@ static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
                cpu_relax();
        cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
                    MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
+
        return 0;
 }
 
@@ -300,12 +304,13 @@ static int cpmac_mdio_reset(struct mii_bus *bus)
 
        cpmac_clk = clk_get(&bus->dev, "cpmac");
        if (IS_ERR(cpmac_clk)) {
-               printk(KERN_ERR "unable to get cpmac clock\n");
+               pr_err("unable to get cpmac clock\n");
                return -1;
        }
        ar7_device_reset(AR7_RESET_BIT_MDIO);
        cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
                    MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
+
        return 0;
 }
 
@@ -331,8 +336,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
                        cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
                } else {
-                       /*
-                        * cpmac uses some strange mac address hashing
+                       /* cpmac uses some strange mac address hashing
                         * (not crc32)
                         */
                        netdev_for_each_mc_addr(ha, dev) {
@@ -369,8 +373,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
        cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
        if (unlikely(!desc->datalen)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: rx: spurious interrupt\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev, "rx: spurious interrupt\n");
+
                return NULL;
        }
 
@@ -390,15 +394,14 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
                                                    DMA_FROM_DEVICE);
                desc->hw_data = (u32)desc->data_mapping;
                if (unlikely(netif_msg_pktdata(priv))) {
-                       printk(KERN_DEBUG "%s: received packet:\n",
-                              priv->dev->name);
+                       netdev_dbg(priv->dev, "received packet:\n");
                        cpmac_dump_skb(priv->dev, result);
                }
        } else {
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING
-                              "%s: low on skbs, dropping packet\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev,
+                                   "low on skbs, dropping packet\n");
+
                priv->dev->stats.rx_dropped++;
        }
 
@@ -418,8 +421,8 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
        spin_lock(&priv->rx_lock);
        if (unlikely(!priv->rx_head)) {
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: rx: polling, but no queue\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev, "rx: polling, but no queue\n");
+
                spin_unlock(&priv->rx_lock);
                napi_complete(napi);
                return 0;
@@ -432,15 +435,15 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 
                if ((desc->dataflags & CPMAC_EOQ) != 0) {
                        /* The last update to eoq->hw_next didn't happen
-                       * soon enough, and the receiver stopped here.
-                       *Remember this descriptor so we can restart
-                       * the receiver after freeing some space.
-                       */
+                        * soon enough, and the receiver stopped here.
+                        * Remember this descriptor so we can restart
+                        * the receiver after freeing some space.
+                        */
                        if (unlikely(restart)) {
                                if (netif_msg_rx_err(priv))
-                                       printk(KERN_ERR "%s: poll found a"
-                                               " duplicate EOQ: %p and %p\n",
-                                               priv->dev->name, restart, desc);
+                                       netdev_err(priv->dev, "poll found a"
+                                                  " duplicate EOQ: %p and %p\n",
+                                                  restart, desc);
                                goto fatal_error;
                        }
 
@@ -457,25 +460,27 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 
        if (desc != priv->rx_head) {
                /* We freed some buffers, but not the whole ring,
-                * add what we did free to the rx list */
+                * add what we did free to the rx list
+                */
                desc->prev->hw_next = (u32)0;
                priv->rx_head->prev->hw_next = priv->rx_head->mapping;
        }
 
        /* Optimization: If we did not actually process an EOQ (perhaps because
         * of quota limits), check to see if the tail of the queue has EOQ set.
-       * We should immediately restart in that case so that the receiver can
-       * restart and run in parallel with more packet processing.
-       * This lets us handle slightly larger bursts before running
-       * out of ring space (assuming dev->weight < ring_size) */
+        * We should immediately restart in that case so that the receiver can
+        * restart and run in parallel with more packet processing.
+        * This lets us handle slightly larger bursts before running
+        * out of ring space (assuming dev->weight < ring_size)
+        */
 
        if (!restart &&
             (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
                    == CPMAC_EOQ &&
             (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
                /* reset EOQ so the poll loop (above) doesn't try to
-               * restart this when it eventually gets to this descriptor.
-               */
+                * restart this when it eventually gets to this descriptor.
+                */
                priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
                restart = priv->rx_head;
        }
@@ -484,15 +489,13 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
                priv->dev->stats.rx_errors++;
                priv->dev->stats.rx_fifo_errors++;
                if (netif_msg_rx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: rx dma ring overrun\n",
-                              priv->dev->name);
+                       netdev_warn(priv->dev, "rx dma ring overrun\n");
 
                if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
                        if (netif_msg_drv(priv))
-                               printk(KERN_ERR "%s: cpmac_poll is trying to "
-                                       "restart rx from a descriptor that's "
-                                       "not free: %p\n",
-                                       priv->dev->name, restart);
+                               netdev_err(priv->dev, "cpmac_poll is trying "
+                                       "to restart rx from a descriptor "
+                                       "that's not free: %p\n", restart);
                        goto fatal_error;
                }
 
@@ -502,11 +505,12 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
        priv->rx_head = desc;
        spin_unlock(&priv->rx_lock);
        if (unlikely(netif_msg_rx_status(priv)))
-               printk(KERN_DEBUG "%s: poll processed %d packets\n",
-                      priv->dev->name, received);
+               netdev_dbg(priv->dev, "poll processed %d packets\n", received);
+
        if (processed == 0) {
                /* we ran out of packets to read,
-                * revert to interrupt-driven mode */
+                * revert to interrupt-driven mode
+                */
                napi_complete(napi);
                cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
                return 0;
@@ -516,16 +520,15 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
 
 fatal_error:
        /* Something went horribly wrong.
-        * Reset hardware to try to recover rather than wedging. */
-
+        * Reset hardware to try to recover rather than wedging.
+        */
        if (netif_msg_drv(priv)) {
-               printk(KERN_ERR "%s: cpmac_poll is confused. "
-                               "Resetting hardware\n", priv->dev->name);
+               netdev_err(priv->dev, "cpmac_poll is confused. "
+                          "Resetting hardware\n");
                cpmac_dump_all_desc(priv->dev);
-               printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
-                       priv->dev->name,
-                       cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
-                       cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
+               netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
+                          cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
+                          cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
        }
 
        spin_unlock(&priv->rx_lock);
@@ -537,6 +540,7 @@ fatal_error:
        cpmac_hw_stop(priv->dev);
        if (!schedule_work(&priv->reset_work))
                atomic_dec(&priv->reset_pending);
+
        return 0;
 
 }
@@ -560,8 +564,8 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
        desc = &priv->desc_ring[queue];
        if (unlikely(desc->dataflags & CPMAC_OWN)) {
                if (netif_msg_tx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING "%s: tx dma ring full\n",
-                              dev->name);
+                       netdev_warn(dev, "tx dma ring full\n");
+
                return NETDEV_TX_BUSY;
        }
 
@@ -575,8 +579,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
        desc->datalen = len;
        desc->buflen = len;
        if (unlikely(netif_msg_tx_queued(priv)))
-               printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
-                      skb->len);
+               netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
        if (unlikely(netif_msg_hw(priv)))
                cpmac_dump_desc(dev, desc);
        if (unlikely(netif_msg_pktdata(priv)))
@@ -602,8 +605,8 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
                                 DMA_TO_DEVICE);
 
                if (unlikely(netif_msg_tx_done(priv)))
-                       printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
-                              desc->skb, desc->skb->len);
+                       netdev_dbg(dev, "sent 0x%p, len=%d\n",
+                                  desc->skb, desc->skb->len);
 
                dev_kfree_skb_irq(desc->skb);
                desc->skb = NULL;
@@ -611,8 +614,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
                        netif_wake_subqueue(dev, queue);
        } else {
                if (netif_msg_tx_err(priv) && net_ratelimit())
-                       printk(KERN_WARNING
-                              "%s: end_xmit: spurious interrupt\n", dev->name);
+                       netdev_warn(dev, "end_xmit: spurious interrupt\n");
                if (__netif_subqueue_stopped(dev, queue))
                        netif_wake_subqueue(dev, queue);
        }
@@ -687,14 +689,14 @@ static void cpmac_clear_rx(struct net_device *dev)
        struct cpmac_priv *priv = netdev_priv(dev);
        struct cpmac_desc *desc;
        int i;
+
        if (unlikely(!priv->rx_head))
                return;
        desc = priv->rx_head;
        for (i = 0; i < priv->ring_size; i++) {
                if ((desc->dataflags & CPMAC_OWN) == 0) {
                        if (netif_msg_rx_err(priv) && net_ratelimit())
-                               printk(KERN_WARNING "%s: packet dropped\n",
-                                      dev->name);
+                               netdev_warn(dev, "packet dropped\n");
                        if (unlikely(netif_msg_hw(priv)))
                                cpmac_dump_desc(dev, desc);
                        desc->dataflags = CPMAC_OWN;
@@ -710,6 +712,7 @@ static void cpmac_clear_tx(struct net_device *dev)
 {
        struct cpmac_priv *priv = netdev_priv(dev);
        int i;
+
        if (unlikely(!priv->desc_ring))
                return;
        for (i = 0; i < CPMAC_QUEUES; i++) {
@@ -751,16 +754,16 @@ static void cpmac_check_status(struct net_device *dev)
        if (rx_code || tx_code) {
                if (netif_msg_drv(priv) && net_ratelimit()) {
                        /* Can't find any documentation on what these
-                        *error codes actually are. So just log them and hope..
+                        * error codes actually are. So just log them and hope..
                         */
                        if (rx_code)
-                               printk(KERN_WARNING "%s: host error %d on rx "
-                                    "channel %d (macstatus %08x), resetting\n",
-                                    dev->name, rx_code, rx_channel, macstatus);
+                               netdev_warn(dev, "host error %d on rx "
+                                       "channel %d (macstatus %08x), resetting\n",
+                                       rx_code, rx_channel, macstatus);
                        if (tx_code)
-                               printk(KERN_WARNING "%s: host error %d on tx "
-                                    "channel %d (macstatus %08x), resetting\n",
-                                    dev->name, tx_code, tx_channel, macstatus);
+                               netdev_warn(dev, "host error %d on tx "
+                                       "channel %d (macstatus %08x), resetting\n",
+                                       tx_code, tx_channel, macstatus);
                }
 
                netif_tx_stop_all_queues(dev);
@@ -785,8 +788,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
        status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
 
        if (unlikely(netif_msg_intr(priv)))
-               printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
-                      status);
+               netdev_dbg(dev, "interrupt status: 0x%08x\n", status);
 
        if (status & MAC_INT_TX)
                cpmac_end_xmit(dev, (status & 7));
@@ -815,7 +817,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
        dev->stats.tx_errors++;
        spin_unlock(&priv->lock);
        if (netif_msg_tx_err(priv) && net_ratelimit())
-               printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
+               netdev_warn(dev, "transmit timeout\n");
 
        atomic_inc(&priv->reset_pending);
        barrier();
@@ -829,6 +831,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
 static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
 {
        struct cpmac_priv *priv = netdev_priv(dev);
+
        if (!(netif_running(dev)))
                return -EINVAL;
        if (!priv->phy)
@@ -884,6 +887,7 @@ static int cpmac_set_ringparam(struct net_device *dev,
        if (netif_running(dev))
                return -EBUSY;
        priv->ring_size = ring->rx_pending;
+
        return 0;
 }
 
@@ -951,8 +955,8 @@ static int cpmac_open(struct net_device *dev)
        mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
        if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: failed to request registers\n",
-                              dev->name);
+                       netdev_err(dev, "failed to request registers\n");
+
                res = -ENXIO;
                goto fail_reserve;
        }
@@ -960,8 +964,8 @@ static int cpmac_open(struct net_device *dev)
        priv->regs = ioremap(mem->start, resource_size(mem));
        if (!priv->regs) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: failed to remap registers\n",
-                              dev->name);
+                       netdev_err(dev, "failed to remap registers\n");
+
                res = -ENXIO;
                goto fail_remap;
        }
@@ -1003,8 +1007,8 @@ static int cpmac_open(struct net_device *dev)
        res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
        if (res) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: failed to obtain irq\n",
-                              dev->name);
+                       netdev_err(dev, "failed to obtain irq\n");
+
                goto fail_irq;
        }
 
@@ -1077,6 +1081,7 @@ static int cpmac_stop(struct net_device *dev)
        dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
                          (CPMAC_QUEUES + priv->ring_size),
                          priv->desc_ring, priv->dma_ring);
+
        return 0;
 }
 
@@ -1121,7 +1126,7 @@ static int cpmac_probe(struct platform_device *pdev)
 
        if (phy_id == PHY_MAX_ADDR) {
                dev_err(&pdev->dev, "no PHY present, falling back "
-                                       "to switch on MDIO bus 0\n");
+                       "to switch on MDIO bus 0\n");
                strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
                phy_id = pdev->id;
        }
@@ -1137,7 +1142,7 @@ static int cpmac_probe(struct platform_device *pdev)
        mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
        if (!mem) {
                rc = -ENODEV;
-               goto fail;
+               goto out;
        }
 
        dev->irq = platform_get_irq_byname(pdev, "irq");
@@ -1162,44 +1167,48 @@ static int cpmac_probe(struct platform_device *pdev)
 
        if (IS_ERR(priv->phy)) {
                if (netif_msg_drv(priv))
-                       printk(KERN_ERR "%s: Could not attach to PHY\n",
-                              dev->name);
+                       dev_err(&pdev->dev, "Could not attach to PHY\n");
+
                rc = PTR_ERR(priv->phy);
-               goto fail;
+               goto out;
        }
 
        rc = register_netdev(dev);
        if (rc) {
-               printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
-                      dev->name);
+               dev_err(&pdev->dev, "Could not register net device\n");
                goto fail;
        }
 
        if (netif_msg_probe(priv)) {
-               printk(KERN_INFO
-                      "cpmac: device %s (regs: %p, irq: %d, phy: %s, "
-                      "mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
-                      priv->phy_name, dev->dev_addr);
+               dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
+                        "mac: %pM\n", (void *)mem->start, dev->irq,
+                        priv->phy_name, dev->dev_addr);
        }
+
        return 0;
 
 fail:
        free_netdev(dev);
+out:
        return rc;
 }
 
 static int cpmac_remove(struct platform_device *pdev)
 {
        struct net_device *dev = platform_get_drvdata(pdev);
+
        unregister_netdev(dev);
        free_netdev(dev);
+
        return 0;
 }
 
 static struct platform_driver cpmac_driver = {
-       .driver.name = "cpmac",
-       .driver.owner = THIS_MODULE,
-       .probe = cpmac_probe,
+       .driver = {
+               .name   = "cpmac",
+               .owner  = THIS_MODULE,
+       },
+       .probe  = cpmac_probe,
        .remove = cpmac_remove,
 };
 
@@ -1221,7 +1230,7 @@ int cpmac_init(void)
        cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
 
        if (!cpmac_mii->priv) {
-               printk(KERN_ERR "Can't ioremap mdio registers\n");
+               pr_err("Can't ioremap mdio registers\n");
                res = -ENXIO;
                goto fail_alloc;
        }
index ff380da..999fb72 100644
@@ -884,14 +884,16 @@ static int cpsw_set_coalesce(struct net_device *ndev,
        u32 addnl_dvdr = 1;
        u32 coal_intvl = 0;
 
-       if (!coal->rx_coalesce_usecs)
-               return -EINVAL;
-
        coal_intvl = coal->rx_coalesce_usecs;
 
        int_ctrl =  readl(&priv->wr_regs->int_control);
        prescale = priv->bus_freq_mhz * 4;
 
+       if (!coal->rx_coalesce_usecs) {
+               int_ctrl &= ~(CPSW_INTPRESCALE_MASK | CPSW_INTPACEEN);
+               goto update_return;
+       }
+
        if (coal_intvl < CPSW_CMINTMIN_INTVL)
                coal_intvl = CPSW_CMINTMIN_INTVL;
 
@@ -919,6 +921,8 @@ static int cpsw_set_coalesce(struct net_device *ndev,
        int_ctrl |= CPSW_INTPACEEN;
        int_ctrl &= (~CPSW_INTPRESCALE_MASK);
        int_ctrl |= (prescale & CPSW_INTPRESCALE_MASK);
+
+update_return:
        writel(int_ctrl, &priv->wr_regs->int_control);
 
        cpsw_notice(priv, timer, "Set coalesce to %d usecs.\n", coal_intvl);
@@ -999,17 +1003,6 @@ static void cpsw_get_ethtool_stats(struct net_device *ndev,
        }
 }
 
-static inline int __show_stat(char *buf, int maxlen, const char *name, u32 val)
-{
-       static char *leader = "........................................";
-
-       if (!val)
-               return 0;
-       else
-               return snprintf(buf, maxlen, "%s %s %10d\n", name,
-                               leader + strlen(name), val);
-}
-
 static int cpsw_common_res_usage_state(struct cpsw_priv *priv)
 {
        u32 i;
@@ -1212,7 +1205,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
        for_each_slave(priv, cpsw_slave_open, priv);
 
        /* Add default VLAN */
-       cpsw_add_default_vlan(priv);
+       if (!priv->data.dual_emac)
+               cpsw_add_default_vlan(priv);
+       else
+               cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
+                                 ALE_ALL_PORTS << priv->host_port,
+                                 ALE_ALL_PORTS << priv->host_port, 0, 0);
 
        if (!cpsw_common_res_usage_state(priv)) {
                /* setup tx dma to fixed prio and zero offset */
@@ -1666,14 +1664,34 @@ static const struct net_device_ops cpsw_netdev_ops = {
        .ndo_vlan_rx_kill_vid   = cpsw_ndo_vlan_rx_kill_vid,
 };
 
+static int cpsw_get_regs_len(struct net_device *ndev)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+
+       return priv->data.ale_entries * ALE_ENTRY_WORDS * sizeof(u32);
+}
+
+static void cpsw_get_regs(struct net_device *ndev,
+                         struct ethtool_regs *regs, void *p)
+{
+       struct cpsw_priv *priv = netdev_priv(ndev);
+       u32 *reg = p;
+
+       /* update CPSW IP version */
+       regs->version = priv->version;
+
+       cpsw_ale_dump(priv->ale, reg);
+}
+
 static void cpsw_get_drvinfo(struct net_device *ndev,
                             struct ethtool_drvinfo *info)
 {
        struct cpsw_priv *priv = netdev_priv(ndev);
 
-       strlcpy(info->driver, "TI CPSW Driver v1.0", sizeof(info->driver));
+       strlcpy(info->driver, "cpsw", sizeof(info->driver));
        strlcpy(info->version, "1.0", sizeof(info->version));
        strlcpy(info->bus_info, priv->pdev->name, sizeof(info->bus_info));
+       info->regdump_len = cpsw_get_regs_len(ndev);
 }
 
 static u32 cpsw_get_msglevel(struct net_device *ndev)
@@ -1781,6 +1799,8 @@ static const struct ethtool_ops cpsw_ethtool_ops = {
        .get_ethtool_stats      = cpsw_get_ethtool_stats,
        .get_wol        = cpsw_get_wol,
        .set_wol        = cpsw_set_wol,
+       .get_regs_len   = cpsw_get_regs_len,
+       .get_regs       = cpsw_get_regs,
 };
 
 static void cpsw_slave_init(struct cpsw_slave *slave, struct cpsw_priv *priv,
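
The new get_regs_len()/get_regs() pair plugs cpsw into the generic ethtool register-dump path (what `ethtool -d` reads): the core first asks the driver how many bytes it will produce, allocates a buffer of that size, and then lets the driver fill it, with regs->version carried along so user space can decode the blob. Roughly, and glossing over the copy-to-user details (a sketch, not verbatim core code):

    struct ethtool_regs regs = { 0 };
    int reglen = ops->get_regs_len(dev);    /* cpsw: full ALE table */
    void *regbuf = vzalloc(reglen);         /* exact allocator varies */

    if (regbuf)
            ops->get_regs(dev, &regs, regbuf);  /* -> cpsw_ale_dump() */
    /* regs.version and regbuf are then copied out to user space */
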
index 7f89306..0579b22 100644
@@ -25,8 +25,6 @@
 #include "cpsw_ale.h"
 
 #define BITMASK(bits)          (BIT(bits) - 1)
-#define ALE_ENTRY_BITS         68
-#define ALE_ENTRY_WORDS        DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
 
 #define ALE_VERSION_MAJOR(rev) ((rev >> 8) & 0xff)
 #define ALE_VERSION_MINOR(rev) (rev & 0xff)
@@ -763,3 +761,13 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
        kfree(ale);
        return 0;
 }
+
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data)
+{
+       int i;
+
+       for (i = 0; i < ale->params.ale_entries; i++) {
+               cpsw_ale_read(ale, i, data);
+               data += ALE_ENTRY_WORDS;
+       }
+}
index de409c3..31cf43c 100644
@@ -80,6 +80,9 @@ enum cpsw_ale_port_state {
 #define ALE_MCAST_FWD_LEARN            2
 #define ALE_MCAST_FWD_2                        3
 
+#define ALE_ENTRY_BITS         68
+#define ALE_ENTRY_WORDS        DIV_ROUND_UP(ALE_ENTRY_BITS, 32)
+
 struct cpsw_ale *cpsw_ale_create(struct cpsw_ale_params *params);
 int cpsw_ale_destroy(struct cpsw_ale *ale);
 
@@ -104,5 +107,6 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
 int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
 int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
                         int control, int value);
+void cpsw_ale_dump(struct cpsw_ale *ale, u32 *data);
 
 #endif
index 6b56f85..ab92f67 100644
@@ -256,23 +256,21 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
                      u16 ts_seqid, u8 ts_msgtype)
 {
        u16 *seqid;
-       unsigned int offset;
+       unsigned int offset = 0;
        u8 *msgtype, *data = skb->data;
 
-       switch (ptp_class) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (ptp_class & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (ptp_class & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
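
The rewritten cpts_match() derives the PTP header offset compositionally, one VLAN adjustment plus one per-transport base, instead of enumerating every class/VLAN combination. As a worked example with the usual header sizes (ETH_HLEN = 14, VLAN_HLEN = 4, UDP_HLEN = 8, and IPV4_HLEN() reading a 20-byte IHL out of the packet), a VLAN-tagged PTP event message over UDP/IPv4 yields:

    unsigned int offset = 0;

    if (ptp_class & PTP_CLASS_VLAN)
            offset += VLAN_HLEN;                             /*  4      */
    if ((ptp_class & PTP_CLASS_PMASK) == PTP_CLASS_IPV4)
            offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; /* 14+20+8 */

    /* offset == 46: the PTP header starts 46 bytes into skb->data */
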
index 735dc53..2791f6f 100644
@@ -38,6 +38,7 @@
 #include <linux/davinci_emac.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
+#include <linux/of_mdio.h>
 #include <linux/pinctrl/consumer.h>
 
 /*
@@ -95,6 +96,10 @@ struct davinci_mdio_data {
        struct mii_bus  *bus;
        bool            suspended;
        unsigned long   access_time; /* jiffies */
+       /* Indicates that the driver shouldn't modify phy_mask if the
+        * MDIO bus is registered from DT.
+        */
+       bool            skip_scan;
 };
 
 static void __davinci_mdio_reset(struct davinci_mdio_data *data)
@@ -144,6 +149,9 @@ static int davinci_mdio_reset(struct mii_bus *bus)
        dev_info(data->dev, "davinci mdio revision %d.%d\n",
                 (ver >> 8) & 0xff, ver & 0xff);
 
+       if (data->skip_scan)
+               return 0;
+
        /* get phy mask from the alive register */
        phy_mask = __raw_readl(&data->regs->alive);
        if (phy_mask) {
@@ -369,8 +377,17 @@ static int davinci_mdio_probe(struct platform_device *pdev)
                goto bail_out;
        }
 
-       /* register the mii bus */
-       ret = mdiobus_register(data->bus);
+       /* register the mii bus
+        * Create PHYs from DT only if PHY child nodes are explicitly
+        * defined, to keep backward compatibility with DTs which assume
+        * that Davinci MDIO will always scan the bus to detect PHYs.
+        */
+       if (dev->of_node && of_get_child_count(dev->of_node)) {
+               data->skip_scan = true;
+               ret = of_mdiobus_register(data->bus, dev->of_node);
+       } else {
+               ret = mdiobus_register(data->bus);
+       }
        if (ret)
                goto bail_out;
 
index 62b19be..6078342 100644
@@ -69,10 +69,6 @@ MODULE_AUTHOR("Maintainer: Samuel Chessman <chessman@tux.org>");
 MODULE_DESCRIPTION("Driver for TI ThunderLAN based ethernet PCI adapters");
 MODULE_LICENSE("GPL");
 
-
-/* Define this to enable Link beat monitoring */
-#undef MONITOR
-
 /* Turn on debugging. See Documentation/networking/tlan.txt for details */
 static  int            debug;
 module_param(debug, int, 0);
@@ -107,8 +103,10 @@ static struct board {
        { "Compaq Netelligent 10/100 TX Embedded UTP",
          TLAN_ADAPTER_NONE, 0x83 },
        { "Olicom OC-2183/2185", TLAN_ADAPTER_USE_INTERN_10, 0x83 },
-       { "Olicom OC-2325", TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
-       { "Olicom OC-2326", TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
+       { "Olicom OC-2325", TLAN_ADAPTER_ACTIVITY_LED |
+         TLAN_ADAPTER_UNMANAGED_PHY, 0xf8 },
+       { "Olicom OC-2326", TLAN_ADAPTER_ACTIVITY_LED |
+         TLAN_ADAPTER_USE_INTERN_10, 0xf8 },
        { "Compaq Netelligent 10/100 TX UTP", TLAN_ADAPTER_ACTIVITY_LED, 0x83 },
        { "Compaq Netelligent 10 T/2 PCI UTP/coax", TLAN_ADAPTER_NONE, 0x83 },
        { "Compaq NetFlex-3/E",
@@ -192,9 +190,7 @@ static void tlan_phy_power_up(struct net_device *);
 static void    tlan_phy_reset(struct net_device *);
 static void    tlan_phy_start_link(struct net_device *);
 static void    tlan_phy_finish_auto_neg(struct net_device *);
-#ifdef MONITOR
-static void     tlan_phy_monitor(struct net_device *);
-#endif
+static void     tlan_phy_monitor(unsigned long);
 
 /*
   static int   tlan_phy_nop(struct net_device *);
@@ -337,6 +333,7 @@ static void tlan_stop(struct net_device *dev)
 {
        struct tlan_priv *priv = netdev_priv(dev);
 
+       del_timer_sync(&priv->media_timer);
        tlan_read_and_clear_stats(dev, TLAN_RECORD);
        outl(TLAN_HC_AD_RST, dev->base_addr + TLAN_HOST_CMD);
        /* Reset and power down phy */
@@ -368,8 +365,10 @@ static int tlan_suspend(struct pci_dev *pdev, pm_message_t state)
 static int tlan_resume(struct pci_dev *pdev)
 {
        struct net_device *dev = pci_get_drvdata(pdev);
+       int rc = pci_enable_device(pdev);
 
-       pci_set_power_state(pdev, PCI_D0);
+       if (rc)
+               return rc;
        pci_restore_state(pdev);
        pci_enable_wake(pdev, PCI_D0, 0);
        netif_device_attach(dev);
@@ -781,7 +780,43 @@ static const struct net_device_ops tlan_netdev_ops = {
 #endif
 };
 
+static void tlan_get_drvinfo(struct net_device *dev,
+                            struct ethtool_drvinfo *info)
+{
+       struct tlan_priv *priv = netdev_priv(dev);
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       if (priv->pci_dev)
+               strlcpy(info->bus_info, pci_name(priv->pci_dev),
+                       sizeof(info->bus_info));
+       else
+               strlcpy(info->bus_info, "EISA", sizeof(info->bus_info));
+       info->eedump_len = TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom_len(struct net_device *dev)
+{
+       return TLAN_EEPROM_SIZE;
+}
+
+static int tlan_get_eeprom(struct net_device *dev,
+                          struct ethtool_eeprom *eeprom, u8 *data)
+{
+       int i;
+
+       for (i = 0; i < TLAN_EEPROM_SIZE; i++)
+               if (tlan_ee_read_byte(dev, i, &data[i]))
+                       return -EIO;
 
+       return 0;
+}
+
+static const struct ethtool_ops tlan_ethtool_ops = {
+       .get_drvinfo    = tlan_get_drvinfo,
+       .get_link       = ethtool_op_get_link,
+       .get_eeprom_len = tlan_get_eeprom_len,
+       .get_eeprom     = tlan_get_eeprom,
+};
 
 /***************************************************************
  *     tlan_init
@@ -830,7 +865,7 @@ static int tlan_init(struct net_device *dev)
                priv->rx_list_dma + sizeof(struct tlan_list)*TLAN_NUM_RX_LISTS;
 
        err = 0;
-       for (i = 0;  i < 6 ; i++)
+       for (i = 0; i < ETH_ALEN; i++)
                err |= tlan_ee_read_byte(dev,
                                         (u8) priv->adapter->addr_ofs + i,
                                         (u8 *) &dev->dev_addr[i]);
@@ -838,12 +873,20 @@ static int tlan_init(struct net_device *dev)
                pr_err("%s: Error reading MAC from eeprom: %d\n",
                       dev->name, err);
        }
-       dev->addr_len = 6;
+       /* Olicom OC-2325/OC-2326 have the address byte-swapped */
+       if (priv->adapter->addr_ofs == 0xf8) {
+               for (i = 0; i < ETH_ALEN; i += 2) {
+                       char tmp = dev->dev_addr[i];
+                       dev->dev_addr[i] = dev->dev_addr[i + 1];
+                       dev->dev_addr[i + 1] = tmp;
+               }
+       }
 
        netif_carrier_off(dev);
 
        /* Device methods */
        dev->netdev_ops = &tlan_netdev_ops;
+       dev->ethtool_ops = &tlan_ethtool_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
 
        return 0;
@@ -886,6 +929,7 @@ static int tlan_open(struct net_device *dev)
        }
 
        init_timer(&priv->timer);
+       init_timer(&priv->media_timer);
 
        tlan_start(dev);
 
@@ -1156,9 +1200,6 @@ static irqreturn_t tlan_handle_interrupt(int irq, void *dev_id)
 
 static int tlan_close(struct net_device *dev)
 {
-       struct tlan_priv *priv = netdev_priv(dev);
-
-       priv->neg_be_verbose = 0;
        tlan_stop(dev);
 
        free_irq(dev->irq, dev);
@@ -1808,11 +1849,6 @@ static void tlan_timer(unsigned long data)
        priv->timer.function = NULL;
 
        switch (priv->timer_type) {
-#ifdef MONITOR
-       case TLAN_TIMER_LINK_BEAT:
-               tlan_phy_monitor(dev);
-               break;
-#endif
        case TLAN_TIMER_PHY_PDOWN:
                tlan_phy_power_down(dev);
                break;
@@ -1856,8 +1892,6 @@ static void tlan_timer(unsigned long data)
 }
 
 
-
-
 /*****************************************************************************
 ******************************************************************************
 
@@ -2205,7 +2239,9 @@ tlan_reset_adapter(struct net_device *dev)
                }
        }
 
-       if (priv->phy_num == 0)
+       /* don't power down internal PHY if we're going to use it */
+       if (priv->phy_num == 0 ||
+          (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))
                data |= TLAN_NET_CFG_PHY_EN;
        tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, (u16) data);
 
@@ -2255,42 +2291,39 @@ tlan_finish_reset(struct net_device *dev)
                tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
                udelay(1000);
                tlan_mii_read_reg(dev, phy, MII_GEN_STS, &status);
-               if ((status & MII_GS_LINK) &&
-                   /* We only support link info on Nat.Sem. PHY's */
-                   (tlphy_id1 == NAT_SEM_ID1) &&
-                   (tlphy_id2 == NAT_SEM_ID2)) {
-                       tlan_mii_read_reg(dev, phy, MII_AN_LPA, &partner);
-                       tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR, &tlphy_par);
-
-                       netdev_info(dev,
-                                   "Link active with %s %uMbps %s-Duplex\n",
-                                   !(tlphy_par & TLAN_PHY_AN_EN_STAT)
-                                   ? "forced" : "Autonegotiation enabled,",
-                                   tlphy_par & TLAN_PHY_SPEED_100
-                                   ? 100 : 10,
-                                   tlphy_par & TLAN_PHY_DUPLEX_FULL
-                                   ? "Full" : "Half");
-
-                       if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
-                               netdev_info(dev, "Partner capability:");
-                               for (i = 5; i < 10; i++)
-                                       if (partner & (1 << i))
-                                               pr_cont(" %s", media[i-5]);
-                               pr_cont("\n");
-                       }
-
-                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
-                                       TLAN_LED_LINK);
-#ifdef MONITOR
-                       /* We have link beat..for now anyway */
-                       priv->link = 1;
-                       /*Enabling link beat monitoring */
-                       tlan_set_timer(dev, (10*HZ), TLAN_TIMER_LINK_BEAT);
-#endif
-               } else if (status & MII_GS_LINK)  {
-                       netdev_info(dev, "Link active\n");
-                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG,
-                                       TLAN_LED_LINK);
+               if (status & MII_GS_LINK) {
+                       /* We only support link info on Nat.Sem. PHY's */
+                       if ((tlphy_id1 == NAT_SEM_ID1) &&
+                           (tlphy_id2 == NAT_SEM_ID2)) {
+                               tlan_mii_read_reg(dev, phy, MII_AN_LPA,
+                                       &partner);
+                               tlan_mii_read_reg(dev, phy, TLAN_TLPHY_PAR,
+                                       &tlphy_par);
+
+                               netdev_info(dev,
+                                       "Link active, %s %uMbps %s-Duplex\n",
+                                       !(tlphy_par & TLAN_PHY_AN_EN_STAT)
+                                       ? "forced" : "Autonegotiation enabled,",
+                                       tlphy_par & TLAN_PHY_SPEED_100
+                                       ? 100 : 10,
+                                       tlphy_par & TLAN_PHY_DUPLEX_FULL
+                                       ? "Full" : "Half");
+
+                               if (tlphy_par & TLAN_PHY_AN_EN_STAT) {
+                                       netdev_info(dev, "Partner capability:");
+                                       for (i = 5; i < 10; i++)
+                                               if (partner & (1 << i))
+                                                       pr_cont(" %s",
+                                                               media[i-5]);
+                                       pr_cont("\n");
+                               }
+                       } else
+                               netdev_info(dev, "Link active\n");
+                       /* Enabling link beat monitoring */
+                       priv->media_timer.function = tlan_phy_monitor;
+                       priv->media_timer.data = (unsigned long) dev;
+                       priv->media_timer.expires = jiffies + HZ;
+                       add_timer(&priv->media_timer);
                }
        }
 
@@ -2312,6 +2345,7 @@ tlan_finish_reset(struct net_device *dev)
                             dev->base_addr + TLAN_HOST_CMD + 1);
                outl(priv->rx_list_dma, dev->base_addr + TLAN_CH_PARM);
                outl(TLAN_HC_GO | TLAN_HC_RT, dev->base_addr + TLAN_HOST_CMD);
+               tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
                netif_carrier_on(dev);
        } else {
                netdev_info(dev, "Link inactive, will retry in 10 secs...\n");
@@ -2494,9 +2528,10 @@ static void tlan_phy_power_down(struct net_device *dev)
        value = MII_GC_PDOWN | MII_GC_LOOPBK | MII_GC_ISOLATE;
        tlan_mii_sync(dev->base_addr);
        tlan_mii_write_reg(dev, priv->phy[priv->phy_num], MII_GEN_CTL, value);
-       if ((priv->phy_num == 0) &&
-           (priv->phy[1] != TLAN_PHY_NONE) &&
-           (!(priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10))) {
+       if ((priv->phy_num == 0) && (priv->phy[1] != TLAN_PHY_NONE)) {
+               /* if using internal PHY, the external PHY must be powered on */
+               if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10)
+                       value = MII_GC_ISOLATE; /* just isolate it from MII */
                tlan_mii_sync(dev->base_addr);
                tlan_mii_write_reg(dev, priv->phy[1], MII_GEN_CTL, value);
        }
@@ -2538,6 +2573,7 @@ static void tlan_phy_reset(struct net_device *dev)
        struct tlan_priv        *priv = netdev_priv(dev);
        u16             phy;
        u16             value;
+       unsigned long timeout = jiffies + HZ;
 
        phy = priv->phy[priv->phy_num];
 
@@ -2545,9 +2581,13 @@ static void tlan_phy_reset(struct net_device *dev)
        tlan_mii_sync(dev->base_addr);
        value = MII_GC_LOOPBK | MII_GC_RESET;
        tlan_mii_write_reg(dev, phy, MII_GEN_CTL, value);
-       tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
-       while (value & MII_GC_RESET)
+       do {
                tlan_mii_read_reg(dev, phy, MII_GEN_CTL, &value);
+               if (time_after(jiffies, timeout)) {
+                       netdev_err(dev, "PHY reset timeout\n");
+                       return;
+               }
+       } while (value & MII_GC_RESET);
 
        /* Wait for 500 ms and initialize.
         * I don't remember why I wait this long.
@@ -2653,7 +2693,6 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
        struct tlan_priv        *priv = netdev_priv(dev);
        u16             an_adv;
        u16             an_lpa;
-       u16             data;
        u16             mode;
        u16             phy;
        u16             status;
@@ -2668,13 +2707,7 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
                /* Wait for 2 sec to give the process
                 * more time.  Perhaps we should fail after a while.
                 */
-               if (!priv->neg_be_verbose++) {
-                       pr_info("Giving autonegotiation more time.\n");
-                       pr_info("Please check that your adapter has\n");
-                       pr_info("been properly connected to a HUB or Switch.\n");
-                       pr_info("Trying to establish link in the background...\n");
-               }
-               tlan_set_timer(dev, (8*HZ), TLAN_TIMER_PHY_FINISH_AN);
+               tlan_set_timer(dev, 2 * HZ, TLAN_TIMER_PHY_FINISH_AN);
                return;
        }
 
@@ -2687,13 +2720,11 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
        else if (!(mode & 0x0080) && (mode & 0x0040))
                priv->tlan_full_duplex = true;
 
+       /* switch to internal PHY for 10 Mbps */
        if ((!(mode & 0x0180)) &&
            (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) &&
            (priv->phy_num != 0)) {
                priv->phy_num = 0;
-               data = TLAN_NET_CFG_1FRAG | TLAN_NET_CFG_1CHAN
-                       | TLAN_NET_CFG_PHY_EN;
-               tlan_dio_write16(dev->base_addr, TLAN_NET_CONFIG, data);
                tlan_set_timer(dev, (400*HZ/1000), TLAN_TIMER_PHY_PDOWN);
                return;
        }
@@ -2717,7 +2748,6 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
 
 }
 
-#ifdef MONITOR
 
 /*********************************************************************
  *
@@ -2727,18 +2757,18 @@ static void tlan_phy_finish_auto_neg(struct net_device *dev)
  *           None
  *
  *     Params:
- *           dev            The device structure of this device.
+ *           data           The device structure of this device.
  *
  *
  *     This function monitors PHY condition by reading the status
- *     register via the MII bus. This can be used to give info
- *     about link changes (up/down), and possible switch to alternate
- *     media.
+ *     register via the MII bus, controls LINK LED and notifies the
+ *     kernel about link state.
  *
  *******************************************************************/
 
-void tlan_phy_monitor(struct net_device *dev)
+static void tlan_phy_monitor(unsigned long data)
 {
+       struct net_device *dev = (struct net_device *) data;
        struct tlan_priv *priv = netdev_priv(dev);
        u16     phy;
        u16     phy_status;
@@ -2750,30 +2780,40 @@ void tlan_phy_monitor(struct net_device *dev)
 
        /* Check if link has been lost */
        if (!(phy_status & MII_GS_LINK)) {
-               if (priv->link) {
-                       priv->link = 0;
+               if (netif_carrier_ok(dev)) {
                        printk(KERN_DEBUG "TLAN: %s has lost link\n",
                               dev->name);
+                       tlan_dio_write8(dev->base_addr, TLAN_LED_REG, 0);
                        netif_carrier_off(dev);
-                       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
-                       return;
+                       if (priv->adapter->flags & TLAN_ADAPTER_USE_INTERN_10) {
+                               /* power down internal PHY */
+                               u16 data = MII_GC_PDOWN | MII_GC_LOOPBK |
+                                          MII_GC_ISOLATE;
+
+                               tlan_mii_sync(dev->base_addr);
+                               tlan_mii_write_reg(dev, priv->phy[0],
+                                                  MII_GEN_CTL, data);
+                               /* set to external PHY */
+                               priv->phy_num = 1;
+                               /* restart autonegotiation */
+                               tlan_set_timer(dev, 4 * HZ / 10,
+                                              TLAN_TIMER_PHY_PDOWN);
+                               return;
+                       }
                }
        }
 
        /* Link reestablished? */
-       if ((phy_status & MII_GS_LINK) && !priv->link) {
-               priv->link = 1;
+       if ((phy_status & MII_GS_LINK) && !netif_carrier_ok(dev)) {
+               tlan_dio_write8(dev->base_addr, TLAN_LED_REG, TLAN_LED_LINK);
                printk(KERN_DEBUG "TLAN: %s has reestablished link\n",
                       dev->name);
                netif_carrier_on(dev);
        }
-
-       /* Setup a new monitor */
-       tlan_set_timer(dev, (2*HZ), TLAN_TIMER_LINK_BEAT);
+       priv->media_timer.expires = jiffies + HZ;
+       add_timer(&priv->media_timer);
 }
 
-#endif /* MONITOR */
-
 
 /*****************************************************************************
 ******************************************************************************
index 2eb33a2..e992841 100644
@@ -195,6 +195,7 @@ struct tlan_priv {
        u32                     timer_set_at;
        u32                     timer_type;
        struct timer_list       timer;
+       struct timer_list       media_timer;
        struct board            *adapter;
        u32                     adapter_rev;
        u32                     aui;
@@ -206,9 +207,7 @@ struct tlan_priv {
        u8                      tlan_rev;
        u8                      tlan_full_duplex;
        spinlock_t              lock;
-       u8                      link;
        struct work_struct                      tlan_tqueue;
-       u8                      neg_be_verbose;
 };
 
 
@@ -219,7 +218,6 @@ struct tlan_priv {
         *
         ****************************************************************/
 
-#define TLAN_TIMER_LINK_BEAT           1
 #define TLAN_TIMER_ACTIVITY            2
 #define TLAN_TIMER_PHY_PDOWN           3
 #define TLAN_TIMER_PHY_PUP             4
@@ -241,6 +239,7 @@ struct tlan_priv {
 #define TLAN_EEPROM_ACK                0
 #define TLAN_EEPROM_STOP       1
 
+#define TLAN_EEPROM_SIZE       256
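
The media_timer added here uses the classic timer API of this kernel generation (long before timer_setup()): the callback receives an unsigned long cookie, which tlan points at the net_device. A minimal sketch of that life cycle, with a hypothetical my_monitor() callback and the timer kept in a static for brevity (it normally lives in priv):

    static struct timer_list timer;

    static void my_monitor(unsigned long data)
    {
            struct net_device *dev = (struct net_device *)data;

            /* ... poll the PHY, update carrier state on dev ... */

            mod_timer(&timer, jiffies + HZ);        /* re-arm, 1s out */
    }

    /* setup (e.g. in open()) */
    init_timer(&timer);
    timer.function = my_monitor;
    timer.data     = (unsigned long)dev;
    timer.expires  = jiffies + HZ;
    add_timer(&timer);

    /* teardown: wait for a running callback to finish */
    del_timer_sync(&timer);
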
 
 
 
index 14389f8..69557a2 100644
@@ -2191,7 +2191,6 @@ static void tile_net_setup(struct net_device *dev)
 static void tile_net_dev_init(const char *name, const uint8_t *mac)
 {
        int ret;
-       int i;
        struct net_device *dev;
        struct tile_net_priv *priv;
 
@@ -2202,8 +2201,8 @@ static void tile_net_dev_init(const char *name, const uint8_t *mac)
        /* Allocate the device structure.  Normally, "name" is a
         * template, instantiated by register_netdev(), but not for us.
         */
-       dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
-                              NR_CPUS, 1);
+       dev = alloc_netdev_mqs(sizeof(*priv), name, NET_NAME_UNKNOWN,
+                              tile_net_setup, NR_CPUS, 1);
        if (!dev) {
                pr_err("alloc_netdev_mqs(%s) failed\n", name);
                return;
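
This hunk, and the string of alloc_netdev() hunks in the hamradio drivers further down, all track the same 3.17 interface change: the allocators grew a name-assignment-type argument (NET_NAME_UNKNOWN, NET_NAME_ENUM, NET_NAME_USER, ...) so user space can later query how an interface got its name. The updated call shapes, sketched with a hypothetical my_priv/my_setup:

    dev = alloc_netdev(sizeof(struct my_priv), "ax%d",
                       NET_NAME_UNKNOWN, my_setup);

    /* the multi-queue variant gains the same argument */
    dev = alloc_netdev_mqs(sizeof(struct my_priv), name,
                           NET_NAME_UNKNOWN, my_setup,
                           num_txqs, num_rxqs);

Drivers that cannot say anything meaningful about naming pass NET_NAME_UNKNOWN, which is what every conversion in this merge does.
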
index e5a5c5d..88c7121 100644
@@ -2292,7 +2292,8 @@ static struct net_device *tile_net_dev_init(const char *name)
         * tile_net_setup(), and saves "name".  Normally, "name" is a
         * template, instantiated by register_netdev(), but not for us.
         */
-       dev = alloc_netdev(sizeof(*priv), name, tile_net_setup);
+       dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
+                          tile_net_setup);
        if (!dev) {
                pr_err("alloc_netdev(%s) failed\n", name);
                return NULL;
index d568af1..0a7f2e7 100644
@@ -723,13 +723,10 @@ static int gelic_wl_get_scan(struct net_device *netdev,
                /* If a scan in progress, caller should call me again */
                ret = -EAGAIN;
                goto out;
-               break;
-
        case GELIC_WL_SCAN_STAT_INIT:
                /* last scan request failed or never issued */
                ret = -ENODEV;
                goto out;
-               break;
        case GELIC_WL_SCAN_STAT_GOT_LIST:
                /* ok, use current list */
                break;
@@ -1831,25 +1828,18 @@ static const char *wpasecstr(enum gelic_eurus_wpa_security sec)
        switch (sec) {
        case GELIC_EURUS_WPA_SEC_NONE:
                return "NONE";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA_TKIP_TKIP:
                return "WPA_TKIP_TKIP";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA_TKIP_AES:
                return "WPA_TKIP_AES";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA_AES_AES:
                return "WPA_AES_AES";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA2_TKIP_TKIP:
                return "WPA2_TKIP_TKIP";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA2_TKIP_AES:
                return "WPA2_TKIP_AES";
-               break;
        case GELIC_EURUS_WPA_SEC_WPA2_AES_AES:
                return "WPA2_AES_AES";
-               break;
        }
        return "";
 };
index 4ef818a..8a6e5c2 100644
@@ -72,7 +72,7 @@ void temac_iow(struct temac_local *lp, int offset, u32 value)
 
 int temac_indirect_busywait(struct temac_local *lp)
 {
-       long end = jiffies + 2;
+       unsigned long end = jiffies + 2;
 
        while (!(temac_ior(lp, XTE_RDY0_OFFSET) & XTE_RDY0_HARD_ACS_RDY_MASK)) {
                if (time_before_eq(end, jiffies)) {
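
This one-word fix, repeated for axienet and xemaclite below, matters because jiffies is an unsigned long: storing the deadline in a signed long invites sign-conversion trouble around the wrap point, while the time_before_eq()/time_after() macros are written for jiffies-typed (unsigned) operands. The intended busy-wait pattern, as a sketch with a hypothetical hw_ready():

    unsigned long end = jiffies + 2;       /* deadline, ~2 ticks out */

    while (!hw_ready(lp)) {
            if (time_before_eq(end, jiffies))
                    return -ETIMEDOUT;     /* robust across jiffies wrap */
            cpu_relax();
    }
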
index d4abf47..3b67d60 100644
@@ -19,7 +19,7 @@
 /* Wait till MDIO interface is ready to accept a new transaction.*/
 int axienet_mdio_wait_until_ready(struct axienet_local *lp)
 {
-       long end = jiffies + 2;
+       unsigned long end = jiffies + 2;
        while (!(axienet_ior(lp, XAE_MDIO_MCR_OFFSET) &
                 XAE_MDIO_MCR_READY_MASK)) {
                if (time_before_eq(end, jiffies)) {
index 8c4aed3..782bb93 100644
@@ -695,7 +695,7 @@ static irqreturn_t xemaclite_interrupt(int irq, void *dev_id)
 
 static int xemaclite_mdio_wait(struct net_local *lp)
 {
-       long end = jiffies + 2;
+       unsigned long end = jiffies + 2;
 
        /* wait for the MDIO interface to not be busy or timeout
           after some time.
index eb78203..6eb849a 100644
  *             14 Jun 2005     macro           Use irqreturn_t.
  *             23 Oct 2006     macro           Big-endian host support.
  *             14 Dec 2006     macro           TURBOchannel support.
+ *             01 Jul 2014     macro           Fixes for DMA on 64-bit hosts.
  */
 
 /* Include files */
 
 /* Version information string should be updated prior to each new release!  */
 #define DRV_NAME "defxx"
-#define DRV_VERSION "v1.10"
-#define DRV_RELDATE "2006/12/14"
+#define DRV_VERSION "v1.11"
+#define DRV_RELDATE "2014/07/01"
 
 static char version[] =
        DRV_NAME ": " DRV_VERSION " " DRV_RELDATE
@@ -291,7 +292,11 @@ static int         dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
 
 static int             dfx_rcv_init(DFX_board_t *bp, int get_buffers);
 static void            dfx_rcv_queue_process(DFX_board_t *bp);
+#ifdef DYNAMIC_BUFFERS
 static void            dfx_rcv_flush(DFX_board_t *bp);
+#else
+static inline void     dfx_rcv_flush(DFX_board_t *bp) {}
+#endif
 
 static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
                                     struct net_device *dev);
@@ -1122,17 +1127,16 @@ static int dfx_driver_init(struct net_device *dev, const char *print_name,
 
        /* Display virtual and physical addresses if debug driver */
 
-       DBG_printk("%s: Descriptor block virt = %0lX, phys = %0X\n",
-                  print_name,
-                  (long)bp->descr_block_virt, bp->descr_block_phys);
-       DBG_printk("%s: Command Request buffer virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->cmd_req_virt, bp->cmd_req_phys);
-       DBG_printk("%s: Command Response buffer virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->cmd_rsp_virt, bp->cmd_rsp_phys);
-       DBG_printk("%s: Receive buffer block virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->rcv_block_virt, bp->rcv_block_phys);
-       DBG_printk("%s: Consumer block virt = %0lX, phys = %0X\n",
-                  print_name, (long)bp->cons_block_virt, bp->cons_block_phys);
+       DBG_printk("%s: Descriptor block virt = %p, phys = %pad\n",
+                  print_name, bp->descr_block_virt, &bp->descr_block_phys);
+       DBG_printk("%s: Command Request buffer virt = %p, phys = %pad\n",
+                  print_name, bp->cmd_req_virt, &bp->cmd_req_phys);
+       DBG_printk("%s: Command Response buffer virt = %p, phys = %pad\n",
+                  print_name, bp->cmd_rsp_virt, &bp->cmd_rsp_phys);
+       DBG_printk("%s: Receive buffer block virt = %p, phys = %pad\n",
+                  print_name, bp->rcv_block_virt, &bp->rcv_block_phys);
+       DBG_printk("%s: Consumer block virt = %p, phys = %pad\n",
+                  print_name, bp->cons_block_virt, &bp->cons_block_phys);
 
        return DFX_K_SUCCESS;
 }
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
  *     Align an sk_buff to a boundary power of 2
  *
  */
-
+#ifdef DYNAMIC_BUFFERS
 static void my_skb_align(struct sk_buff *skb, int n)
 {
        unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
 
        skb_reserve(skb, v - x);
 }
-
+#endif
 
 /*
  * ================
@@ -2923,21 +2927,35 @@ static int dfx_rcv_init(DFX_board_t *bp, int get_buffers)
        for (i = 0; i < (int)(bp->rcv_bufs_to_post); i++)
                for (j = 0; (i + j) < (int)PI_RCV_DATA_K_NUM_ENTRIES; j += bp->rcv_bufs_to_post)
                {
-                       struct sk_buff *newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE, GFP_NOIO);
+                       struct sk_buff *newskb;
+                       dma_addr_t dma_addr;
+
+                       newskb = __netdev_alloc_skb(bp->dev, NEW_SKB_SIZE,
+                                                   GFP_NOIO);
                        if (!newskb)
                                return -ENOMEM;
-                       bp->descr_block_virt->rcv_data[i+j].long_0 = (u32) (PI_RCV_DESCR_M_SOP |
-                               ((PI_RCV_DATA_K_SIZE_MAX / PI_ALIGN_K_RCV_DATA_BUFF) << PI_RCV_DESCR_V_SEG_LEN));
                        /*
                         * align to 128 bytes for compatibility with
                         * the old EISA boards.
                         */
 
                        my_skb_align(newskb, 128);
+                       dma_addr = dma_map_single(bp->bus_dev,
+                                                 newskb->data,
+                                                 PI_RCV_DATA_K_SIZE_MAX,
+                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(bp->bus_dev, dma_addr)) {
+                               dev_kfree_skb(newskb);
+                               return -ENOMEM;
+                       }
+                       bp->descr_block_virt->rcv_data[i + j].long_0 =
+                               (u32)(PI_RCV_DESCR_M_SOP |
+                                     ((PI_RCV_DATA_K_SIZE_MAX /
+                                       PI_ALIGN_K_RCV_DATA_BUFF) <<
+                                      PI_RCV_DESCR_V_SEG_LEN));
                        bp->descr_block_virt->rcv_data[i + j].long_1 =
-                               (u32)dma_map_single(bp->bus_dev, newskb->data,
-                                                   NEW_SKB_SIZE,
-                                                   DMA_FROM_DEVICE);
+                               (u32)dma_addr;
+
                        /*
                         * p_rcv_buff_va is only used inside the
                         * kernel so we put the skb pointer here.
@@ -3004,7 +3022,7 @@ static void dfx_rcv_queue_process(
        PI_TYPE_2_CONSUMER      *p_type_2_cons;         /* ptr to rcv/xmt consumer block register */
        char                            *p_buff;                        /* ptr to start of packet receive buffer (FMC descriptor) */
        u32                                     descr, pkt_len;         /* FMC descriptor field and packet length */
-       struct sk_buff          *skb;                           /* pointer to a sk_buff to hold incoming packet data */
+       struct sk_buff          *skb = NULL;                    /* pointer to a sk_buff to hold incoming packet data */
 
        /* Service all consumed LLC receive frames */
 
@@ -3012,7 +3030,7 @@ static void dfx_rcv_queue_process(
        while (bp->rcv_xmt_reg.index.rcv_comp != p_type_2_cons->index.rcv_cons)
                {
                /* Process any errors */
-
+               dma_addr_t dma_addr;
                int entry;
 
                entry = bp->rcv_xmt_reg.index.rcv_comp;
@@ -3021,6 +3039,11 @@ static void dfx_rcv_queue_process(
 #else
                p_buff = bp->p_rcv_buff_va[entry];
 #endif
+               dma_addr = bp->descr_block_virt->rcv_data[entry].long_1;
+               dma_sync_single_for_cpu(bp->bus_dev,
+                                       dma_addr + RCV_BUFF_K_DESCR,
+                                       sizeof(u32),
+                                       DMA_FROM_DEVICE);
                memcpy(&descr, p_buff + RCV_BUFF_K_DESCR, sizeof(u32));
 
                if (descr & PI_FMC_DESCR_M_RCC_FLUSH)
@@ -3042,31 +3065,46 @@ static void dfx_rcv_queue_process(
                                bp->rcv_length_errors++;
                        else{
 #ifdef DYNAMIC_BUFFERS
+                               struct sk_buff *newskb = NULL;
+
                                if (pkt_len > SKBUFF_RX_COPYBREAK) {
-                                       struct sk_buff *newskb;
+                                       dma_addr_t new_dma_addr;
 
-                                       newskb = dev_alloc_skb(NEW_SKB_SIZE);
+                                       newskb = netdev_alloc_skb(bp->dev,
+                                                                 NEW_SKB_SIZE);
                                        if (newskb){
+                                               my_skb_align(newskb, 128);
+                                               new_dma_addr = dma_map_single(
+                                                               bp->bus_dev,
+                                                               newskb->data,
+                                                               PI_RCV_DATA_K_SIZE_MAX,
+                                                               DMA_FROM_DEVICE);
+                                               if (dma_mapping_error(
+                                                               bp->bus_dev,
+                                                               new_dma_addr)) {
+                                                       dev_kfree_skb(newskb);
+                                                       newskb = NULL;
+                                               }
+                                       }
+                                       if (newskb) {
                                                rx_in_place = 1;
 
-                                               my_skb_align(newskb, 128);
                                                skb = (struct sk_buff *)bp->p_rcv_buff_va[entry];
                                                dma_unmap_single(bp->bus_dev,
-                                                       bp->descr_block_virt->rcv_data[entry].long_1,
-                                                       NEW_SKB_SIZE,
+                                                       dma_addr,
+                                                       PI_RCV_DATA_K_SIZE_MAX,
                                                        DMA_FROM_DEVICE);
                                                skb_reserve(skb, RCV_BUFF_K_PADDING);
                                                bp->p_rcv_buff_va[entry] = (char *)newskb;
-                                               bp->descr_block_virt->rcv_data[entry].long_1 =
-                                                       (u32)dma_map_single(bp->bus_dev,
-                                                               newskb->data,
-                                                               NEW_SKB_SIZE,
-                                                               DMA_FROM_DEVICE);
-                                       } else
-                                               skb = NULL;
-                               } else
+                                               bp->descr_block_virt->rcv_data[entry].long_1 = (u32)new_dma_addr;
+                                       }
+                               }
+                               if (!newskb)
 #endif
-                                       skb = dev_alloc_skb(pkt_len+3); /* alloc new buffer to pass up, add room for PRH */
+                                       /* Alloc new buffer to pass up,
+                                        * add room for PRH. */
+                                       skb = netdev_alloc_skb(bp->dev,
+                                                              pkt_len + 3);
                                if (skb == NULL)
                                        {
                                        printk("%s: Could not allocate receive buffer.  Dropping packet.\n", bp->dev->name);
@@ -3074,11 +3112,14 @@ static void dfx_rcv_queue_process(
                                        break;
                                        }
                                else {
-#ifndef DYNAMIC_BUFFERS
-                                       if (! rx_in_place)
-#endif
-                                       {
+                                       if (!rx_in_place) {
                                                /* Receive buffer allocated, pass receive packet up */
+                                               dma_sync_single_for_cpu(
+                                                       bp->bus_dev,
+                                                       dma_addr +
+                                                       RCV_BUFF_K_PADDING,
+                                                       pkt_len + 3,
+                                                       DMA_FROM_DEVICE);
 
                                                skb_copy_to_linear_data(skb,
                                                               p_buff + RCV_BUFF_K_PADDING,
@@ -3181,6 +3222,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
        u8                      prod;                           /* local transmit producer index */
        PI_XMT_DESCR            *p_xmt_descr;           /* ptr to transmit descriptor block entry */
        XMT_DRIVER_DESCR        *p_xmt_drv_descr;       /* ptr to transmit driver descriptor */
+       dma_addr_t              dma_addr;
        unsigned long           flags;
 
        netif_stop_queue(dev);
@@ -3228,6 +3270,20 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
                        }
                }
 
+       /* Write the three PRH bytes immediately before the FC byte */
+
+       skb_push(skb, 3);
+       skb->data[0] = DFX_PRH0_BYTE;   /* these byte values are defined */
+       skb->data[1] = DFX_PRH1_BYTE;   /* in the Motorola FDDI MAC chip */
+       skb->data[2] = DFX_PRH2_BYTE;   /* specification */
+
+       dma_addr = dma_map_single(bp->bus_dev, skb->data, skb->len,
+                                 DMA_TO_DEVICE);
+       if (dma_mapping_error(bp->bus_dev, dma_addr)) {
+               skb_pull(skb, 3);
+               return NETDEV_TX_BUSY;
+       }
+
        spin_lock_irqsave(&bp->lock, flags);
 
        /* Get the current producer and the next free xmt data descriptor */
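
Moving the dma_map_single() ahead of the spinlock and checking it is the canonical streaming-DMA pattern: mapping can fail (IOMMU exhaustion, bounce-buffer pressure), so the returned handle must pass dma_mapping_error() before any hardware descriptor or driver state is touched, and every successful map needs a matching unmap. A condensed sketch of the pattern:

    dma_addr_t addr = dma_map_single(dev, skb->data, skb->len,
                                     DMA_TO_DEVICE);
    if (dma_mapping_error(dev, addr)) {
            /* undo skb_push() etc. before bailing out */
            return NETDEV_TX_BUSY;
    }
    /* ... write addr into the transmit descriptor ... */

    /* on completion (or on any later error path): */
    dma_unmap_single(dev, addr, skb->len, DMA_TO_DEVICE);
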
@@ -3248,13 +3304,6 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
 
        p_xmt_drv_descr = &(bp->xmt_drv_descr_blk[prod++]);     /* also bump producer index */
 
-       /* Write the three PRH bytes immediately before the FC byte */
-
-       skb_push(skb,3);
-       skb->data[0] = DFX_PRH0_BYTE;   /* these byte values are defined */
-       skb->data[1] = DFX_PRH1_BYTE;   /* in the Motorola FDDI MAC chip */
-       skb->data[2] = DFX_PRH2_BYTE;   /* specification */
-
        /*
         * Write the descriptor with buffer info and bump producer
         *
@@ -3283,8 +3332,7 @@ static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
         */
 
        p_xmt_descr->long_0     = (u32) (PI_XMT_DESCR_M_SOP | PI_XMT_DESCR_M_EOP | ((skb->len) << PI_XMT_DESCR_V_SEG_LEN));
-       p_xmt_descr->long_1 = (u32)dma_map_single(bp->bus_dev, skb->data,
-                                                 skb->len, DMA_TO_DEVICE);
+       p_xmt_descr->long_1 = (u32)dma_addr;
 
        /*
         * Verify that descriptor is actually available
@@ -3447,16 +3495,17 @@ static void dfx_rcv_flush( DFX_board_t *bp )
                {
                        struct sk_buff *skb;
                        skb = (struct sk_buff *)bp->p_rcv_buff_va[i+j];
-                       if (skb)
+                       if (skb) {
+                               dma_unmap_single(bp->bus_dev,
+                                                bp->descr_block_virt->rcv_data[i+j].long_1,
+                                                PI_RCV_DATA_K_SIZE_MAX,
+                                                DMA_FROM_DEVICE);
                                dev_kfree_skb(skb);
+                       }
                        bp->p_rcv_buff_va[i+j] = NULL;
                }
 
        }
-#else
-static inline void dfx_rcv_flush( DFX_board_t *bp )
-{
-}
 #endif /* DYNAMIC_BUFFERS */
 
 /*
index 19a6f64..adb63f3 100644
@@ -1693,7 +1693,7 @@ typedef union
 /* Only execute special print call when debug driver was built */
 
 #ifdef DEFXX_DEBUG
-#define DBG_printk(args...) printk(## args)
+#define DBG_printk(args...) printk(args)
 #else
 #define DBG_printk(args...)
 #endif
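
The macro fix above removes an ill-placed paste operator: `##` may not begin a macro's replacement list, so `printk(## args)` never even preprocessed; its one legitimate variadic use is gluing a comma to the argument pack so the comma disappears when the pack is empty. Both valid shapes, sketched (dbg_pr is a hypothetical name):

    /* plain pass-through of a named variadic parameter */
    #define DBG_printk(args...) printk(args)

    /* prefix form: ", ## args" swallows the comma when empty */
    #define dbg_pr(fmt, args...) printk(KERN_DEBUG "defxx: " fmt, ## args)
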
index 66e2b19..c3c4051 100644
@@ -596,7 +596,8 @@ static int sixpack_open(struct tty_struct *tty)
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
 
-       dev = alloc_netdev(sizeof(struct sixpack), "sp%d", sp_setup);
+       dev = alloc_netdev(sizeof(struct sixpack), "sp%d", NET_NAME_UNKNOWN,
+                          sp_setup);
        if (!dev) {
                err = -ENOMEM;
                goto out;
index 484f77e..a98c153 100644
@@ -1206,7 +1206,7 @@ static int __init init_baycomepp(void)
                struct net_device *dev;
                
                dev = alloc_netdev(sizeof(struct baycom_state), "bce%d",
-                                  baycom_epp_dev_setup);
+                                  NET_NAME_UNKNOWN, baycom_epp_dev_setup);
 
                if (!dev) {
                        printk(KERN_WARNING "bce%d : out of memory\n", i);
index d50b23c..c2894e4 100644
@@ -501,8 +501,8 @@ static int bpq_new_device(struct net_device *edev)
        struct net_device *ndev;
        struct bpqdev *bpq;
 
-       ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d",
-                          bpq_setup);
+       ndev = alloc_netdev(sizeof(struct bpqdev), "bpq%d", NET_NAME_UNKNOWN,
+                           bpq_setup);
        if (!ndev)
                return -ENOMEM;
 
index 6636022..0fad408 100644
@@ -466,7 +466,7 @@ static int __init setup_adapter(int card_base, int type, int n)
        if (!info)
                goto out;
 
-       info->dev[0] = alloc_netdev(0, "", dev_setup);
+       info->dev[0] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
        if (!info->dev[0]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
@@ -474,7 +474,7 @@ static int __init setup_adapter(int card_base, int type, int n)
                goto out1;
        }
 
-       info->dev[1] = alloc_netdev(0, "", dev_setup);
+       info->dev[1] = alloc_netdev(0, "", NET_NAME_UNKNOWN, dev_setup);
        if (!info->dev[1]) {
                printk(KERN_ERR "dmascc: "
                       "could not allocate memory for %s at %#3x\n",
index 5d78c1d..c67a272 100644
@@ -699,7 +699,7 @@ struct net_device *hdlcdrv_register(const struct hdlcdrv_ops *ops,
        if (privsize < sizeof(struct hdlcdrv_state))
                privsize = sizeof(struct hdlcdrv_state);
 
-       dev = alloc_netdev(privsize, ifname, hdlcdrv_setup);
+       dev = alloc_netdev(privsize, ifname, NET_NAME_UNKNOWN, hdlcdrv_setup);
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
index 8a6c720..f990bb1 100644
@@ -734,7 +734,8 @@ static int mkiss_open(struct tty_struct *tty)
        if (tty->ops->write == NULL)
                return -EOPNOTSUPP;
 
-       dev = alloc_netdev(sizeof(struct mkiss), "ax%d", ax_setup);
+       dev = alloc_netdev(sizeof(struct mkiss), "ax%d", NET_NAME_UNKNOWN,
+                          ax_setup);
        if (!dev) {
                err = -ENOMEM;
                goto out;
index 4bc6ee8..57be9e0 100644
@@ -1515,7 +1515,7 @@ static int scc_net_alloc(const char *name, struct scc_channel *scc)
        int err;
        struct net_device *dev;
 
-       dev = alloc_netdev(0, name, scc_net_setup);
+       dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, scc_net_setup);
        if (!dev) 
                return -ENOMEM;
 
index 8190165..717433c 100644
@@ -1147,7 +1147,7 @@ static int __init yam_init_driver(void)
                sprintf(name, "yam%d", i);
                
                dev = alloc_netdev(sizeof(struct yam_port), name,
-                                  yam_setup);
+                                  NET_NAME_UNKNOWN, yam_setup);
                if (!dev) {
                        pr_err("yam: cannot allocate net device\n");
                        err = -ENOMEM;
index 6cc37c1..02a3ee2 100644
@@ -170,6 +170,7 @@ struct rndis_device {
 
        enum rndis_device_state state;
        bool link_state;
+       bool link_change;
        atomic_t new_req_id;
 
        spinlock_t request_lock;
@@ -185,7 +186,7 @@ int netvsc_device_remove(struct hv_device *device);
 int netvsc_send(struct hv_device *device,
                struct hv_netvsc_packet *packet);
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
-                               unsigned int status);
+                               struct rndis_message *resp);
 int netvsc_recv_callback(struct hv_device *device_obj,
                        struct hv_netvsc_packet *packet,
                        struct ndis_tcp_ip_checksum_info *csum_info);
@@ -584,7 +585,7 @@ struct nvsp_message {
 
 #define NETVSC_RECEIVE_BUFFER_SIZE             (1024*1024*16)  /* 16MB */
 #define NETVSC_RECEIVE_BUFFER_SIZE_LEGACY      (1024*1024*15)  /* 15MB */
-#define NETVSC_SEND_BUFFER_SIZE                        (1024 * 1024)   /* 1MB */
+#define NETVSC_SEND_BUFFER_SIZE                        (1024 * 1024 * 16)   /* 16MB */
 #define NETVSC_INVALID_INDEX                   -1
 
 
index c041f63..66979cf 100644
@@ -189,12 +189,11 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
                                   "unable to teardown send buffer's gpadl\n");
                        return ret;
                }
-               net_device->recv_buf_gpadl_handle = 0;
+               net_device->send_buf_gpadl_handle = 0;
        }
        if (net_device->send_buf) {
                /* Free up the send buffer */
-               free_pages((unsigned long)net_device->send_buf,
-                          get_order(net_device->send_buf_size));
+               vfree(net_device->send_buf);
                net_device->send_buf = NULL;
        }
        kfree(net_device->send_section_map);
@@ -303,9 +302,7 @@ static int netvsc_init_buf(struct hv_device *device)
 
        /* Now setup the send buffer.
         */
-       net_device->send_buf =
-               (void *)__get_free_pages(GFP_KERNEL|__GFP_ZERO,
-                                        get_order(net_device->send_buf_size));
+       net_device->send_buf = vzalloc(net_device->send_buf_size);
        if (!net_device->send_buf) {
                netdev_err(ndev, "unable to allocate send "
                           "buffer of size %d\n", net_device->send_buf_size);
@@ -378,8 +375,10 @@ static int netvsc_init_buf(struct hv_device *device)
 
        net_device->send_section_map =
                kzalloc(net_device->map_words * sizeof(ulong), GFP_KERNEL);
-       if (net_device->send_section_map == NULL)
+       if (net_device->send_section_map == NULL) {
+               ret = -ENOMEM;
                goto cleanup;
+       }
 
        goto exit;
 
@@ -1094,9 +1093,7 @@ close:
        vmbus_close(device->channel);
 
 cleanup:
-
-       if (net_device)
-               kfree(net_device);
+       kfree(net_device);
 
        return ret;
 }
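
Switching from __get_free_pages() to vzalloc() matters once NETVSC_SEND_BUFFER_SIZE grows to 16MB: that would be an order-12 physically contiguous allocation, likely to fail on a fragmented system, whereas vmalloc memory only needs to be virtually contiguous. A minimal sketch of the allocation pattern, with hypothetical names:

    #include <linux/vmalloc.h>

    static void *send_buf;

    static int alloc_send_buf(size_t size)
    {
            /* zeroed, virtually contiguous; no high-order page
             * allocation required */
            send_buf = vzalloc(size);
            if (!send_buf)
                    return -ENOMEM;
            return 0;
    }

    static void free_send_buf(void)
    {
            vfree(send_buf);        /* vfree(NULL) is a no-op */
            send_buf = NULL;
    }
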
index 4fd71b7..a9c5eaa 100644 (file)
@@ -579,8 +579,9 @@ drop:
  * netvsc_linkstatus_callback - Link up/down notification
  */
 void netvsc_linkstatus_callback(struct hv_device *device_obj,
-                                      unsigned int status)
+                               struct rndis_message *resp)
 {
+       struct rndis_indicate_status *indicate = &resp->msg.indicate_status;
        struct net_device *net;
        struct net_device_context *ndev_ctx;
        struct netvsc_device *net_device;
@@ -589,7 +590,19 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
        net_device = hv_get_drvdata(device_obj);
        rdev = net_device->extension;
 
-       rdev->link_state = status != 1;
+       switch (indicate->status) {
+       case RNDIS_STATUS_MEDIA_CONNECT:
+               rdev->link_state = false;
+               break;
+       case RNDIS_STATUS_MEDIA_DISCONNECT:
+               rdev->link_state = true;
+               break;
+       case RNDIS_STATUS_NETWORK_CHANGE:
+               rdev->link_change = true;
+               break;
+       default:
+               return;
+       }
 
        net = net_device->ndev;
 
@@ -597,7 +610,7 @@ void netvsc_linkstatus_callback(struct hv_device *device_obj,
                return;
 
        ndev_ctx = netdev_priv(net);
-       if (status == 1) {
+       if (!rdev->link_state) {
                schedule_delayed_work(&ndev_ctx->dwork, 0);
                schedule_delayed_work(&ndev_ctx->dwork, msecs_to_jiffies(20));
        } else {
@@ -736,6 +749,14 @@ static int netvsc_set_mac_addr(struct net_device *ndev, void *p)
        return err;
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void netvsc_poll_controller(struct net_device *net)
+{
+       /* As netvsc_start_xmit() works synchronously we don't have to
+        * trigger anything here.
+        */
+}
+#endif
 
 static const struct ethtool_ops ethtool_ops = {
        .get_drvinfo    = netvsc_get_drvinfo,
@@ -751,6 +772,9 @@ static const struct net_device_ops device_ops = {
        .ndo_validate_addr =            eth_validate_addr,
        .ndo_set_mac_address =          netvsc_set_mac_addr,
        .ndo_select_queue =             netvsc_select_queue,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller =          netvsc_poll_controller,
+#endif
 };
 
 /*
@@ -767,7 +791,9 @@ static void netvsc_link_change(struct work_struct *w)
        struct net_device *net;
        struct netvsc_device *net_device;
        struct rndis_device *rdev;
-       bool notify;
+       bool notify, refresh = false;
+       char *argv[] = { "/etc/init.d/network", "restart", NULL };
+       char *envp[] = { "HOME=/", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
 
        rtnl_lock();
 
@@ -782,10 +808,17 @@ static void netvsc_link_change(struct work_struct *w)
        } else {
                netif_carrier_on(net);
                notify = true;
+               if (rdev->link_change) {
+                       rdev->link_change = false;
+                       refresh = true;
+               }
        }
 
        rtnl_unlock();
 
+       if (refresh)
+               call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
+
        if (notify)
                netdev_notify_peers(net);
 }
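
The usermode helper is deliberately invoked only after rtnl_unlock(): the spawned script will itself take the rtnl lock when it reconfigures interfaces, so calling it under the lock could deadlock. A sketch of the invocation idiom (the script path mirrors the one above and is distribution-specific):

    #include <linux/kmod.h>

    static int request_network_restart(void)
    {
            char *argv[] = { "/etc/init.d/network", "restart", NULL };
            char *envp[] = { "HOME=/",
                             "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

            /* UMH_WAIT_EXEC returns once the helper has been started,
             * without waiting for it to exit */
            return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_EXEC);
    }
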
index 99c527a..2b86f0b 100644 (file)
@@ -320,25 +320,6 @@ static void rndis_filter_receive_response(struct rndis_device *dev,
        }
 }
 
-static void rndis_filter_receive_indicate_status(struct rndis_device *dev,
-                                            struct rndis_message *resp)
-{
-       struct rndis_indicate_status *indicate =
-                       &resp->msg.indicate_status;
-
-       if (indicate->status == RNDIS_STATUS_MEDIA_CONNECT) {
-               netvsc_linkstatus_callback(
-                       dev->net_dev->dev, 1);
-       } else if (indicate->status == RNDIS_STATUS_MEDIA_DISCONNECT) {
-               netvsc_linkstatus_callback(
-                       dev->net_dev->dev, 0);
-       } else {
-               /*
-                * TODO:
-                */
-       }
-}
-
 /*
  * Get the Per-Packet-Info with the specified type
  * return NULL if not found.
@@ -464,7 +445,7 @@ int rndis_filter_receive(struct hv_device *dev,
 
        case RNDIS_MSG_INDICATE:
                /* notification msgs */
-               rndis_filter_receive_indicate_status(rndis_dev, rndis_msg);
+               netvsc_linkstatus_callback(dev, rndis_msg);
                break;
        default:
                netdev_err(ndev,
index 3e89bea..391a916 100644 (file)
@@ -34,6 +34,7 @@ config IEEE802154_AT86RF230
        depends on IEEE802154_DRIVERS && MAC802154
        tristate "AT86RF230/231/233/212 transceiver driver"
        depends on SPI
+       select REGMAP_SPI
        ---help---
          Say Y here to enable the at86rf230/231/233/212 SPI 802.15.4 wireless
          controller.
@@ -51,3 +52,14 @@ config IEEE802154_MRF24J40
 
          This driver can also be built as a module. To do so, say M here.
          The module will be called 'mrf24j40'.
+
+config IEEE802154_CC2520
+       depends on IEEE802154_DRIVERS && MAC802154
+       tristate "CC2520 transceiver driver"
+       depends on SPI
+       ---help---
+         Say Y here to enable the CC2520 SPI 802.15.4 wireless
+         controller.
+
+         This driver can also be built as a module. To do so, say M here.
+         The module will be called 'cc2520'.
index abb0c08..655cb95 100644 (file)
@@ -2,3 +2,4 @@ obj-$(CONFIG_IEEE802154_FAKEHARD) += fakehard.o
 obj-$(CONFIG_IEEE802154_FAKELB) += fakelb.o
 obj-$(CONFIG_IEEE802154_AT86RF230) += at86rf230.o
 obj-$(CONFIG_IEEE802154_MRF24J40) += mrf24j40.o
+obj-$(CONFIG_IEEE802154_CC2520) += cc2520.o
index 4517b14..c9d2a75 100644 (file)
@@ -19,6 +19,7 @@
  * Written by:
  * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
  * Alexander Smirnov <alex.bluesman.smirnov@gmail.com>
+ * Alexander Aring <aar@pengutronix.de>
  */
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/irq.h>
 #include <linux/gpio.h>
 #include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/workqueue.h>
 #include <linux/spinlock.h>
 #include <linux/spi/spi.h>
 #include <linux/spi/at86rf230.h>
+#include <linux/regmap.h>
 #include <linux/skbuff.h>
 #include <linux/of_gpio.h>
 
+#include <net/ieee802154.h>
 #include <net/mac802154.h>
 #include <net/wpan-phy.h>
 
-struct at86rf230_local {
-       struct spi_device *spi;
+struct at86rf230_local;
+/* at86rf2xx chip-dependent data.
+ * All timings are in us.
+ */
+struct at86rf2xx_chip_data {
+       u16 t_sleep_cycle;
+       u16 t_channel_switch;
+       u16 t_reset_to_off;
+       u16 t_off_to_aack;
+       u16 t_off_to_tx_on;
+       u16 t_frame;
+       u16 t_p_ack;
+       /* short interframe spacing time */
+       u16 t_sifs;
+       /* long interframe spacing time */
+       u16 t_lifs;
+       /* completion timeout for tx in msecs */
+       u16 t_tx_timeout;
+       int rssi_base_val;
 
-       u8 part;
-       u8 vers;
+       int (*set_channel)(struct at86rf230_local *, int, int);
+       int (*get_desense_steps)(struct at86rf230_local *, s32);
+};
 
-       u8 buf[2];
-       struct mutex bmux;
+#define AT86RF2XX_MAX_BUF (127 + 3)
 
-       struct work_struct irqwork;
-       struct completion tx_complete;
+struct at86rf230_state_change {
+       struct at86rf230_local *lp;
+
+       struct spi_message msg;
+       struct spi_transfer trx;
+       u8 buf[AT86RF2XX_MAX_BUF];
+
+       void (*complete)(void *context);
+       u8 from_state;
+       u8 to_state;
+};
+
+struct at86rf230_local {
+       struct spi_device *spi;
 
        struct ieee802154_dev *dev;
+       struct at86rf2xx_chip_data *data;
+       struct regmap *regmap;
 
-       spinlock_t lock;
-       bool irq_busy;
-       bool is_tx;
-       bool tx_aret;
+       struct completion state_complete;
+       struct at86rf230_state_change state;
 
-       int rssi_base_val;
-};
+       struct at86rf230_state_change irq;
 
-static bool is_rf212(struct at86rf230_local *local)
-{
-       return local->part == 7;
-}
+       bool tx_aret;
+       bool is_tx;
+       /* spinlock for is_tx protection */
+       spinlock_t lock;
+       struct completion tx_complete;
+       struct sk_buff *tx_skb;
+       struct at86rf230_state_change tx;
+};
 
 #define        RG_TRX_STATUS   (0x01)
 #define        SR_TRX_STATUS           0x01, 0x1f, 0
@@ -256,344 +289,753 @@ static bool is_rf212(struct at86rf230_local *local)
 #define STATE_BUSY_RX_AACK_NOCLK 0x1E
 #define STATE_TRANSITION_IN_PROGRESS 0x1F
 
+#define AT86RF2XX_NUMREGS 0x3F
+
 static int
-__at86rf230_detect_device(struct spi_device *spi, u16 *man_id, u8 *part,
-               u8 *version)
+at86rf230_async_state_change(struct at86rf230_local *lp,
+                            struct at86rf230_state_change *ctx,
+                            const u8 state, void (*complete)(void *context));
+
+static inline int
+__at86rf230_write(struct at86rf230_local *lp,
+                 unsigned int addr, unsigned int data)
 {
-       u8 data[4];
-       u8 *buf = kmalloc(2, GFP_KERNEL);
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-               .rx_buf = buf,
-       };
-       u8 reg;
-
-       if (!buf)
-               return -ENOMEM;
+       return regmap_write(lp->regmap, addr, data);
+}
 
-       for (reg = RG_PART_NUM; reg <= RG_MAN_ID_1; reg++) {
-               buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
-               buf[1] = 0xff;
-               dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
-               spi_message_init(&msg);
-               spi_message_add_tail(&xfer, &msg);
+static inline int
+__at86rf230_read(struct at86rf230_local *lp,
+                unsigned int addr, unsigned int *data)
+{
+       return regmap_read(lp->regmap, addr, data);
+}
 
-               status = spi_sync(spi, &msg);
-               dev_vdbg(&spi->dev, "status = %d\n", status);
-               if (msg.status)
-                       status = msg.status;
+static inline int
+at86rf230_read_subreg(struct at86rf230_local *lp,
+                     unsigned int addr, unsigned int mask,
+                     unsigned int shift, unsigned int *data)
+{
+       int rc;
 
-               dev_vdbg(&spi->dev, "status = %d\n", status);
-               dev_vdbg(&spi->dev, "buf[0] = %02x\n", buf[0]);
-               dev_vdbg(&spi->dev, "buf[1] = %02x\n", buf[1]);
+       rc = __at86rf230_read(lp, addr, data);
+       if (rc > 0)
+               *data = (*data & mask) >> shift;
 
-               if (status == 0)
-                       data[reg - RG_PART_NUM] = buf[1];
-               else
-                       break;
+       return rc;
+}
+
+static inline int
+at86rf230_write_subreg(struct at86rf230_local *lp,
+                      unsigned int addr, unsigned int mask,
+                      unsigned int shift, unsigned int data)
+{
+       return regmap_update_bits(lp->regmap, addr, mask, data << shift);
+}
+
+static bool
+at86rf230_reg_writeable(struct device *dev, unsigned int reg)
+{
+       switch (reg) {
+       case RG_TRX_STATE:
+       case RG_TRX_CTRL_0:
+       case RG_TRX_CTRL_1:
+       case RG_PHY_TX_PWR:
+       case RG_PHY_ED_LEVEL:
+       case RG_PHY_CC_CCA:
+       case RG_CCA_THRES:
+       case RG_RX_CTRL:
+       case RG_SFD_VALUE:
+       case RG_TRX_CTRL_2:
+       case RG_ANT_DIV:
+       case RG_IRQ_MASK:
+       case RG_VREG_CTRL:
+       case RG_BATMON:
+       case RG_XOSC_CTRL:
+       case RG_RX_SYN:
+       case RG_XAH_CTRL_1:
+       case RG_FTN_CTRL:
+       case RG_PLL_CF:
+       case RG_PLL_DCU:
+       case RG_SHORT_ADDR_0:
+       case RG_SHORT_ADDR_1:
+       case RG_PAN_ID_0:
+       case RG_PAN_ID_1:
+       case RG_IEEE_ADDR_0:
+       case RG_IEEE_ADDR_1:
+       case RG_IEEE_ADDR_2:
+       case RG_IEEE_ADDR_3:
+       case RG_IEEE_ADDR_4:
+       case RG_IEEE_ADDR_5:
+       case RG_IEEE_ADDR_6:
+       case RG_IEEE_ADDR_7:
+       case RG_XAH_CTRL_0:
+       case RG_CSMA_SEED_0:
+       case RG_CSMA_SEED_1:
+       case RG_CSMA_BE:
+               return true;
+       default:
+               return false;
        }
+}
+
+static bool
+at86rf230_reg_readable(struct device *dev, unsigned int reg)
+{
+       bool rc;
+
+       /* all writeable registers are also readable */
+       rc = at86rf230_reg_writeable(dev, reg);
+       if (rc)
+               return rc;
 
-       if (status == 0) {
-               *part = data[0];
-               *version = data[1];
-               *man_id = (data[3] << 8) | data[2];
+       /* readonly regs */
+       switch (reg) {
+       case RG_TRX_STATUS:
+       case RG_PHY_RSSI:
+       case RG_IRQ_STATUS:
+       case RG_PART_NUM:
+       case RG_VERSION_NUM:
+       case RG_MAN_ID_1:
+       case RG_MAN_ID_0:
+               return true;
+       default:
+               return false;
        }
+}
 
-       kfree(buf);
+static bool
+at86rf230_reg_volatile(struct device *dev, unsigned int reg)
+{
+       /* can be changed during runtime */
+       switch (reg) {
+       case RG_TRX_STATUS:
+       case RG_TRX_STATE:
+       case RG_PHY_RSSI:
+       case RG_PHY_ED_LEVEL:
+       case RG_IRQ_STATUS:
+       case RG_VREG_CTRL:
+               return true;
+       default:
+               return false;
+       }
+}
 
-       return status;
+static bool
+at86rf230_reg_precious(struct device *dev, unsigned int reg)
+{
+       /* don't clear irq line on read */
+       switch (reg) {
+       case RG_IRQ_STATUS:
+               return true;
+       default:
+               return false;
+       }
 }
 
-static int
-__at86rf230_write(struct at86rf230_local *lp, u8 addr, u8 data)
+static struct regmap_config at86rf230_regmap_spi_config = {
+       .reg_bits = 8,
+       .val_bits = 8,
+       .write_flag_mask = CMD_REG | CMD_WRITE,
+       .read_flag_mask = CMD_REG,
+       .cache_type = REGCACHE_RBTREE,
+       .max_register = AT86RF2XX_NUMREGS,
+       .writeable_reg = at86rf230_reg_writeable,
+       .readable_reg = at86rf230_reg_readable,
+       .volatile_reg = at86rf230_reg_volatile,
+       .precious_reg = at86rf230_reg_precious,
+};
+
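
With this regmap_config in place, register access funnels through the generic regmap API, and the rbtree cache can serve reads of non-volatile registers without touching the SPI bus. A minimal usage sketch, assuming the lp->regmap initialized later in this patch; the mask/value pair is illustrative only:

    static int example_reg_access(struct at86rf230_local *lp)
    {
            unsigned int val;
            int rc;

            /* RG_TRX_STATUS is marked volatile above, so this read
             * always goes to the hardware, never the cache */
            rc = regmap_read(lp->regmap, RG_TRX_STATUS, &val);
            if (rc)
                    return rc;

            /* masked read-modify-write of the TRX_CMD subfield;
             * this is what at86rf230_write_subreg() above wraps */
            return regmap_update_bits(lp->regmap, RG_TRX_STATE,
                                      0x1f, STATE_TX_ON);
    }
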
+static void
+at86rf230_async_error_recover(void *context)
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-       };
-
-       buf[0] = (addr & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-       buf[1] = data;
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       return status;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+
+       at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, NULL);
 }
 
-static int
-__at86rf230_read_subreg(struct at86rf230_local *lp,
-                       u8 addr, u8 mask, int shift, u8 *data)
+static void
+at86rf230_async_error(struct at86rf230_local *lp,
+                     struct at86rf230_state_change *ctx, int rc)
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer = {
-               .len    = 2,
-               .tx_buf = buf,
-               .rx_buf = buf,
-       };
-
-       buf[0] = (addr & CMD_REG_MASK) | CMD_REG;
-       buf[1] = 0xff;
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       if (status == 0)
-               *data = (buf[1] & mask) >> shift;
-
-       return status;
+       dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+
+       at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
+                                    at86rf230_async_error_recover);
 }
 
+/* Generic function to read a register value in async mode */
 static int
-at86rf230_read_subreg(struct at86rf230_local *lp,
-                     u8 addr, u8 mask, int shift, u8 *data)
+at86rf230_async_read_reg(struct at86rf230_local *lp, const u8 reg,
+                        struct at86rf230_state_change *ctx,
+                        void (*complete)(void *context))
 {
-       int status;
+       u8 *tx_buf = ctx->buf;
 
-       mutex_lock(&lp->bmux);
-       status = __at86rf230_read_subreg(lp, addr, mask, shift, data);
-       mutex_unlock(&lp->bmux);
+       tx_buf[0] = (reg & CMD_REG_MASK) | CMD_REG;
+       ctx->trx.len = 2;
+       ctx->msg.complete = complete;
+       return spi_async(lp->spi, &ctx->msg);
+}
+
+static void
+at86rf230_async_state_assert(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = ctx->buf;
+       const u8 trx_state = buf[1] & 0x1f;
+
+       /* Assert state change */
+       if (trx_state != ctx->to_state) {
+               /* Special handling if transceiver state is in
+                * STATE_BUSY_RX_AACK and a SHR was detected.
+                */
+               if (trx_state == STATE_BUSY_RX_AACK) {
+                       /* Undocumented race condition. If we send a state
+                        * change to STATE_RX_AACK_ON, the transceiver could
+                        * change its state automatically to STATE_BUSY_RX_AACK
+                        * if an SHR was detected. This is not an error, but we
+                        * can't assert this.
+                        */
+                       if (ctx->to_state == STATE_RX_AACK_ON)
+                               goto done;
+
+                       /* If we change to STATE_TX_ON without forcing and the
+                        * transceiver state is STATE_BUSY_RX_AACK, we wait the
+                        * 'tFrame + tPAck' receiving time. In this time the
+                        * PDU should be received. If the transceiver is still
+                        * in STATE_BUSY_RX_AACK, we run a forced state change
+                        * to STATE_TX_ON. This is timeout handling in case the
+                        * transceiver gets stuck in STATE_BUSY_RX_AACK.
+                        */
+                       if (ctx->to_state == STATE_TX_ON) {
+                               at86rf230_async_state_change(lp, ctx,
+                                                            STATE_FORCE_TX_ON,
+                                                            ctx->complete);
+                               return;
+                       }
+               }
+
+
+               dev_warn(&lp->spi->dev, "unexpected state change from 0x%02x to 0x%02x. Actual state: 0x%02x\n",
+       }
 
-       return status;
+done:
+       if (ctx->complete)
+               ctx->complete(context);
 }
 
-static int
-at86rf230_write_subreg(struct at86rf230_local *lp,
-                      u8 addr, u8 mask, int shift, u8 data)
+/* Do state change timing delay. */
+static void
+at86rf230_async_state_delay(void *context)
 {
-       int status;
-       u8 val;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       struct at86rf2xx_chip_data *c = lp->data;
+       bool force = false;
+       int rc;
+
+       /* The force state changes will show up as normal states in the
+        * state status subregister. We change to_state to the
+        * corresponding one and remember whether it was a force change;
+        * this matters if we do a state change from STATE_BUSY_RX_AACK.
+        */
+       switch (ctx->to_state) {
+       case STATE_FORCE_TX_ON:
+               ctx->to_state = STATE_TX_ON;
+               force = true;
+               break;
+       case STATE_FORCE_TRX_OFF:
+               ctx->to_state = STATE_TRX_OFF;
+               force = true;
+               break;
+       default:
+               break;
+       }
+
+       switch (ctx->from_state) {
+       case STATE_TRX_OFF:
+               switch (ctx->to_state) {
+               case STATE_RX_AACK_ON:
+                       usleep_range(c->t_off_to_aack, c->t_off_to_aack + 10);
+                       goto change;
+               case STATE_TX_ON:
+                       usleep_range(c->t_off_to_tx_on,
+                                    c->t_off_to_tx_on + 10);
+                       goto change;
+               default:
+                       break;
+               }
+               break;
+       case STATE_BUSY_RX_AACK:
+               switch (ctx->to_state) {
+               case STATE_TX_ON:
+                       /* Wait for worst case receiving time if we
+                        * didn't make a force change from BUSY_RX_AACK
+                        * to TX_ON.
+                        */
+                       if (!force) {
+                               usleep_range(c->t_frame + c->t_p_ack,
+                                            c->t_frame + c->t_p_ack + 1000);
+                               goto change;
+                       }
+                       break;
+               default:
+                       break;
+               }
+               break;
+       /* Default value, means RESET state */
+       case STATE_P_ON:
+               switch (ctx->to_state) {
+               case STATE_TRX_OFF:
+                       usleep_range(c->t_reset_to_off, c->t_reset_to_off + 10);
+                       goto change;
+               default:
+                       break;
+               }
+               break;
+       default:
+               break;
+       }
+
+       /* Default delay is 1us in most cases */
+       udelay(1);
+
+change:
+       rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                     at86rf230_async_state_assert);
+       if (rc)
+               dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+}
+
+static void
+at86rf230_async_state_change_start(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       u8 *buf = ctx->buf;
+       const u8 trx_state = buf[1] & 0x1f;
+       int rc;
 
-       mutex_lock(&lp->bmux);
-       status = __at86rf230_read_subreg(lp, addr, 0xff, 0, &val);
-       if (status)
-               goto out;
+       /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
+       if (trx_state == STATE_TRANSITION_IN_PROGRESS) {
+               udelay(1);
+               rc = at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                             at86rf230_async_state_change_start);
+               if (rc)
+                       dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
+               return;
+       }
 
-       val &= ~mask;
-       val |= (data << shift) & mask;
+       /* Check if we are already in the state we want to change to */
+       if (trx_state == ctx->to_state) {
+               if (ctx->complete)
+                       ctx->complete(context);
+               return;
+       }
 
-       status = __at86rf230_write(lp, addr, val);
-out:
-       mutex_unlock(&lp->bmux);
+       /* Record the current state in the state change context */
+       ctx->from_state = trx_state;
 
-       return status;
+       /* Go into the next step of the state change, which does a
+        * timing-relevant delay.
+        */
+       buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+       buf[1] = ctx->to_state;
+       ctx->trx.len = 2;
+       ctx->msg.complete = at86rf230_async_state_delay;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc)
+               dev_err(&lp->spi->dev, "spi_async error %d\n", rc);
 }
 
 static int
-at86rf230_write_fbuf(struct at86rf230_local *lp, u8 *data, u8 len)
+at86rf230_async_state_change(struct at86rf230_local *lp,
+                            struct at86rf230_state_change *ctx,
+                            const u8 state, void (*complete)(void *context))
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer_head = {
-               .len            = 2,
-               .tx_buf         = buf,
-
-       };
-       struct spi_transfer xfer_buf = {
-               .len            = len,
-               .tx_buf         = data,
-       };
-
-       mutex_lock(&lp->bmux);
-       buf[0] = CMD_WRITE | CMD_FB;
-       buf[1] = len + 2; /* 2 bytes for CRC that isn't written */
-
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head, &msg);
-       spi_message_add_tail(&xfer_buf, &msg);
-
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       if (msg.status)
-               status = msg.status;
-
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
-
-       mutex_unlock(&lp->bmux);
-       return status;
+       /* Initialization for the state change context */
+       ctx->to_state = state;
+       ctx->complete = complete;
+       return at86rf230_async_read_reg(lp, RG_TRX_STATUS, ctx,
+                                       at86rf230_async_state_change_start);
 }
 
+static void
+at86rf230_sync_state_change_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+
+       complete(&lp->state_complete);
+}
+
+/* This function provides a synchronous wrapper above the async state
+ * change. Some callbacks of the IEEE 802.15.4 driver interface need to
+ * be handled synchronously.
+ */
 static int
-at86rf230_read_fbuf(struct at86rf230_local *lp, u8 *data, u8 *len, u8 *lqi)
+at86rf230_sync_state_change(struct at86rf230_local *lp, unsigned int state)
 {
-       u8 *buf = lp->buf;
-       int status;
-       struct spi_message msg;
-       struct spi_transfer xfer_head = {
-               .len            = 2,
-               .tx_buf         = buf,
-               .rx_buf         = buf,
-       };
-       struct spi_transfer xfer_head1 = {
-               .len            = 2,
-               .tx_buf         = buf,
-               .rx_buf         = buf,
-       };
-       struct spi_transfer xfer_buf = {
-               .len            = 0,
-               .rx_buf         = data,
-       };
-
-       mutex_lock(&lp->bmux);
+       int rc;
 
-       buf[0] = CMD_FB;
-       buf[1] = 0x00;
+       rc = at86rf230_async_state_change(lp, &lp->state, state,
+                                         at86rf230_sync_state_change_complete);
+       if (rc) {
+               at86rf230_async_error(lp, &lp->state, rc);
+               return rc;
+       }
 
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head, &msg);
+       rc = wait_for_completion_timeout(&lp->state_complete,
+                                        msecs_to_jiffies(100));
+       if (!rc)
+               return -ETIMEDOUT;
 
-       status = spi_sync(lp->spi, &msg);
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
+       return 0;
+}
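
The sync wrapper is the standard completion idiom for layering a blocking call over an asynchronous one; a stripped-down generic sketch with hypothetical names, not driver code:

    #include <linux/completion.h>
    #include <linux/jiffies.h>

    struct sync_ctx {
            struct completion done;
    };

    /* runs from the async completion path */
    static void op_complete(void *context)
    {
            struct sync_ctx *ctx = context;

            complete(&ctx->done);
    }

    static int op_sync(struct sync_ctx *ctx)
    {
            init_completion(&ctx->done);

            /* ... start the async operation, passing op_complete ... */

            if (!wait_for_completion_timeout(&ctx->done,
                                             msecs_to_jiffies(100)))
                    return -ETIMEDOUT;
            return 0;
    }
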
+
+static void
+at86rf230_tx_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
 
-       xfer_buf.len = *(buf + 1) + 1;
-       *len = buf[1];
+       complete(&lp->tx_complete);
+}
 
-       buf[0] = CMD_FB;
-       buf[1] = 0x00;
+static void
+at86rf230_tx_on(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
 
-       spi_message_init(&msg);
-       spi_message_add_tail(&xfer_head1, &msg);
-       spi_message_add_tail(&xfer_buf, &msg);
+       rc = at86rf230_async_state_change(lp, &lp->irq, STATE_RX_AACK_ON,
+                                         at86rf230_tx_complete);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-       status = spi_sync(lp->spi, &msg);
+static void
+at86rf230_tx_trac_error(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
 
-       if (msg.status)
-               status = msg.status;
+       rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                         at86rf230_tx_on);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-       dev_vdbg(&lp->spi->dev, "status = %d\n", status);
-       dev_vdbg(&lp->spi->dev, "buf[0] = %02x\n", buf[0]);
-       dev_vdbg(&lp->spi->dev, "buf[1] = %02x\n", buf[1]);
+static void
+at86rf230_tx_trac_check(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = ctx->buf;
+       const u8 trac = (buf[1] & 0xe0) >> 5;
+       int rc;
 
-       if (status) {
-               if (lqi && (*len > lp->buf[1]))
-                       *lqi = data[lp->buf[1]];
+       /* If the TRAC status is non-zero we need to do a state change to
+        * STATE_FORCE_TRX_OFF, then STATE_TX_ON, to recover the transceiver
+        * state to TX_ON.
+        */
+       if (trac) {
+               rc = at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
+                                                 at86rf230_tx_trac_error);
+               if (rc)
+                       at86rf230_async_error(lp, ctx, rc);
+               return;
        }
-       mutex_unlock(&lp->bmux);
 
-       return status;
+       at86rf230_tx_on(context);
 }
 
-static int
-at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+static void
+at86rf230_tx_trac_status(void *context)
 {
-       might_sleep();
-       BUG_ON(!level);
-       *level = 0xbe;
-       return 0;
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
+
+       rc = at86rf230_async_read_reg(lp, RG_TRX_STATE, ctx,
+                                     at86rf230_tx_trac_check);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
+
+static void
+at86rf230_rx(struct at86rf230_local *lp,
+            const u8 *data, u8 len)
+{
+       u8 lqi;
+       struct sk_buff *skb;
+       u8 rx_local_buf[AT86RF2XX_MAX_BUF];
+
+       if (len < 2)
+               return;
+
+       /* Read the full frame buffer and set the LQI value to the lowest
+        * indicator if the frame is in a corrupted state.
+        */
+       if (len > IEEE802154_MTU) {
+               lqi = 0;
+               len = IEEE802154_MTU;
+               dev_vdbg(&lp->spi->dev, "corrupted frame received\n");
+       } else {
+               lqi = data[len];
+       }
+
+       memcpy(rx_local_buf, data, len);
+       enable_irq(lp->spi->irq);
+
+       skb = alloc_skb(IEEE802154_MTU, GFP_ATOMIC);
+       if (!skb) {
+               dev_vdbg(&lp->spi->dev, "failed to allocate sk_buff\n");
+               return;
+       }
+
+       memcpy(skb_put(skb, len), rx_local_buf, len);
+
+       /* We do not put CRC into the frame */
+       skb_trim(skb, len - 2);
+
+       ieee802154_rx_irqsafe(lp->dev, skb, lqi);
+}
+
+static void
+at86rf230_rx_read_frame_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = lp->irq.buf;
+       const u8 len = buf[1];
+
+       at86rf230_rx(lp, buf + 2, len);
 }
 
 static int
-at86rf230_state(struct ieee802154_dev *dev, int state)
+at86rf230_rx_read_frame(struct at86rf230_local *lp)
 {
-       struct at86rf230_local *lp = dev->priv;
+       u8 *buf = lp->irq.buf;
+
+       buf[0] = CMD_FB;
+       lp->irq.trx.len = AT86RF2XX_MAX_BUF;
+       lp->irq.msg.complete = at86rf230_rx_read_frame_complete;
+       return spi_async(lp->spi, &lp->irq.msg);
+}
+
+static void
+at86rf230_rx_trac_check(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
        int rc;
-       u8 val;
-       u8 desired_status;
 
-       might_sleep();
+       /* A check on the TRAC status could be done here. This could be
+        * useful for statistics on why receive failed. It's not used at
+        * the moment, but may be timing relevant. The datasheet doesn't
+        * say anything about this, but the programming guide says to do it.
+        */
 
-       if (state == STATE_FORCE_TX_ON)
-               desired_status = STATE_TX_ON;
-       else if (state == STATE_FORCE_TRX_OFF)
-               desired_status = STATE_TRX_OFF;
-       else
-               desired_status = state;
+       rc = at86rf230_rx_read_frame(lp);
+       if (rc) {
+               enable_irq(lp->spi->irq);
+               at86rf230_async_error(lp, ctx, rc);
+       }
+}
 
-       do {
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
+static int
+at86rf230_irq_trx_end(struct at86rf230_local *lp)
+{
+       spin_lock(&lp->lock);
+       if (lp->is_tx) {
+               lp->is_tx = 0;
+               spin_unlock(&lp->lock);
+               enable_irq(lp->spi->irq);
+
+               if (lp->tx_aret)
+                       return at86rf230_async_state_change(lp, &lp->irq,
+                                                           STATE_FORCE_TX_ON,
+                                                           at86rf230_tx_trac_status);
+               else
+                       return at86rf230_async_state_change(lp, &lp->irq,
+                                                           STATE_RX_AACK_ON,
+                                                           at86rf230_tx_complete);
+       } else {
+               spin_unlock(&lp->lock);
+               return at86rf230_async_read_reg(lp, RG_TRX_STATE, &lp->irq,
+                                               at86rf230_rx_trac_check);
+       }
+}
+
+static void
+at86rf230_irq_status(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       const u8 *buf = lp->irq.buf;
+       const u8 irq = buf[1];
+       int rc;
+
+       if (irq & IRQ_TRX_END) {
+               rc = at86rf230_irq_trx_end(lp);
                if (rc)
-                       goto err;
-       } while (val == STATE_TRANSITION_IN_PROGRESS);
+                       at86rf230_async_error(lp, ctx, rc);
+       } else {
+               enable_irq(lp->spi->irq);
+               dev_err(&lp->spi->dev, "not supported irq %02x received\n",
+                       irq);
+       }
+}
 
-       if (val == desired_status)
-               return 0;
+static irqreturn_t at86rf230_isr(int irq, void *data)
+{
+       struct at86rf230_local *lp = data;
+       struct at86rf230_state_change *ctx = &lp->irq;
+       u8 *buf = ctx->buf;
+       int rc;
 
-       /* state is equal to phy states */
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, state);
-       if (rc)
-               goto err;
+       disable_irq_nosync(lp->spi->irq);
 
-       do {
-               rc = at86rf230_read_subreg(lp, SR_TRX_STATUS, &val);
-               if (rc)
-                       goto err;
-       } while (val == STATE_TRANSITION_IN_PROGRESS);
+       buf[0] = (RG_IRQ_STATUS & CMD_REG_MASK) | CMD_REG;
+       ctx->trx.len = 2;
+       ctx->msg.complete = at86rf230_irq_status;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc) {
+               at86rf230_async_error(lp, ctx, rc);
+               return IRQ_NONE;
+       }
+
+       return IRQ_HANDLED;
+}
+
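
The ISR never touches the bus synchronously: spi_async() is safe to call from hard-IRQ context, and the line stays masked via disable_irq_nosync() until one of the completion handlers re-enables it. A generic sketch of the idiom, with a hypothetical device struct:

    struct foo_dev {
            struct spi_device *spi;
            struct spi_message irq_msg;     /* pre-built at probe time */
    };

    static irqreturn_t foo_isr(int irq, void *data)
    {
            struct foo_dev *f = data;

            /* mask the line; the spi_message completion handler calls
             * enable_irq() once the interrupt cause has been read */
            disable_irq_nosync(irq);

            if (spi_async(f->spi, &f->irq_msg))
                    return IRQ_NONE;

            return IRQ_HANDLED;
    }
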
+static void
+at86rf230_write_frame_complete(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       u8 *buf = ctx->buf;
+       int rc;
 
+       buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+       buf[1] = STATE_BUSY_TX;
+       ctx->trx.len = 2;
+       ctx->msg.complete = NULL;
+       rc = spi_async(lp->spi, &ctx->msg);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-       if (val == desired_status ||
-           (desired_status == STATE_RX_ON && val == STATE_BUSY_RX) ||
-           (desired_status == STATE_RX_AACK_ON && val == STATE_BUSY_RX_AACK))
-               return 0;
+static void
+at86rf230_write_frame(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       struct sk_buff *skb = lp->tx_skb;
+       u8 *buf = lp->tx.buf;
+       int rc;
 
-       pr_err("unexpected state change: %d, asked for %d\n", val, state);
-       return -EBUSY;
+       spin_lock(&lp->lock);
+       lp->is_tx = 1;
+       spin_unlock(&lp->lock);
+
+       buf[0] = CMD_FB | CMD_WRITE;
+       buf[1] = skb->len + 2;
+       memcpy(buf + 2, skb->data, skb->len);
+       lp->tx.trx.len = skb->len + 2;
+       lp->tx.msg.complete = at86rf230_write_frame_complete;
+       rc = spi_async(lp->spi, &lp->tx.msg);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
+}
 
-err:
-       pr_err("error: %d\n", rc);
-       return rc;
+static void
+at86rf230_xmit_tx_on(void *context)
+{
+       struct at86rf230_state_change *ctx = context;
+       struct at86rf230_local *lp = ctx->lp;
+       int rc;
+
+       rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+                                         at86rf230_write_frame);
+       if (rc)
+               at86rf230_async_error(lp, ctx, rc);
 }
 
 static int
-at86rf230_start(struct ieee802154_dev *dev)
+at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
 {
        struct at86rf230_local *lp = dev->priv;
-       u8 rc;
+       struct at86rf230_state_change *ctx = &lp->tx;
 
-       rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
-       if (rc)
-               return rc;
+       void (*tx_complete)(void *context) = at86rf230_write_frame;
+       int rc;
 
-       rc = at86rf230_state(dev, STATE_TX_ON);
-       if (rc)
+       lp->tx_skb = skb;
+
+       /* In ARET mode we need to go into STATE_TX_ARET_ON after we
+        * are in STATE_TX_ON. The path differs here, so we change
+        * the complete handler.
+        */
+       if (lp->tx_aret)
+               tx_complete = at86rf230_xmit_tx_on;
+
+       rc = at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                         tx_complete);
+       if (rc) {
+               at86rf230_async_error(lp, ctx, rc);
                return rc;
+       }
+       rc = wait_for_completion_interruptible_timeout(&lp->tx_complete,
+                                                      msecs_to_jiffies(lp->data->t_tx_timeout));
+       if (!rc) {
+               at86rf230_async_error(lp, ctx, rc);
+               return -ETIMEDOUT;
+       }
+
+       /* Interframe spacing time, which is PHY dependent.
+        * TODO
+        * Move this handling into the MAC 802.15.4 layer.
+        * This is currently a workaround to avoid fragmentation issues.
+        */
+       if (skb->len > 18)
+               usleep_range(lp->data->t_lifs, lp->data->t_lifs + 10);
+       else
+               usleep_range(lp->data->t_sifs, lp->data->t_sifs + 10);
 
-       return at86rf230_state(dev, STATE_RX_AACK_ON);
+       return 0;
+}
+
+static int
+at86rf230_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       might_sleep();
+       BUG_ON(!level);
+       *level = 0xbe;
+       return 0;
+}
+
+static int
+at86rf230_start(struct ieee802154_dev *dev)
+{
+       return at86rf230_sync_state_change(dev->priv, STATE_RX_AACK_ON);
 }
 
 static void
 at86rf230_stop(struct ieee802154_dev *dev)
 {
-       at86rf230_state(dev, STATE_FORCE_TRX_OFF);
+       at86rf230_sync_state_change(dev->priv, STATE_FORCE_TRX_OFF);
 }
 
 static int
-at86rf230_set_channel(struct at86rf230_local *lp, int page, int channel)
+at86rf23x_set_channel(struct at86rf230_local *lp, int page, int channel)
 {
-       lp->rssi_base_val = -91;
-
        return at86rf230_write_subreg(lp, SR_CHANNEL, channel);
 }
 
@@ -611,10 +1053,10 @@ at86rf212_set_channel(struct at86rf230_local *lp, int page, int channel)
 
        if (page == 0) {
                rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 0);
-               lp->rssi_base_val = -100;
+               lp->data->rssi_base_val = -100;
        } else {
                rc = at86rf230_write_subreg(lp, SR_BPSK_QPSK, 1);
-               lp->rssi_base_val = -98;
+               lp->data->rssi_base_val = -98;
        }
        if (rc < 0)
                return rc;
@@ -636,106 +1078,19 @@ at86rf230_channel(struct ieee802154_dev *dev, int page, int channel)
                return -EINVAL;
        }
 
-       if (is_rf212(lp))
-               rc = at86rf212_set_channel(lp, page, channel);
-       else
-               rc = at86rf230_set_channel(lp, page, channel);
+       rc = lp->data->set_channel(lp, page, channel);
        if (rc < 0)
                return rc;
 
-       msleep(1); /* Wait for PLL */
+       /* Wait for PLL */
+       usleep_range(lp->data->t_channel_switch,
+                    lp->data->t_channel_switch + 10);
        dev->phy->current_channel = channel;
        dev->phy->current_page = page;
 
        return 0;
 }
 
-static int
-at86rf230_xmit(struct ieee802154_dev *dev, struct sk_buff *skb)
-{
-       struct at86rf230_local *lp = dev->priv;
-       int rc;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       if  (lp->irq_busy) {
-               spin_unlock_irqrestore(&lp->lock, flags);
-               return -EBUSY;
-       }
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       might_sleep();
-
-       rc = at86rf230_state(dev, STATE_FORCE_TX_ON);
-       if (rc)
-               goto err;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->is_tx = 1;
-       reinit_completion(&lp->tx_complete);
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       rc = at86rf230_write_fbuf(lp, skb->data, skb->len);
-       if (rc)
-               goto err_rx;
-
-       if (lp->tx_aret) {
-               rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_TX_ARET_ON);
-               if (rc)
-                       goto err_rx;
-       }
-
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_BUSY_TX);
-       if (rc)
-               goto err_rx;
-
-       rc = wait_for_completion_interruptible(&lp->tx_complete);
-       if (rc < 0)
-               goto err_rx;
-
-       return at86rf230_start(dev);
-err_rx:
-       at86rf230_start(dev);
-err:
-       pr_err("error: %d\n", rc);
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->is_tx = 0;
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       return rc;
-}
-
-static int at86rf230_rx(struct at86rf230_local *lp)
-{
-       u8 len = 128, lqi = 0;
-       struct sk_buff *skb;
-
-       skb = alloc_skb(len, GFP_KERNEL);
-
-       if (!skb)
-               return -ENOMEM;
-
-       if (at86rf230_read_fbuf(lp, skb_put(skb, len), &len, &lqi))
-               goto err;
-
-       if (len < 2)
-               goto err;
-
-       skb_trim(skb, len - 2); /* We do not put CRC into the frame */
-
-       ieee802154_rx_irqsafe(lp->dev, skb, lqi);
-
-       dev_dbg(&lp->spi->dev, "READ_FBUF: %d %x\n", len, lqi);
-
-       return 0;
-err:
-       pr_debug("received frame is too small\n");
-
-       kfree_skb(skb);
-       return -EINVAL;
-}
-
 static int
 at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
                           struct ieee802154_hw_addr_filt *filt,
@@ -784,7 +1139,7 @@ at86rf230_set_hw_addr_filt(struct ieee802154_dev *dev,
 }
 
 static int
-at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
+at86rf230_set_txpower(struct ieee802154_dev *dev, int db)
 {
        struct at86rf230_local *lp = dev->priv;
 
@@ -803,7 +1158,7 @@ at86rf212_set_txpower(struct ieee802154_dev *dev, int db)
 }
 
 static int
-at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
+at86rf230_set_lbt(struct ieee802154_dev *dev, bool on)
 {
        struct at86rf230_local *lp = dev->priv;
 
@@ -811,7 +1166,7 @@ at86rf212_set_lbt(struct ieee802154_dev *dev, bool on)
 }
 
 static int
-at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
+at86rf230_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
 {
        struct at86rf230_local *lp = dev->priv;
 
@@ -819,21 +1174,31 @@ at86rf212_set_cca_mode(struct ieee802154_dev *dev, u8 mode)
 }
 
 static int
-at86rf212_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
+at86rf212_get_desens_steps(struct at86rf230_local *lp, s32 level)
+{
+       return (level - lp->data->rssi_base_val) * 100 / 207;
+}
+
+static int
+at86rf23x_get_desens_steps(struct at86rf230_local *lp, s32 level)
+{
+       return (level - lp->data->rssi_base_val) / 2;
+}
+
+static int
+at86rf230_set_cca_ed_level(struct ieee802154_dev *dev, s32 level)
 {
        struct at86rf230_local *lp = dev->priv;
-       int desens_steps;
 
-       if (level < lp->rssi_base_val || level > 30)
+       if (level < lp->data->rssi_base_val || level > 30)
                return -EINVAL;
 
-       desens_steps = (level - lp->rssi_base_val) * 100 / 207;
-
-       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES, desens_steps);
+       return at86rf230_write_subreg(lp, SR_CCA_ED_THRES,
+                                     lp->data->get_desense_steps(lp, level));
 }
 
 static int
-at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
+at86rf230_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
                          u8 retries)
 {
        struct at86rf230_local *lp = dev->priv;
@@ -854,7 +1219,7 @@ at86rf212_set_csma_params(struct ieee802154_dev *dev, u8 min_be, u8 max_be,
 }
 
 static int
-at86rf212_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
+at86rf230_set_frame_retries(struct ieee802154_dev *dev, s8 retries)
 {
        struct at86rf230_local *lp = dev->priv;
        int rc = 0;
@@ -878,110 +1243,84 @@ static struct ieee802154_ops at86rf230_ops = {
        .start = at86rf230_start,
        .stop = at86rf230_stop,
        .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
+       .set_txpower = at86rf230_set_txpower,
+       .set_lbt = at86rf230_set_lbt,
+       .set_cca_mode = at86rf230_set_cca_mode,
+       .set_cca_ed_level = at86rf230_set_cca_ed_level,
+       .set_csma_params = at86rf230_set_csma_params,
+       .set_frame_retries = at86rf230_set_frame_retries,
 };
 
-static struct ieee802154_ops at86rf212_ops = {
-       .owner = THIS_MODULE,
-       .xmit = at86rf230_xmit,
-       .ed = at86rf230_ed,
-       .set_channel = at86rf230_channel,
-       .start = at86rf230_start,
-       .stop = at86rf230_stop,
-       .set_hw_addr_filt = at86rf230_set_hw_addr_filt,
-       .set_txpower = at86rf212_set_txpower,
-       .set_lbt = at86rf212_set_lbt,
-       .set_cca_mode = at86rf212_set_cca_mode,
-       .set_cca_ed_level = at86rf212_set_cca_ed_level,
-       .set_csma_params = at86rf212_set_csma_params,
-       .set_frame_retries = at86rf212_set_frame_retries,
+static struct at86rf2xx_chip_data at86rf233_data = {
+       .t_sleep_cycle = 330,
+       .t_channel_switch = 11,
+       .t_reset_to_off = 26,
+       .t_off_to_aack = 80,
+       .t_off_to_tx_on = 80,
+       .t_frame = 4096,
+       .t_p_ack = 545,
+       .t_sifs = 192,
+       .t_lifs = 480,
+       .t_tx_timeout = 2000,
+       .rssi_base_val = -91,
+       .set_channel = at86rf23x_set_channel,
+       .get_desense_steps = at86rf23x_get_desens_steps
 };
 
-static void at86rf230_irqwork(struct work_struct *work)
-{
-       struct at86rf230_local *lp =
-               container_of(work, struct at86rf230_local, irqwork);
-       u8 status = 0, val;
-       int rc;
-       unsigned long flags;
-
-       rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &val);
-       status |= val;
-
-       status &= ~IRQ_PLL_LOCK; /* ignore */
-       status &= ~IRQ_RX_START; /* ignore */
-       status &= ~IRQ_AMI; /* ignore */
-       status &= ~IRQ_TRX_UR; /* FIXME: possibly handle ???*/
-
-       if (status & IRQ_TRX_END) {
-               status &= ~IRQ_TRX_END;
-               spin_lock_irqsave(&lp->lock, flags);
-               if (lp->is_tx) {
-                       lp->is_tx = 0;
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       complete(&lp->tx_complete);
-               } else {
-                       spin_unlock_irqrestore(&lp->lock, flags);
-                       at86rf230_rx(lp);
-               }
-       }
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->irq_busy = 0;
-       spin_unlock_irqrestore(&lp->lock, flags);
-}
-
-static void at86rf230_irqwork_level(struct work_struct *work)
-{
-       struct at86rf230_local *lp =
-               container_of(work, struct at86rf230_local, irqwork);
-
-       at86rf230_irqwork(work);
-
-       enable_irq(lp->spi->irq);
-}
-
-static irqreturn_t at86rf230_isr(int irq, void *data)
-{
-       struct at86rf230_local *lp = data;
-       unsigned long flags;
-
-       spin_lock_irqsave(&lp->lock, flags);
-       lp->irq_busy = 1;
-       spin_unlock_irqrestore(&lp->lock, flags);
-
-       schedule_work(&lp->irqwork);
-
-       return IRQ_HANDLED;
-}
-
-static irqreturn_t at86rf230_isr_level(int irq, void *data)
-{
-       disable_irq_nosync(irq);
+static struct at86rf2xx_chip_data at86rf231_data = {
+       .t_sleep_cycle = 330,
+       .t_channel_switch = 24,
+       .t_reset_to_off = 37,
+       .t_off_to_aack = 110,
+       .t_off_to_tx_on = 110,
+       .t_frame = 4096,
+       .t_p_ack = 545,
+       .t_sifs = 192,
+       .t_lifs = 480,
+       .t_tx_timeout = 2000,
+       .rssi_base_val = -91,
+       .set_channel = at86rf23x_set_channel,
+       .get_desense_steps = at86rf23x_get_desens_steps
+};
 
-       return at86rf230_isr(irq, data);
-}
+static struct at86rf2xx_chip_data at86rf212_data = {
+       .t_sleep_cycle = 330,
+       .t_channel_switch = 11,
+       .t_reset_to_off = 26,
+       .t_off_to_aack = 200,
+       .t_off_to_tx_on = 200,
+       .t_frame = 4096,
+       .t_p_ack = 545,
+       .t_sifs = 192,
+       .t_lifs = 480,
+       .t_tx_timeout = 2000,
+       .rssi_base_val = -100,
+       .set_channel = at86rf212_set_channel,
+       .get_desense_steps = at86rf212_get_desens_steps
+};
 
 static int at86rf230_hw_init(struct at86rf230_local *lp)
 {
-       int rc, irq_pol, irq_type;
-       u8 dvdd;
+       int rc, irq_type, irq_pol = IRQ_ACTIVE_HIGH;
+       unsigned int dvdd;
        u8 csma_seed[2];
 
-       rc = at86rf230_write_subreg(lp, SR_TRX_CMD, STATE_FORCE_TRX_OFF);
+       rc = at86rf230_sync_state_change(lp, STATE_FORCE_TRX_OFF);
        if (rc)
                return rc;
 
        irq_type = irq_get_trigger_type(lp->spi->irq);
-       /* configure irq polarity, defaults to high active */
-       if (irq_type & (IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW))
+       if (irq_type == IRQ_TYPE_EDGE_FALLING)
                irq_pol = IRQ_ACTIVE_LOW;
-       else
-               irq_pol = IRQ_ACTIVE_HIGH;
 
        rc = at86rf230_write_subreg(lp, SR_IRQ_POLARITY, irq_pol);
        if (rc)
                return rc;
 
+       rc = at86rf230_write_subreg(lp, SR_RX_SAFE_MODE, 1);
+       if (rc)
+               return rc;
+
        rc = at86rf230_write_subreg(lp, SR_IRQ_MASK, IRQ_TRX_END);
        if (rc)
                return rc;
@@ -1004,7 +1343,8 @@ static int at86rf230_hw_init(struct at86rf230_local *lp)
        if (rc)
                return rc;
        /* Wait the next SLEEP cycle */
-       msleep(100);
+       usleep_range(lp->data->t_sleep_cycle,
+                    lp->data->t_sleep_cycle + 100);
 
        rc = at86rf230_read_subreg(lp, SR_DVDD_OK, &dvdd);
        if (rc)
@@ -1037,18 +1377,111 @@ done:
        return pdata;
 }
 
+static int
+at86rf230_detect_device(struct at86rf230_local *lp)
+{
+       unsigned int part, version, val;
+       u16 man_id = 0;
+       const char *chip;
+       int rc;
+
+       rc = __at86rf230_read(lp, RG_MAN_ID_0, &val);
+       if (rc)
+               return rc;
+       man_id |= val;
+
+       rc = __at86rf230_read(lp, RG_MAN_ID_1, &val);
+       if (rc)
+               return rc;
+       man_id |= (val << 8);
+
+       rc = __at86rf230_read(lp, RG_PART_NUM, &part);
+       if (rc)
+               return rc;
+
+       rc = __at86rf230_read(lp, RG_VERSION_NUM, &version);
+       if (rc)
+               return rc;
+
+       if (man_id != 0x001f) {
+               dev_err(&lp->spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
+                       man_id >> 8, man_id & 0xFF);
+               return -EINVAL;
+       }
+
+       lp->dev->extra_tx_headroom = 0;
+       lp->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK |
+                        IEEE802154_HW_TXPOWER | IEEE802154_HW_CSMA;
+
+       switch (part) {
+       case 2:
+               chip = "at86rf230";
+               rc = -ENOTSUPP;
+               break;
+       case 3:
+               chip = "at86rf231";
+               lp->data = &at86rf231_data;
+               lp->dev->phy->channels_supported[0] = 0x7FFF800;
+               break;
+       case 7:
+               chip = "at86rf212";
+               if (version == 1) {
+                       lp->data = &at86rf212_data;
+                       lp->dev->flags |= IEEE802154_HW_LBT;
+                       lp->dev->phy->channels_supported[0] = 0x00007FF;
+                       lp->dev->phy->channels_supported[2] = 0x00007FF;
+               } else {
+                       rc = -ENOTSUPP;
+               }
+               break;
+       case 11:
+               chip = "at86rf233";
+               lp->data = &at86rf233_data;
+               lp->dev->phy->channels_supported[0] = 0x7FFF800;
+               break;
+       default:
+               chip = "unkown";
+               rc = -ENOTSUPP;
+               break;
+       }
+
+       dev_info(&lp->spi->dev, "Detected %s chip version %d\n", chip, version);
+
+       return rc;
+}
+
+static void
+at86rf230_setup_spi_messages(struct at86rf230_local *lp)
+{
+       lp->state.lp = lp;
+       spi_message_init(&lp->state.msg);
+       lp->state.msg.context = &lp->state;
+       lp->state.trx.tx_buf = lp->state.buf;
+       lp->state.trx.rx_buf = lp->state.buf;
+       spi_message_add_tail(&lp->state.trx, &lp->state.msg);
+
+       lp->irq.lp = lp;
+       spi_message_init(&lp->irq.msg);
+       lp->irq.msg.context = &lp->irq;
+       lp->irq.trx.tx_buf = lp->irq.buf;
+       lp->irq.trx.rx_buf = lp->irq.buf;
+       spi_message_add_tail(&lp->irq.trx, &lp->irq.msg);
+
+       lp->tx.lp = lp;
+       spi_message_init(&lp->tx.msg);
+       lp->tx.msg.context = &lp->tx;
+       lp->tx.trx.tx_buf = lp->tx.buf;
+       lp->tx.trx.rx_buf = lp->tx.buf;
+       spi_message_add_tail(&lp->tx.trx, &lp->tx.msg);
+}
+
 static int at86rf230_probe(struct spi_device *spi)
 {
        struct at86rf230_platform_data *pdata;
        struct ieee802154_dev *dev;
        struct at86rf230_local *lp;
-       u16 man_id = 0;
-       u8 part = 0, version = 0, status;
-       irq_handler_t irq_handler;
-       work_func_t irq_worker;
+       unsigned int status;
        int rc, irq_type;
-       const char *chip;
-       struct ieee802154_ops *ops = NULL;
 
        if (!spi->irq) {
                dev_err(&spi->dev, "no IRQ specified\n");
@@ -1084,104 +1517,60 @@ static int at86rf230_probe(struct spi_device *spi)
                usleep_range(120, 240);
        }
 
-       rc = __at86rf230_detect_device(spi, &man_id, &part, &version);
-       if (rc < 0)
-               return rc;
-
-       if (man_id != 0x001f) {
-               dev_err(&spi->dev, "Non-Atmel dev found (MAN_ID %02x %02x)\n",
-                       man_id >> 8, man_id & 0xFF);
-               return -EINVAL;
-       }
-
-       switch (part) {
-       case 2:
-               chip = "at86rf230";
-               /* FIXME: should be easy to support; */
-               break;
-       case 3:
-               chip = "at86rf231";
-               ops = &at86rf230_ops;
-               break;
-       case 7:
-               chip = "at86rf212";
-               if (version == 1)
-                       ops = &at86rf212_ops;
-               break;
-       case 11:
-               chip = "at86rf233";
-               ops = &at86rf230_ops;
-               break;
-       default:
-               chip = "UNKNOWN";
-               break;
-       }
-
-       dev_info(&spi->dev, "Detected %s chip version %d\n", chip, version);
-       if (!ops)
-               return -ENOTSUPP;
-
-       dev = ieee802154_alloc_device(sizeof(*lp), ops);
+       dev = ieee802154_alloc_device(sizeof(*lp), &at86rf230_ops);
        if (!dev)
                return -ENOMEM;
 
        lp = dev->priv;
        lp->dev = dev;
-       lp->part = part;
-       lp->vers = version;
-
        lp->spi = spi;
-
        dev->parent = &spi->dev;
-       dev->extra_tx_headroom = 0;
-       dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
 
-       irq_type = irq_get_trigger_type(spi->irq);
-       if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
-               irq_worker = at86rf230_irqwork;
-               irq_handler = at86rf230_isr;
-       } else {
-               irq_worker = at86rf230_irqwork_level;
-               irq_handler = at86rf230_isr_level;
+       lp->regmap = devm_regmap_init_spi(spi, &at86rf230_regmap_spi_config);
+       if (IS_ERR(lp->regmap)) {
+               rc = PTR_ERR(lp->regmap);
+               dev_err(&spi->dev, "Failed to allocate register map: %d\n",
+                       rc);
+               goto free_dev;
        }
 
-       mutex_init(&lp->bmux);
-       INIT_WORK(&lp->irqwork, irq_worker);
+       at86rf230_setup_spi_messages(lp);
+
+       rc = at86rf230_detect_device(lp);
+       if (rc < 0)
+               goto free_dev;
+
        spin_lock_init(&lp->lock);
        init_completion(&lp->tx_complete);
+       init_completion(&lp->state_complete);
 
        spi_set_drvdata(spi, lp);
 
-       if (is_rf212(lp)) {
-               dev->phy->channels_supported[0] = 0x00007FF;
-               dev->phy->channels_supported[2] = 0x00007FF;
-       } else {
-               dev->phy->channels_supported[0] = 0x7FFF800;
-       }
-
        rc = at86rf230_hw_init(lp);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
 
        /* Read irq status register to reset irq line */
        rc = at86rf230_read_subreg(lp, RG_IRQ_STATUS, 0xff, 0, &status);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
+
+       irq_type = irq_get_trigger_type(spi->irq);
+       if (!irq_type)
+               irq_type = IRQF_TRIGGER_RISING;
 
-       rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED,
-                             dev_name(&spi->dev), lp);
+       rc = devm_request_irq(&spi->dev, spi->irq, at86rf230_isr,
+                             IRQF_SHARED | irq_type, dev_name(&spi->dev), lp);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
 
        rc = ieee802154_register_device(lp->dev);
        if (rc)
-               goto err_hw_init;
+               goto free_dev;
 
        return rc;
 
-err_hw_init:
-       flush_work(&lp->irqwork);
-       mutex_destroy(&lp->bmux);
+free_dev:
        ieee802154_free_device(lp->dev);
 
        return rc;
@@ -1194,8 +1583,6 @@ static int at86rf230_remove(struct spi_device *spi)
        /* mask all at86rf230 irq's */
        at86rf230_write_subreg(lp, SR_IRQ_MASK, 0);
        ieee802154_unregister_device(lp->dev);
-       flush_work(&lp->irqwork);
-       mutex_destroy(&lp->bmux);
        ieee802154_free_device(lp->dev);
        dev_dbg(&spi->dev, "unregistered at86rf230\n");
 
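The probe path above now builds a regmap over SPI via devm_regmap_init_spi() with a driver-defined at86rf230_regmap_spi_config that lies outside this hunk. As a rough sketch, a config for an 8-bit-address, 8-bit-value SPI device looks like the following; the flag masks and cache type are illustrative assumptions, not the driver's actual values:

/* Sketch: requires <linux/regmap.h>; values are illustrative */
static const struct regmap_config example_regmap_spi_config = {
        .reg_bits = 8,
        .val_bits = 8,
        .write_flag_mask = 0xc0,        /* assumed: register-access + write bits */
        .read_flag_mask = 0x80,         /* assumed: register-access bit only */
        .cache_type = REGCACHE_RBTREE,
};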
diff --git a/drivers/net/ieee802154/cc2520.c b/drivers/net/ieee802154/cc2520.c
new file mode 100644 (file)
index 0000000..8a5ac7a
--- /dev/null
@@ -0,0 +1,1039 @@
+/* Driver for TI CC2520 802.15.4 Wireless-PAN Networking controller
+ *
+ * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
+ *                   Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
+ *                   P Sowjanya <sowjanyap@cdac.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/gpio.h>
+#include <linux/delay.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/cc2520.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/skbuff.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/of_gpio.h>
+
+#include <net/mac802154.h>
+#include <net/wpan-phy.h>
+#include <net/ieee802154.h>
+
+#define        SPI_COMMAND_BUFFER      3
+#define        HIGH                    1
+#define        LOW                     0
+#define        STATE_IDLE              0
+#define        RSSI_VALID              0
+#define        RSSI_OFFSET             78
+
+#define        CC2520_RAM_SIZE         640
+#define        CC2520_FIFO_SIZE        128
+
+#define        CC2520RAM_TXFIFO        0x100
+#define        CC2520RAM_RXFIFO        0x180
+#define        CC2520RAM_IEEEADDR      0x3EA
+#define        CC2520RAM_PANID         0x3F2
+#define        CC2520RAM_SHORTADDR     0x3F4
+
+#define        CC2520_FREG_MASK        0x3F
+
+/* status byte values */
+#define        CC2520_STATUS_XOSC32M_STABLE    (1 << 7)
+#define        CC2520_STATUS_RSSI_VALID        (1 << 6)
+#define        CC2520_STATUS_TX_UNDERFLOW      (1 << 3)
+
+/* IEEE-802.15.4 defined constants (2.4 GHz logical channels) */
+#define        CC2520_MINCHANNEL               11
+#define        CC2520_MAXCHANNEL               26
+#define        CC2520_CHANNEL_SPACING          5
+
+/* command strobes */
+#define        CC2520_CMD_SNOP                 0x00
+#define        CC2520_CMD_IBUFLD               0x02
+#define        CC2520_CMD_SIBUFEX              0x03
+#define        CC2520_CMD_SSAMPLECCA           0x04
+#define        CC2520_CMD_SRES                 0x0f
+#define        CC2520_CMD_MEMORY_MASK          0x0f
+#define        CC2520_CMD_MEMORY_READ          0x10
+#define        CC2520_CMD_MEMORY_WRITE         0x20
+#define        CC2520_CMD_RXBUF                0x30
+#define        CC2520_CMD_RXBUFCP              0x38
+#define        CC2520_CMD_RXBUFMOV             0x32
+#define        CC2520_CMD_TXBUF                0x3A
+#define        CC2520_CMD_TXBUFCP              0x3E
+#define        CC2520_CMD_RANDOM               0x3C
+#define        CC2520_CMD_SXOSCON              0x40
+#define        CC2520_CMD_STXCAL               0x41
+#define        CC2520_CMD_SRXON                0x42
+#define        CC2520_CMD_STXON                0x43
+#define        CC2520_CMD_STXONCCA             0x44
+#define        CC2520_CMD_SRFOFF               0x45
+#define        CC2520_CMD_SXOSCOFF             0x46
+#define        CC2520_CMD_SFLUSHRX             0x47
+#define        CC2520_CMD_SFLUSHTX             0x48
+#define        CC2520_CMD_SACK                 0x49
+#define        CC2520_CMD_SACKPEND             0x4A
+#define        CC2520_CMD_SNACK                0x4B
+#define        CC2520_CMD_SRXMASKBITSET        0x4C
+#define        CC2520_CMD_SRXMASKBITCLR        0x4D
+#define        CC2520_CMD_RXMASKAND            0x4E
+#define        CC2520_CMD_RXMASKOR             0x4F
+#define        CC2520_CMD_MEMCP                0x50
+#define        CC2520_CMD_MEMCPR               0x52
+#define        CC2520_CMD_MEMXCP               0x54
+#define        CC2520_CMD_MEMXWR               0x56
+#define        CC2520_CMD_BCLR                 0x58
+#define        CC2520_CMD_BSET                 0x59
+#define        CC2520_CMD_CTR_UCTR             0x60
+#define        CC2520_CMD_CBCMAC               0x64
+#define        CC2520_CMD_UCBCMAC              0x66
+#define        CC2520_CMD_CCM                  0x68
+#define        CC2520_CMD_UCCM                 0x6A
+#define        CC2520_CMD_ECB                  0x70
+#define        CC2520_CMD_ECBO                 0x72
+#define        CC2520_CMD_ECBX                 0x74
+#define        CC2520_CMD_INC                  0x78
+#define        CC2520_CMD_ABORT                0x7F
+#define        CC2520_CMD_REGISTER_READ        0x80
+#define        CC2520_CMD_REGISTER_WRITE       0xC0
+
+/* status registers */
+#define        CC2520_CHIPID                   0x40
+#define        CC2520_VERSION                  0x42
+#define        CC2520_EXTCLOCK                 0x44
+#define        CC2520_MDMCTRL0                 0x46
+#define        CC2520_MDMCTRL1                 0x47
+#define        CC2520_FREQEST                  0x48
+#define        CC2520_RXCTRL                   0x4A
+#define        CC2520_FSCTRL                   0x4C
+#define        CC2520_FSCAL0                   0x4E
+#define        CC2520_FSCAL1                   0x4F
+#define        CC2520_FSCAL2                   0x50
+#define        CC2520_FSCAL3                   0x51
+#define        CC2520_AGCCTRL0                 0x52
+#define        CC2520_AGCCTRL1                 0x53
+#define        CC2520_AGCCTRL2                 0x54
+#define        CC2520_AGCCTRL3                 0x55
+#define        CC2520_ADCTEST0                 0x56
+#define        CC2520_ADCTEST1                 0x57
+#define        CC2520_ADCTEST2                 0x58
+#define        CC2520_MDMTEST0                 0x5A
+#define        CC2520_MDMTEST1                 0x5B
+#define        CC2520_DACTEST0                 0x5C
+#define        CC2520_DACTEST1                 0x5D
+#define        CC2520_ATEST                    0x5E
+#define        CC2520_DACTEST2                 0x5F
+#define        CC2520_PTEST0                   0x60
+#define        CC2520_PTEST1                   0x61
+#define        CC2520_RESERVED                 0x62
+#define        CC2520_DPUBIST                  0x7A
+#define        CC2520_ACTBIST                  0x7C
+#define        CC2520_RAMBIST                  0x7E
+
+/* frame registers */
+#define        CC2520_FRMFILT0                 0x00
+#define        CC2520_FRMFILT1                 0x01
+#define        CC2520_SRCMATCH                 0x02
+#define        CC2520_SRCSHORTEN0              0x04
+#define        CC2520_SRCSHORTEN1              0x05
+#define        CC2520_SRCSHORTEN2              0x06
+#define        CC2520_SRCEXTEN0                0x08
+#define        CC2520_SRCEXTEN1                0x09
+#define        CC2520_SRCEXTEN2                0x0A
+#define        CC2520_FRMCTRL0                 0x0C
+#define        CC2520_FRMCTRL1                 0x0D
+#define        CC2520_RXENABLE0                0x0E
+#define        CC2520_RXENABLE1                0x0F
+#define        CC2520_EXCFLAG0                 0x10
+#define        CC2520_EXCFLAG1                 0x11
+#define        CC2520_EXCFLAG2                 0x12
+#define        CC2520_EXCMASKA0                0x14
+#define        CC2520_EXCMASKA1                0x15
+#define        CC2520_EXCMASKA2                0x16
+#define        CC2520_EXCMASKB0                0x18
+#define        CC2520_EXCMASKB1                0x19
+#define        CC2520_EXCMASKB2                0x1A
+#define        CC2520_EXCBINDX0                0x1C
+#define        CC2520_EXCBINDX1                0x1D
+#define        CC2520_EXCBINDY0                0x1E
+#define        CC2520_EXCBINDY1                0x1F
+#define        CC2520_GPIOCTRL0                0x20
+#define        CC2520_GPIOCTRL1                0x21
+#define        CC2520_GPIOCTRL2                0x22
+#define        CC2520_GPIOCTRL3                0x23
+#define        CC2520_GPIOCTRL4                0x24
+#define        CC2520_GPIOCTRL5                0x25
+#define        CC2520_GPIOPOLARITY             0x26
+#define        CC2520_GPIOCTRL                 0x28
+#define        CC2520_DPUCON                   0x2A
+#define        CC2520_DPUSTAT                  0x2C
+#define        CC2520_FREQCTRL                 0x2E
+#define        CC2520_FREQTUNE                 0x2F
+#define        CC2520_TXPOWER                  0x30
+#define        CC2520_TXCTRL                   0x31
+#define        CC2520_FSMSTAT0                 0x32
+#define        CC2520_FSMSTAT1                 0x33
+#define        CC2520_FIFOPCTRL                0x34
+#define        CC2520_FSMCTRL                  0x35
+#define        CC2520_CCACTRL0                 0x36
+#define        CC2520_CCACTRL1                 0x37
+#define        CC2520_RSSI                     0x38
+#define        CC2520_RSSISTAT                 0x39
+#define        CC2520_RXFIRST                  0x3C
+#define        CC2520_RXFIFOCNT                0x3E
+#define        CC2520_TXFIFOCNT                0x3F
+
+/* Driver private information */
+struct cc2520_private {
+       struct spi_device *spi;         /* SPI device structure */
+       struct ieee802154_dev *dev;     /* IEEE-802.15.4 device */
+       u8 *buf;                        /* SPI TX/Rx data buffer */
+       struct mutex buffer_mutex;      /* SPI buffer mutex */
+       bool is_tx;                     /* Flag for sync between Tx and Rx */
+       int fifo_pin;                   /* FIFO GPIO pin number */
+       struct work_struct fifop_irqwork;/* Workqueue for FIFOP */
+       spinlock_t lock;                /* Lock for is_tx */
+       struct completion tx_complete;  /* Work completion for Tx */
+};
+
+/* Generic Functions */
+static int
+cc2520_cmd_strobe(struct cc2520_private *priv, u8 cmd)
+{
+       int ret;
+       u8 status = 0xff;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer.len++] = cmd;
+       dev_vdbg(&priv->spi->dev,
+                "command strobe buf[0] = %02x\n",
+                priv->buf[0]);
+
+       ret = spi_sync(priv->spi, &msg);
+       if (!ret)
+               status = priv->buf[0];
+       dev_vdbg(&priv->spi->dev,
+                "buf[0] = %02x\n", priv->buf[0]);
+       mutex_unlock(&priv->buffer_mutex);
+
+       return ret;
+}
+
+static int
+cc2520_get_status(struct cc2520_private *priv, u8 *status)
+{
+       int ret;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer.len++] = CC2520_CMD_SNOP;
+       dev_vdbg(&priv->spi->dev,
+                "get status command buf[0] = %02x\n", priv->buf[0]);
+
+       ret = spi_sync(priv->spi, &msg);
+       if (!ret)
+               *status = priv->buf[0];
+       dev_vdbg(&priv->spi->dev,
+                "buf[0] = %02x\n", priv->buf[0]);
+       mutex_unlock(&priv->buffer_mutex);
+
+       return ret;
+}
+
+static int
+cc2520_write_register(struct cc2520_private *priv, u8 reg, u8 value)
+{
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+
+       if (reg <= CC2520_FREG_MASK) {
+               priv->buf[xfer.len++] = CC2520_CMD_REGISTER_WRITE | reg;
+               priv->buf[xfer.len++] = value;
+       } else {
+               priv->buf[xfer.len++] = CC2520_CMD_MEMORY_WRITE;
+               priv->buf[xfer.len++] = reg;
+               priv->buf[xfer.len++] = value;
+       }
+       status = spi_sync(priv->spi, &msg);
+       if (msg.status)
+               status = msg.status;
+
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int
+cc2520_write_ram(struct cc2520_private *priv, u16 reg, u8 len, u8 *data)
+{
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer_head = {
+               .len        = 0,
+               .tx_buf        = priv->buf,
+               .rx_buf        = priv->buf,
+       };
+
+       struct spi_transfer xfer_buf = {
+               .len = len,
+               .tx_buf = data,
+       };
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer_head.len++] = (CC2520_CMD_MEMORY_WRITE |
+                                               ((reg >> 8) & 0xff));
+       priv->buf[xfer_head.len++] = reg & 0xff;
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       status = spi_sync(priv->spi, &msg);
+       dev_dbg(&priv->spi->dev, "spi status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       mutex_unlock(&priv->buffer_mutex);
+       return status;
+}
+
+static int
+cc2520_read_register(struct cc2520_private *priv, u8 reg, u8 *data)
+{
+       int status;
+       struct spi_message msg;
+       struct spi_transfer xfer1 = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+
+       struct spi_transfer xfer2 = {
+               .len = 1,
+               .rx_buf = data,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer1, &msg);
+       spi_message_add_tail(&xfer2, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer1.len++] = CC2520_CMD_MEMORY_READ;
+       priv->buf[xfer1.len++] = reg;
+
+       status = spi_sync(priv->spi, &msg);
+       dev_dbg(&priv->spi->dev,
+               "spi status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int
+cc2520_write_txfifo(struct cc2520_private *priv, u8 *data, u8 len)
+{
+       int status;
+
+       /* The length byte must include the 2-byte FCS even
+        * when the FCS is computed by the hardware.
+        */
+       u8 len_byte = len + 2;
+
+       struct spi_message msg;
+
+       struct spi_transfer xfer_head = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+       struct spi_transfer xfer_len = {
+               .len = 1,
+               .tx_buf = &len_byte,
+       };
+       struct spi_transfer xfer_buf = {
+               .len = len,
+               .tx_buf = data,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_len, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer_head.len++] = CC2520_CMD_TXBUF;
+       dev_vdbg(&priv->spi->dev,
+                "TX_FIFO cmd buf[0] = %02x\n", priv->buf[0]);
+
+       status = spi_sync(priv->spi, &msg);
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       dev_vdbg(&priv->spi->dev, "buf[0] = %02x\n", priv->buf[0]);
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
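On the wire, a TXBUF write is the command byte, the PHY length byte (payload length plus the 2-byte FCS), and then the payload. A small runnable check of that framing for a 3-byte payload:

#include <stdint.h>
#include <stdio.h>

#define CC2520_CMD_TXBUF 0x3A

int main(void)
{
        uint8_t payload[] = { 0x01, 0x02, 0x03 };
        uint8_t frame[2 + sizeof(payload)];
        size_t i;

        frame[0] = CC2520_CMD_TXBUF;
        frame[1] = sizeof(payload) + 2; /* length includes hardware FCS */
        for (i = 0; i < sizeof(payload); i++)
                frame[2 + i] = payload[i];

        for (i = 0; i < sizeof(frame); i++)
                printf("%02x ", frame[i]);
        printf("\n");   /* prints: 3a 05 01 02 03 */
        return 0;
}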
+static int
+cc2520_read_rxfifo(struct cc2520_private *priv, u8 *data, u8 len, u8 *lqi)
+{
+       int status;
+       struct spi_message msg;
+
+       struct spi_transfer xfer_head = {
+               .len = 0,
+               .tx_buf = priv->buf,
+               .rx_buf = priv->buf,
+       };
+       struct spi_transfer xfer_buf = {
+               .len = len,
+               .rx_buf = data,
+       };
+
+       spi_message_init(&msg);
+       spi_message_add_tail(&xfer_head, &msg);
+       spi_message_add_tail(&xfer_buf, &msg);
+
+       mutex_lock(&priv->buffer_mutex);
+       priv->buf[xfer_head.len++] = CC2520_CMD_RXBUF;
+
+       dev_vdbg(&priv->spi->dev, "read rxfifo buf[0] = %02x\n", priv->buf[0]);
+       dev_vdbg(&priv->spi->dev, "buf[1] = %02x\n", priv->buf[1]);
+
+       status = spi_sync(priv->spi, &msg);
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       if (msg.status)
+               status = msg.status;
+       dev_vdbg(&priv->spi->dev, "status = %d\n", status);
+       dev_vdbg(&priv->spi->dev,
+                "return status buf[0] = %02x\n", priv->buf[0]);
+       dev_vdbg(&priv->spi->dev, "length buf[1] = %02x\n", priv->buf[1]);
+
+       mutex_unlock(&priv->buffer_mutex);
+
+       return status;
+}
+
+static int cc2520_start(struct ieee802154_dev *dev)
+{
+       return cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRXON);
+}
+
+static void cc2520_stop(struct ieee802154_dev *dev)
+{
+       cc2520_cmd_strobe(dev->priv, CC2520_CMD_SRFOFF);
+}
+
+static int
+cc2520_tx(struct ieee802154_dev *dev, struct sk_buff *skb)
+{
+       struct cc2520_private *priv = dev->priv;
+       unsigned long flags;
+       int rc;
+       u8 status = 0;
+
+       rc = cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX);
+       if (rc)
+               goto err_tx;
+
+       rc = cc2520_write_txfifo(priv, skb->data, skb->len);
+       if (rc)
+               goto err_tx;
+
+       rc = cc2520_get_status(priv, &status);
+       if (rc)
+               goto err_tx;
+
+       if (status & CC2520_STATUS_TX_UNDERFLOW) {
+               dev_err(&priv->spi->dev, "cc2520 tx underflow exception\n");
+               goto err_tx;
+       }
+
+       spin_lock_irqsave(&priv->lock, flags);
+       BUG_ON(priv->is_tx);
+       priv->is_tx = 1;
+       spin_unlock_irqrestore(&priv->lock, flags);
+
+       rc = cc2520_cmd_strobe(priv, CC2520_CMD_STXONCCA);
+       if (rc)
+               goto err;
+
+       rc = wait_for_completion_interruptible(&priv->tx_complete);
+       if (rc < 0)
+               goto err;
+
+       cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHTX);
+       cc2520_cmd_strobe(priv, CC2520_CMD_SRXON);
+
+       return rc;
+err:
+       spin_lock_irqsave(&priv->lock, flags);
+       priv->is_tx = 0;
+       spin_unlock_irqrestore(&priv->lock, flags);
+err_tx:
+       return rc;
+}
+
+static int cc2520_rx(struct cc2520_private *priv)
+{
+       u8 len = 0, lqi = 0, bytes = 1;
+       struct sk_buff *skb;
+
+       cc2520_read_rxfifo(priv, &len, bytes, &lqi);
+
+       if (len < 2 || len > IEEE802154_MTU)
+               return -EINVAL;
+
+       skb = alloc_skb(len, GFP_KERNEL);
+       if (!skb)
+               return -ENOMEM;
+
+       if (cc2520_read_rxfifo(priv, skb_put(skb, len), len, &lqi)) {
+               dev_dbg(&priv->spi->dev, "frame reception failed\n");
+               kfree_skb(skb);
+               return -EINVAL;
+       }
+
+       skb_trim(skb, skb->len - 2);
+
+       ieee802154_rx_irqsafe(priv->dev, skb, lqi);
+
+       dev_vdbg(&priv->spi->dev, "RXFIFO: %x %x\n", len, lqi);
+
+       return 0;
+}
+
+static int
+cc2520_ed(struct ieee802154_dev *dev, u8 *level)
+{
+       struct cc2520_private *priv = dev->priv;
+       u8 status = 0xff;
+       u8 rssi;
+       int ret;
+
+       ret = cc2520_read_register(priv, CC2520_RSSISTAT, &status);
+       if (ret)
+               return ret;
+
+       if (status != RSSI_VALID)
+               return -EINVAL;
+
+       ret = cc2520_read_register(priv, CC2520_RSSI, &rssi);
+       if (ret)
+               return ret;
+
+       /* level = RSSI(rssi) - OFFSET [dBm]: the offset is 78 dBm */
+       *level = rssi - RSSI_OFFSET;
+
+       return 0;
+}
+
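The raw RSSI register value is converted to dBm by subtracting the fixed offset, so a raw reading of 50 corresponds to 50 - 78 = -28 dBm. The ed callback then reports this through an unsigned level, so only the dBm arithmetic is sketched here:

#include <stdio.h>

#define RSSI_OFFSET 78

int main(void)
{
        int rssi = 50;  /* illustrative raw register value */

        printf("%d dBm\n", rssi - RSSI_OFFSET); /* prints: -28 dBm */
        return 0;
}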
+static int
+cc2520_set_channel(struct ieee802154_dev *dev, int page, int channel)
+{
+       struct cc2520_private *priv = dev->priv;
+       int ret;
+
+       might_sleep();
+       dev_dbg(&priv->spi->dev, "trying to set channel\n");
+
+       BUG_ON(page != 0);
+       BUG_ON(channel < CC2520_MINCHANNEL);
+       BUG_ON(channel > CC2520_MAXCHANNEL);
+
+       ret = cc2520_write_register(priv, CC2520_FREQCTRL,
+                                   11 + 5 * (channel - 11));
+
+       return ret;
+}
+
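The FREQCTRL register holds the carrier offset in MHz above 2394, so channel n (11-26) maps to 11 + 5 * (n - 11). A quick runnable check across the band, assuming that register definition:

#include <stdio.h>

int main(void)
{
        int n;

        for (n = 11; n <= 26; n++) {
                int freqctrl = 11 + 5 * (n - 11);

                /* carrier = 2394 + FREQCTRL MHz, per the assumed definition */
                printf("channel %2d -> FREQCTRL %3d -> %d MHz\n",
                       n, freqctrl, 2394 + freqctrl);
        }
        return 0;       /* channel 11 -> 2405 MHz ... channel 26 -> 2480 MHz */
}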
+static int
+cc2520_filter(struct ieee802154_dev *dev,
+             struct ieee802154_hw_addr_filt *filt, unsigned long changed)
+{
+       struct cc2520_private *priv = dev->priv;
+
+       if (changed & IEEE802515_AFILT_PANID_CHANGED) {
+               u16 panid = le16_to_cpu(filt->pan_id);
+
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for pan id\n");
+               cc2520_write_ram(priv, CC2520RAM_PANID,
+                                sizeof(panid), (u8 *)&panid);
+       }
+
+       if (changed & IEEE802515_AFILT_IEEEADDR_CHANGED) {
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for IEEE addr\n");
+               cc2520_write_ram(priv, CC2520RAM_IEEEADDR,
+                                sizeof(filt->ieee_addr),
+                                (u8 *)&filt->ieee_addr);
+       }
+
+       if (changed & IEEE802515_AFILT_SADDR_CHANGED) {
+               u16 addr = le16_to_cpu(filt->short_addr);
+
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for saddr\n");
+               cc2520_write_ram(priv, CC2520RAM_SHORTADDR,
+                                sizeof(addr), (u8 *)&addr);
+       }
+
+       if (changed & IEEE802515_AFILT_PANC_CHANGED) {
+               dev_vdbg(&priv->spi->dev,
+                        "cc2520_filter called for panc change\n");
+               if (filt->pan_coord)
+                       cc2520_write_register(priv, CC2520_FRMFILT0, 0x02);
+               else
+                       cc2520_write_register(priv, CC2520_FRMFILT0, 0x00);
+       }
+
+       return 0;
+}
+
+static struct ieee802154_ops cc2520_ops = {
+       .owner = THIS_MODULE,
+       .start = cc2520_start,
+       .stop = cc2520_stop,
+       .xmit = cc2520_tx,
+       .ed = cc2520_ed,
+       .set_channel = cc2520_set_channel,
+       .set_hw_addr_filt = cc2520_filter,
+};
+
+static int cc2520_register(struct cc2520_private *priv)
+{
+       int ret = -ENOMEM;
+
+       priv->dev = ieee802154_alloc_device(sizeof(*priv), &cc2520_ops);
+       if (!priv->dev)
+               goto err_ret;
+
+       priv->dev->priv = priv;
+       priv->dev->parent = &priv->spi->dev;
+       priv->dev->extra_tx_headroom = 0;
+
+       /* We support only the 2.4 GHz band */
+       priv->dev->phy->channels_supported[0] = 0x7FFF800;
+       priv->dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
+
+       dev_vdbg(&priv->spi->dev, "registered cc2520\n");
+       ret = ieee802154_register_device(priv->dev);
+       if (ret)
+               goto err_free_device;
+
+       return 0;
+
+err_free_device:
+       ieee802154_free_device(priv->dev);
+err_ret:
+       return ret;
+}
+
+static void cc2520_fifop_irqwork(struct work_struct *work)
+{
+       struct cc2520_private *priv
+               = container_of(work, struct cc2520_private, fifop_irqwork);
+
+       dev_dbg(&priv->spi->dev, "fifop interrupt received\n");
+
+       if (gpio_get_value(priv->fifo_pin))
+               cc2520_rx(priv);
+       else
+               dev_dbg(&priv->spi->dev, "rxfifo overflow\n");
+
+       cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHRX);
+       cc2520_cmd_strobe(priv, CC2520_CMD_SFLUSHRX);
+}
+
+static irqreturn_t cc2520_fifop_isr(int irq, void *data)
+{
+       struct cc2520_private *priv = data;
+
+       schedule_work(&priv->fifop_irqwork);
+
+       return IRQ_HANDLED;
+}
+
+static irqreturn_t cc2520_sfd_isr(int irq, void *data)
+{
+       struct cc2520_private *priv = data;
+       unsigned long flags;
+
+       spin_lock_irqsave(&priv->lock, flags);
+       if (priv->is_tx) {
+               priv->is_tx = 0;
+               spin_unlock_irqrestore(&priv->lock, flags);
+               dev_dbg(&priv->spi->dev, "SFD for TX\n");
+               complete(&priv->tx_complete);
+       } else {
+               spin_unlock_irqrestore(&priv->lock, flags);
+               dev_dbg(&priv->spi->dev, "SFD for RX\n");
+       }
+
+       return IRQ_HANDLED;
+}
+
+static int cc2520_hw_init(struct cc2520_private *priv)
+{
+       u8 status = 0, state = 0xff;
+       int ret;
+       int timeout = 100;
+
+       ret = cc2520_read_register(priv, CC2520_FSMSTAT1, &state);
+       if (ret)
+               goto err_ret;
+
+       if (state != STATE_IDLE)
+               return -EINVAL;
+
+       do {
+               ret = cc2520_get_status(priv, &status);
+               if (ret)
+                       goto err_ret;
+
+               if (timeout-- <= 0) {
+                       dev_err(&priv->spi->dev, "oscillator start failed!\n");
+                       return -ETIMEDOUT;
+               }
+               udelay(1);
+       } while (!(status & CC2520_STATUS_XOSC32M_STABLE));
+
+       dev_vdbg(&priv->spi->dev, "oscillator brought up\n");
+
+       /* Register default values: see section 28.1 of the datasheet */
+       ret = cc2520_write_register(priv, CC2520_TXPOWER, 0xF7);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_CCACTRL0, 0x1A);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_MDMCTRL0, 0x85);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_MDMCTRL1, 0x14);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_RXCTRL, 0x3f);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FSCTRL, 0x5a);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FSCAL1, 0x2b);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_AGCCTRL1, 0x11);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_ADCTEST0, 0x10);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_ADCTEST1, 0x0e);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_ADCTEST2, 0x03);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FRMCTRL0, 0x60);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FRMCTRL1, 0x03);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FRMFILT0, 0x00);
+       if (ret)
+               goto err_ret;
+
+       ret = cc2520_write_register(priv, CC2520_FIFOPCTRL, 127);
+       if (ret)
+               goto err_ret;
+
+       return 0;
+
+err_ret:
+       return ret;
+}
+
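The same initialisation could be expressed as a table walk instead of a ladder of calls; a hedged sketch of that purely stylistic alternative (entries beyond the first few are elided):

/* Sketch: table-driven equivalent of the register ladder above */
static const struct {
        u8 reg;
        u8 value;
} cc2520_init_regs[] = {
        { CC2520_TXPOWER,  0xF7 },
        { CC2520_CCACTRL0, 0x1A },
        { CC2520_MDMCTRL0, 0x85 },
        { CC2520_MDMCTRL1, 0x14 },
        /* ... remaining entries elided ... */
};

static int cc2520_write_init_regs(struct cc2520_private *priv)
{
        int i, ret;

        for (i = 0; i < ARRAY_SIZE(cc2520_init_regs); i++) {
                ret = cc2520_write_register(priv, cc2520_init_regs[i].reg,
                                            cc2520_init_regs[i].value);
                if (ret)
                        return ret;
        }
        return 0;
}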
+static struct cc2520_platform_data *
+cc2520_get_platform_data(struct spi_device *spi)
+{
+       struct cc2520_platform_data *pdata;
+       struct device_node *np = spi->dev.of_node;
+       struct cc2520_private *priv = spi_get_drvdata(spi);
+
+       if (!np)
+               return spi->dev.platform_data;
+
+       pdata = devm_kzalloc(&spi->dev, sizeof(*pdata), GFP_KERNEL);
+       if (!pdata)
+               goto done;
+
+       pdata->fifo = of_get_named_gpio(np, "fifo-gpio", 0);
+       priv->fifo_pin = pdata->fifo;
+
+       pdata->fifop = of_get_named_gpio(np, "fifop-gpio", 0);
+
+       pdata->sfd = of_get_named_gpio(np, "sfd-gpio", 0);
+       pdata->cca = of_get_named_gpio(np, "cca-gpio", 0);
+       pdata->vreg = of_get_named_gpio(np, "vreg-gpio", 0);
+       pdata->reset = of_get_named_gpio(np, "reset-gpio", 0);
+
+       spi->dev.platform_data = pdata;
+
+done:
+       return pdata;
+}
+
+static int cc2520_probe(struct spi_device *spi)
+{
+       struct cc2520_private *priv;
+       struct pinctrl *pinctrl;
+       struct cc2520_platform_data *pdata;
+       int ret;
+
+       priv = devm_kzalloc(&spi->dev,
+                           sizeof(struct cc2520_private), GFP_KERNEL);
+       if (!priv) {
+               ret = -ENOMEM;
+               goto err_ret;
+       }
+
+       spi_set_drvdata(spi, priv);
+
+       pinctrl = devm_pinctrl_get_select_default(&spi->dev);
+       if (IS_ERR(pinctrl))
+               dev_warn(&spi->dev,
+                        "pinctrl pins are not configured\n");
+
+       pdata = cc2520_get_platform_data(spi);
+       if (!pdata) {
+               dev_err(&spi->dev, "no platform data\n");
+               return -EINVAL;
+       }
+
+       priv->spi = spi;
+
+       priv->buf = devm_kzalloc(&spi->dev,
+                                SPI_COMMAND_BUFFER, GFP_KERNEL);
+       if (!priv->buf) {
+               ret = -ENOMEM;
+               goto err_ret;
+       }
+
+       mutex_init(&priv->buffer_mutex);
+       INIT_WORK(&priv->fifop_irqwork, cc2520_fifop_irqwork);
+       spin_lock_init(&priv->lock);
+       init_completion(&priv->tx_complete);
+
+       /* Request all the gpio's */
+       if (!gpio_is_valid(pdata->fifo)) {
+               dev_err(&spi->dev, "fifo gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->fifo,
+                                   GPIOF_IN, "fifo");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->cca)) {
+               dev_err(&spi->dev, "cca gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->cca,
+                                   GPIOF_IN, "cca");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->fifop)) {
+               dev_err(&spi->dev, "fifop gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->fifop,
+                                   GPIOF_IN, "fifop");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->sfd)) {
+               dev_err(&spi->dev, "sfd gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->sfd,
+                                   GPIOF_IN, "sfd");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->reset)) {
+               dev_err(&spi->dev, "reset gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->reset,
+                                   GPIOF_OUT_INIT_LOW, "reset");
+       if (ret)
+               goto err_hw_init;
+
+       if (!gpio_is_valid(pdata->vreg)) {
+               dev_err(&spi->dev, "vreg gpio is not valid\n");
+               ret = -EINVAL;
+               goto err_hw_init;
+       }
+
+       ret = devm_gpio_request_one(&spi->dev, pdata->vreg,
+                                   GPIOF_OUT_INIT_LOW, "vreg");
+       if (ret)
+               goto err_hw_init;
+
+       gpio_set_value(pdata->vreg, HIGH);
+       usleep_range(100, 150);
+
+       gpio_set_value(pdata->reset, HIGH);
+       usleep_range(200, 250);
+
+       ret = cc2520_hw_init(priv);
+       if (ret)
+               goto err_hw_init;
+
+       /* Set up fifop interrupt */
+       ret = devm_request_irq(&spi->dev,
+                              gpio_to_irq(pdata->fifop),
+                              cc2520_fifop_isr,
+                              IRQF_TRIGGER_RISING,
+                              dev_name(&spi->dev),
+                              priv);
+       if (ret) {
+               dev_err(&spi->dev, "could not get fifop irq\n");
+               goto err_hw_init;
+       }
+
+       /* Set up sfd interrupt */
+       ret = devm_request_irq(&spi->dev,
+                              gpio_to_irq(pdata->sfd),
+                              cc2520_sfd_isr,
+                              IRQF_TRIGGER_FALLING,
+                              dev_name(&spi->dev),
+                              priv);
+       if (ret) {
+               dev_err(&spi->dev, "could not get sfd irq\n");
+               goto err_hw_init;
+       }
+
+       ret = cc2520_register(priv);
+       if (ret)
+               goto err_hw_init;
+
+       return 0;
+
+err_hw_init:
+       mutex_destroy(&priv->buffer_mutex);
+       flush_work(&priv->fifop_irqwork);
+
+err_ret:
+       return ret;
+}
+
+static int cc2520_remove(struct spi_device *spi)
+{
+       struct cc2520_private *priv = spi_get_drvdata(spi);
+
+       mutex_destroy(&priv->buffer_mutex);
+       flush_work(&priv->fifop_irqwork);
+
+       ieee802154_unregister_device(priv->dev);
+       ieee802154_free_device(priv->dev);
+
+       return 0;
+}
+
+static const struct spi_device_id cc2520_ids[] = {
+       {"cc2520", },
+       {},
+};
+MODULE_DEVICE_TABLE(spi, cc2520_ids);
+
+static const struct of_device_id cc2520_of_ids[] = {
+       {.compatible = "ti,cc2520", },
+       {},
+};
+MODULE_DEVICE_TABLE(of, cc2520_of_ids);
+
+/* SPI driver structure */
+static struct spi_driver cc2520_driver = {
+       .driver = {
+               .name = "cc2520",
+               .bus = &spi_bus_type,
+               .owner = THIS_MODULE,
+               .of_match_table = of_match_ptr(cc2520_of_ids),
+       },
+       .id_table = cc2520_ids,
+       .probe = cc2520_probe,
+       .remove = cc2520_remove,
+};
+module_spi_driver(cc2520_driver);
+
+MODULE_AUTHOR("Varka Bhadram <varkab@cdac.in>");
+MODULE_DESCRIPTION("CC2520 Transceiver Driver");
+MODULE_LICENSE("GPL v2");
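On boards without device tree, the driver falls back to SPI platform data; a hedged sketch of registering it from board code, with placeholder GPIO numbers and bus settings:

/* Sketch: board-file registration for non-DT systems.
 * Requires <linux/spi/spi.h> and <linux/spi/cc2520.h>; all numbers
 * below are placeholders, not values from any real board.
 */
static struct cc2520_platform_data cc2520_pdata = {
        .fifo   = 16,
        .fifop  = 17,
        .sfd    = 18,
        .cca    = 19,
        .vreg   = 20,
        .reset  = 21,
};

static struct spi_board_info cc2520_board_info[] __initdata = {
        {
                .modalias       = "cc2520",
                .bus_num        = 1,
                .chip_select    = 0,
                .max_speed_hz   = 4000000,
                .platform_data  = &cc2520_pdata,
        },
};

/* spi_register_board_info(cc2520_board_info, ARRAY_SIZE(cc2520_board_info)); */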
index 78f18be..9ce854f 100644 (file)
@@ -343,7 +343,8 @@ static int ieee802154fake_probe(struct platform_device *pdev)
        if (!phy)
                return -ENOMEM;
 
-       dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
+       dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d",
+                          NET_NAME_UNKNOWN, ieee802154_fake_setup);
        if (!dev) {
                wpan_phy_free(phy);
                return -ENOMEM;
index 4048062..9e6a124 100644 (file)
@@ -610,10 +610,95 @@ out:
        return IRQ_HANDLED;
 }
 
+static int mrf24j40_hw_init(struct mrf24j40 *devrec)
+{
+       int ret;
+       u8 val;
+
+       /* Initialize the device.
+        * From datasheet section 3.2: Initialization.
+        */
+       ret = write_short_reg(devrec, REG_SOFTRST, 0x07);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_PACON2, 0x98);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_TXSTBL, 0x95);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON0, 0x03);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON1, 0x01);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON2, 0x80);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON6, 0x90);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON7, 0x80);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_RFCON8, 0x10);
+       if (ret)
+               goto err_ret;
+
+       ret = write_long_reg(devrec, REG_SLPCON1, 0x21);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_BBREG2, 0x80);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_CCAEDTH, 0x60);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_BBREG6, 0x40);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_RFCTL, 0x04);
+       if (ret)
+               goto err_ret;
+
+       ret = write_short_reg(devrec, REG_RFCTL, 0x0);
+       if (ret)
+               goto err_ret;
+
+       udelay(192);
+
+       /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
+       ret = read_short_reg(devrec, REG_RXMCR, &val);
+       if (ret)
+               goto err_ret;
+
+       val &= ~0x3; /* Clear RX mode (normal) */
+
+       ret = write_short_reg(devrec, REG_RXMCR, val);
+       if (ret)
+               goto err_ret;
+
+       return 0;
+
+err_ret:
+       return ret;
+}
+
 static int mrf24j40_probe(struct spi_device *spi)
 {
        int ret = -ENOMEM;
-       u8 val;
        struct mrf24j40 *devrec;
 
        printk(KERN_INFO "mrf24j40: probe(). IRQ: %d\n", spi->irq);
@@ -650,31 +735,9 @@ static int mrf24j40_probe(struct spi_device *spi)
        if (ret)
                goto err_register_device;
 
-       /* Initialize the device.
-               From datasheet section 3.2: Initialization. */
-       write_short_reg(devrec, REG_SOFTRST, 0x07);
-       write_short_reg(devrec, REG_PACON2, 0x98);
-       write_short_reg(devrec, REG_TXSTBL, 0x95);
-       write_long_reg(devrec, REG_RFCON0, 0x03);
-       write_long_reg(devrec, REG_RFCON1, 0x01);
-       write_long_reg(devrec, REG_RFCON2, 0x80);
-       write_long_reg(devrec, REG_RFCON6, 0x90);
-       write_long_reg(devrec, REG_RFCON7, 0x80);
-       write_long_reg(devrec, REG_RFCON8, 0x10);
-       write_long_reg(devrec, REG_SLPCON1, 0x21);
-       write_short_reg(devrec, REG_BBREG2, 0x80);
-       write_short_reg(devrec, REG_CCAEDTH, 0x60);
-       write_short_reg(devrec, REG_BBREG6, 0x40);
-       write_short_reg(devrec, REG_RFCTL, 0x04);
-       write_short_reg(devrec, REG_RFCTL, 0x0);
-       udelay(192);
-
-       /* Set RX Mode. RXMCR<1:0>: 0x0 normal, 0x1 promisc, 0x2 error */
-       ret = read_short_reg(devrec, REG_RXMCR, &val);
+       ret = mrf24j40_hw_init(devrec);
        if (ret)
-               goto err_read_reg;
-       val &= ~0x3; /* Clear RX mode (normal) */
-       write_short_reg(devrec, REG_RXMCR, val);
+               goto err_hw_init;
 
        ret = devm_request_threaded_irq(&spi->dev,
                                        spi->irq,
@@ -692,7 +755,7 @@ static int mrf24j40_probe(struct spi_device *spi)
        return 0;
 
 err_irq:
-err_read_reg:
+err_hw_init:
        ieee802154_unregister_device(devrec->dev);
 err_register_device:
        ieee802154_free_device(devrec->dev);
index 46a7790..d2d4a3d 100644 (file)
@@ -269,8 +269,8 @@ static int __init ifb_init_one(int index)
        struct ifb_private *dp;
        int err;
 
-       dev_ifb = alloc_netdev(sizeof(struct ifb_private),
-                                "ifb%d", ifb_setup);
+       dev_ifb = alloc_netdev(sizeof(struct ifb_private), "ifb%d",
+                              NET_NAME_UNKNOWN, ifb_setup);
 
        if (!dev_ifb)
                return -ENOMEM;
index 96fe365..e638893 100644 (file)
@@ -553,8 +553,8 @@ static int kingsun_probe(struct usb_interface *intf,
        return 0;
 
 free_mem:
-       if (kingsun->out_buf) kfree(kingsun->out_buf);
-       if (kingsun->in_buf) kfree(kingsun->in_buf);
+       kfree(kingsun->out_buf);
+       kfree(kingsun->in_buf);
        free_netdev(net);
 err_out1:
        return ret;
index bb96409..8f22625 100644 (file)
@@ -195,7 +195,7 @@ static __net_init int loopback_net_init(struct net *net)
        int err;
 
        err = -ENOMEM;
-       dev = alloc_netdev(0, "lo", loopback_setup);
+       dev = alloc_netdev(0, "lo", NET_NAME_UNKNOWN, loopback_setup);
        if (!dev)
                goto out;
 
index b57c224..f3230ee 100644 (file)
@@ -74,7 +74,6 @@
 #include <linux/of_platform.h>
 #include <linux/of_device.h>
 #include <linux/uaccess.h>
-#include <asm/irq.h>
 
 
 MODULE_AUTHOR("Tom Lendacky <thomas.lendacky@amd.com>");
@@ -85,6 +84,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XGBE_PHY_ID    0x000162d0
 #define XGBE_PHY_MASK  0xfffffff0
 
+#define XGBE_PHY_SPEEDSET_PROPERTY     "amd,speed-set"
+
 #define XGBE_AN_INT_CMPLT              0x01
 #define XGBE_AN_INC_LINK               0x02
 #define XGBE_AN_PG_RCV                 0x04
@@ -94,6 +95,8 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #define XNP_MP_FORMATTED               (1 << 13)
 #define XNP_NP_EXCHANGE                        (1 << 15)
 
+#define XGBE_PHY_RATECHANGE_COUNT      500
+
 #ifndef MDIO_PMA_10GBR_PMD_CTRL
 #define MDIO_PMA_10GBR_PMD_CTRL                0x0096
 #endif
@@ -116,10 +119,13 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 #endif
 
 /* SerDes integration register offsets */
+#define SIR0_KR_RT_1                   0x002c
 #define SIR0_STATUS                    0x0040
 #define SIR1_SPEED                     0x0000
 
 /* SerDes integration register entry bit positions and sizes */
+#define SIR0_KR_RT_1_RESET_INDEX       11
+#define SIR0_KR_RT_1_RESET_WIDTH       1
 #define SIR0_STATUS_RX_READY_INDEX     0
 #define SIR0_STATUS_RX_READY_WIDTH     1
 #define SIR0_STATUS_TX_READY_INDEX     8
@@ -145,7 +151,7 @@ MODULE_DESCRIPTION("AMD 10GbE (amd-xgbe) PHY driver");
 
 #define SPEED_2500_CDR                 0x2
 #define SPEED_2500_PLL                 0x0
-#define SPEED_2500_RATE                        0x2
+#define SPEED_2500_RATE                        0x1
 #define SPEED_2500_TXAMP               0xf
 #define SPEED_2500_WORD                        0x1
 
@@ -192,6 +198,16 @@ do {                                                                       \
        (_var) |= (((_val) & ((0x1 << (_width)) - 1)) << (_index));     \
 } while (0)
 
+#define XSIR_GET_BITS(_var, _prefix, _field)                           \
+       GET_BITS((_var),                                                \
+                _prefix##_##_field##_INDEX,                            \
+                _prefix##_##_field##_WIDTH)
+
+#define XSIR_SET_BITS(_var, _prefix, _field, _val)                     \
+       SET_BITS((_var),                                                \
+                _prefix##_##_field##_INDEX,                            \
+                _prefix##_##_field##_WIDTH, (_val))
+
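The XSIR helpers simply paste the prefix and field names onto _INDEX/_WIDTH, so XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) becomes GET_BITS(status, SIR0_STATUS_RX_READY_INDEX, SIR0_STATUS_RX_READY_WIDTH). A standalone demonstration, with GET_BITS reconstructed to match the SET_BITS convention used earlier in this file:

#include <stdio.h>

#define GET_BITS(_var, _index, _width)                                  \
        (((_var) >> (_index)) & ((0x1 << (_width)) - 1))

#define SIR0_STATUS_RX_READY_INDEX      0
#define SIR0_STATUS_RX_READY_WIDTH      1
#define SIR0_STATUS_TX_READY_INDEX      8
#define SIR0_STATUS_TX_READY_WIDTH      1

#define XSIR_GET_BITS(_var, _prefix, _field)                            \
        GET_BITS((_var),                                                \
                 _prefix##_##_field##_INDEX,                            \
                 _prefix##_##_field##_WIDTH)

int main(void)
{
        unsigned int status = 0x101;    /* both ready bits set (illustrative) */

        printf("rx=%u tx=%u\n",
               XSIR_GET_BITS(status, SIR0_STATUS, RX_READY),
               XSIR_GET_BITS(status, SIR0_STATUS, TX_READY));
        return 0;       /* prints: rx=1 tx=1 */
}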
 /* Macros for reading or writing SerDes integration registers
  *  The ioread macros will get bit fields or full values using the
  *  register definitions formed using the input names
@@ -292,6 +308,11 @@ enum amd_xgbe_phy_mode {
        AMD_XGBE_MODE_KX,
 };
 
+enum amd_xgbe_phy_speedset {
+       AMD_XGBE_PHY_SPEEDSET_1000_10000,
+       AMD_XGBE_PHY_SPEEDSET_2500_10000,
+};
+
 struct amd_xgbe_phy_priv {
        struct platform_device *pdev;
        struct device *dev;
@@ -311,6 +332,7 @@ struct amd_xgbe_phy_priv {
        /* Maintain link status for re-starting auto-negotiation */
        unsigned int link;
        enum amd_xgbe_phy_mode mode;
+       unsigned int speed_set;
 
        /* Auto-negotiation state machine support */
        struct mutex an_mutex;
@@ -380,14 +402,25 @@ static void amd_xgbe_phy_serdes_start_ratechange(struct phy_device *phydev)
 static void amd_xgbe_phy_serdes_complete_ratechange(struct phy_device *phydev)
 {
        struct amd_xgbe_phy_priv *priv = phydev->priv;
+       unsigned int wait;
+       u16 status;
 
        /* Release Rx and Tx ratechange */
        XSIR1_IOWRITE_BITS(priv, SIR1_SPEED, RATECHANGE, 0);
 
        /* Wait for Rx and Tx ready */
-       while (!XSIR0_IOREAD_BITS(priv, SIR0_STATUS, RX_READY) &&
-              !XSIR0_IOREAD_BITS(priv, SIR0_STATUS, TX_READY))
-               usleep_range(10, 20);
+       wait = XGBE_PHY_RATECHANGE_COUNT;
+       while (wait--) {
+               usleep_range(50, 75);
+
+               status = XSIR0_IOREAD(priv, SIR0_STATUS);
+               if (XSIR_GET_BITS(status, SIR0_STATUS, RX_READY) &&
+                   XSIR_GET_BITS(status, SIR0_STATUS, TX_READY))
+                       return;
+       }
+
+       netdev_dbg(phydev->attached_dev, "SerDes rx/tx not ready (%#hx)\n",
+                  status);
 }
 
 static int amd_xgbe_phy_xgmii_mode(struct phy_device *phydev)
@@ -546,10 +579,14 @@ static int amd_xgbe_phy_switch_mode(struct phy_device *phydev)
        int ret;
 
        /* If we are in KR switch to KX, and vice-versa */
-       if (priv->mode == AMD_XGBE_MODE_KR)
-               ret = amd_xgbe_phy_gmii_mode(phydev);
-       else
+       if (priv->mode == AMD_XGBE_MODE_KR) {
+               if (priv->speed_set == AMD_XGBE_PHY_SPEEDSET_1000_10000)
+                       ret = amd_xgbe_phy_gmii_mode(phydev);
+               else
+                       ret = amd_xgbe_phy_gmii_2500_mode(phydev);
+       } else {
                ret = amd_xgbe_phy_xgmii_mode(phydev);
+       }
 
        return ret;
 }
@@ -602,9 +639,13 @@ static enum amd_xgbe_phy_an amd_xgbe_an_tx_training(struct phy_device *phydev,
        if (ret < 0)
                return AMD_XGBE_AN_ERROR;
 
+       XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 1);
+
        ret |= 0x01;
        phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_PMD_CTRL, ret);
 
+       XSIR0_IOWRITE_BITS(priv, SIR0_KR_RT_1, RESET, 0);
+
        return AMD_XGBE_AN_EVENT;
 }
 
@@ -713,7 +754,8 @@ static enum amd_xgbe_phy_an amd_xgbe_an_start(struct phy_device *phydev)
        else
                ret &= ~0x80;
 
-       if (phydev->supported & SUPPORTED_1000baseKX_Full)
+       if ((phydev->supported & SUPPORTED_1000baseKX_Full) ||
+           (phydev->supported & SUPPORTED_2500baseX_Full))
                ret |= 0x20;
        else
                ret &= ~0x20;
@@ -815,6 +857,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
        struct phy_device *phydev = priv->phydev;
        enum amd_xgbe_phy_an cur_state;
        int sleep;
+       unsigned int an_supported = 0;
 
        while (1) {
                mutex_lock(&priv->an_mutex);
@@ -824,6 +867,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
                switch (priv->an_state) {
                case AMD_XGBE_AN_START:
                        priv->an_state = amd_xgbe_an_start(phydev);
+                       an_supported = 0;
                        break;
 
                case AMD_XGBE_AN_EVENT:
@@ -832,6 +876,7 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
 
                case AMD_XGBE_AN_PAGE_RECEIVED:
                        priv->an_state = amd_xgbe_an_page_received(phydev);
+                       an_supported++;
                        break;
 
                case AMD_XGBE_AN_INCOMPAT_LINK:
@@ -839,6 +884,11 @@ static void amd_xgbe_an_state_machine(struct work_struct *work)
                        break;
 
                case AMD_XGBE_AN_COMPLETE:
+                       netdev_info(phydev->attached_dev, "%s successful\n",
+                                   an_supported ? "Auto negotiation"
+                                                : "Parallel detection");
+                       /* fall through */
+
                case AMD_XGBE_AN_NO_LINK:
                case AMD_XGBE_AN_EXIT:
                        goto exit_unlock;
@@ -896,14 +946,22 @@ static int amd_xgbe_phy_soft_reset(struct phy_device *phydev)
 
 static int amd_xgbe_phy_config_init(struct phy_device *phydev)
 {
+       struct amd_xgbe_phy_priv *priv = phydev->priv;
+
        /* Initialize supported features */
        phydev->supported = SUPPORTED_Autoneg;
        phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
        phydev->supported |= SUPPORTED_Backplane;
-       phydev->supported |= SUPPORTED_1000baseKX_Full |
-                            SUPPORTED_2500baseX_Full;
        phydev->supported |= SUPPORTED_10000baseKR_Full |
                             SUPPORTED_10000baseR_FEC;
+       switch (priv->speed_set) {
+       case AMD_XGBE_PHY_SPEEDSET_1000_10000:
+               phydev->supported |= SUPPORTED_1000baseKX_Full;
+               break;
+       case AMD_XGBE_PHY_SPEEDSET_2500_10000:
+               phydev->supported |= SUPPORTED_2500baseX_Full;
+               break;
+       }
        phydev->advertising = phydev->supported;
 
        /* Turn off and clear interrupts */
@@ -1020,9 +1078,9 @@ static int amd_xgbe_phy_update_link(struct phy_device *phydev)
         * (re-)established (cable connected after the interface is
         * up, etc.), the link status may report no link. If there
         * is no link, try switching modes and checking the status
-        * again.
+        * again if auto negotiation is enabled.
         */
-       check_again = 1;
+       check_again = (phydev->autoneg == AUTONEG_ENABLE) ? 1 : 0;
 again:
        /* Link status is latched low, so read once to clear
         * and then read again to get current state
@@ -1038,8 +1096,10 @@ again:
        phydev->link = (ret & MDIO_STAT1_LSTATUS) ? 1 : 0;
 
        if (!phydev->link) {
-               ret = amd_xgbe_phy_switch_mode(phydev);
                if (check_again) {
+                       ret = amd_xgbe_phy_switch_mode(phydev);
+                       if (ret < 0)
+                               return ret;
                        check_again = 0;
                        goto again;
                }
@@ -1059,6 +1119,7 @@ again:
 
 static int amd_xgbe_phy_read_status(struct phy_device *phydev)
 {
+       struct amd_xgbe_phy_priv *priv = phydev->priv;
        u32 mmd_mask = phydev->c45_ids.devices_in_package;
        int ret, mode, ad_ret, lp_ret;
 
@@ -1108,9 +1169,19 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
                                        return ret;
                        }
                } else {
-                       phydev->speed = SPEED_1000;
+                       int (*mode_fcn)(struct phy_device *);
+
+                       if (priv->speed_set ==
+                           AMD_XGBE_PHY_SPEEDSET_1000_10000) {
+                               phydev->speed = SPEED_1000;
+                               mode_fcn = amd_xgbe_phy_gmii_mode;
+                       } else {
+                               phydev->speed = SPEED_2500;
+                               mode_fcn = amd_xgbe_phy_gmii_2500_mode;
+                       }
+
                        if (mode == MDIO_PCS_CTRL2_10GBR) {
-                               ret = amd_xgbe_phy_gmii_mode(phydev);
+                               ret = mode_fcn(phydev);
                                if (ret < 0)
                                        return ret;
                        }
@@ -1118,8 +1189,15 @@ static int amd_xgbe_phy_read_status(struct phy_device *phydev)
 
                phydev->duplex = DUPLEX_FULL;
        } else {
-               phydev->speed = (mode == MDIO_PCS_CTRL2_10GBR) ? SPEED_10000
-                                                              : SPEED_1000;
+               if (mode == MDIO_PCS_CTRL2_10GBR) {
+                       phydev->speed = SPEED_10000;
+               } else {
+                       if (priv->speed_set ==
+                           AMD_XGBE_PHY_SPEEDSET_1000_10000)
+                               phydev->speed = SPEED_1000;
+                       else
+                               phydev->speed = SPEED_2500;
+               }
                phydev->duplex = DUPLEX_FULL;
                phydev->pause = 0;
                phydev->asym_pause = 0;
@@ -1176,6 +1254,8 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
        struct platform_device *pdev;
        struct device *dev;
        char *wq_name;
+       const __be32 *property;
+       unsigned int speed_set;
        int ret;
 
        if (!phydev->dev.of_node)
@@ -1227,6 +1307,26 @@ static int amd_xgbe_phy_probe(struct phy_device *phydev)
                goto err_sir0;
        }
 
+       /* Get the device speed set property */
+       speed_set = 0;
+       property = of_get_property(dev->of_node, XGBE_PHY_SPEEDSET_PROPERTY,
+                                  NULL);
+       if (property)
+               speed_set = be32_to_cpu(*property);
+
+       switch (speed_set) {
+       case 0:
+               priv->speed_set = AMD_XGBE_PHY_SPEEDSET_1000_10000;
+               break;
+       case 1:
+               priv->speed_set = AMD_XGBE_PHY_SPEEDSET_2500_10000;
+               break;
+       default:
+               dev_err(dev, "invalid amd,speed-set property\n");
+               ret = -EINVAL;
+               goto err_sir1;
+       }
+
        priv->link = 1;
 
        ret = phy_read_mmd(phydev, MDIO_MMD_PCS, MDIO_CTRL2);
index 6c622ae..fdc1b41 100644 (file)
 #include <linux/string.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio/consumer.h>
 
 #define AT803X_INTR_ENABLE                     0x12
 #define AT803X_INTR_STATUS                     0x13
+#define AT803X_SMART_SPEED                     0x14
+#define AT803X_LED_CONTROL                     0x18
 #define AT803X_WOL_ENABLE                      0x01
 #define AT803X_DEVICE_ADDR                     0x03
 #define AT803X_LOC_MAC_ADDR_0_15_OFFSET                0x804C
 #define AT803X_DEBUG_SYSTEM_MODE_CTRL          0x05
 #define AT803X_DEBUG_RGMII_TX_CLK_DLY          BIT(8)
 
+#define ATH8030_PHY_ID 0x004dd076
+#define ATH8031_PHY_ID 0x004dd074
+#define ATH8035_PHY_ID 0x004dd072
+
 MODULE_DESCRIPTION("Atheros 803x PHY driver");
 MODULE_AUTHOR("Matus Ujhelyi");
 MODULE_LICENSE("GPL");
 
+struct at803x_priv {
+       bool phy_reset:1;
+       struct gpio_desc *gpiod_reset;
+};
+
+struct at803x_context {
+       u16 bmcr;
+       u16 advertise;
+       u16 control1000;
+       u16 int_enable;
+       u16 smart_speed;
+       u16 led_control;
+};
+
+/* save relevant PHY registers to private copy */
+static void at803x_context_save(struct phy_device *phydev,
+                               struct at803x_context *context)
+{
+       context->bmcr = phy_read(phydev, MII_BMCR);
+       context->advertise = phy_read(phydev, MII_ADVERTISE);
+       context->control1000 = phy_read(phydev, MII_CTRL1000);
+       context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
+       context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
+       context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
+}
+
+/* restore relevant PHY registers from private copy */
+static void at803x_context_restore(struct phy_device *phydev,
+                                  const struct at803x_context *context)
+{
+       phy_write(phydev, MII_BMCR, context->bmcr);
+       phy_write(phydev, MII_ADVERTISE, context->advertise);
+       phy_write(phydev, MII_CTRL1000, context->control1000);
+       phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
+       phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
+       phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
+}
+
 static int at803x_set_wol(struct phy_device *phydev,
                          struct ethtool_wolinfo *wol)
 {
@@ -142,6 +188,26 @@ static int at803x_resume(struct phy_device *phydev)
        return 0;
 }
 
+static int at803x_probe(struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct at803x_priv *priv;
+
+       priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+       if (!priv)
+               return -ENOMEM;
+
+       priv->gpiod_reset = devm_gpiod_get(dev, "reset");
+       if (IS_ERR(priv->gpiod_reset))
+               priv->gpiod_reset = NULL;
+       else
+               gpiod_direction_output(priv->gpiod_reset, 1);
+
+       phydev->priv = priv;
+
+       return 0;
+}
+
 static int at803x_config_init(struct phy_device *phydev)
 {
        int ret;
@@ -189,58 +255,99 @@ static int at803x_config_intr(struct phy_device *phydev)
        return err;
 }
 
+static void at803x_link_change_notify(struct phy_device *phydev)
+{
+       struct at803x_priv *priv = phydev->priv;
+
+       /*
+        * Conduct a hardware reset for AT8030 every time a link loss is
+        * signalled. This is necessary to circumvent a hardware bug that
+        * occurs when the cable is unplugged while TX packets are pending
+        * in the FIFO. In such cases, the FIFO enters an error mode from
+        * which it cannot be recovered by software.
+        */
+       if (phydev->drv->phy_id == ATH8030_PHY_ID) {
+               if (phydev->state == PHY_NOLINK) {
+                       if (priv->gpiod_reset && !priv->phy_reset) {
+                               struct at803x_context context;
+
+                               at803x_context_save(phydev, &context);
+
+                               gpiod_set_value(priv->gpiod_reset, 0);
+                               msleep(1);
+                               gpiod_set_value(priv->gpiod_reset, 1);
+                               msleep(1);
+
+                               at803x_context_restore(phydev, &context);
+
+                               dev_dbg(&phydev->dev, "%s(): phy was reset\n",
+                                       __func__);
+                               priv->phy_reset = true;
+                       }
+               } else {
+                       priv->phy_reset = false;
+               }
+       }
+}
+
 static struct phy_driver at803x_driver[] = {
 {
        /* ATHEROS 8035 */
-       .phy_id         = 0x004dd072,
-       .name           = "Atheros 8035 ethernet",
-       .phy_id_mask    = 0xffffffef,
-       .config_init    = at803x_config_init,
-       .set_wol        = at803x_set_wol,
-       .get_wol        = at803x_get_wol,
-       .suspend        = at803x_suspend,
-       .resume         = at803x_resume,
-       .features       = PHY_GBIT_FEATURES,
-       .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .driver         = {
+       .phy_id                 = ATH8035_PHY_ID,
+       .name                   = "Atheros 8035 ethernet",
+       .phy_id_mask            = 0xffffffef,
+       .probe                  = at803x_probe,
+       .config_init            = at803x_config_init,
+       .link_change_notify     = at803x_link_change_notify,
+       .set_wol                = at803x_set_wol,
+       .get_wol                = at803x_get_wol,
+       .suspend                = at803x_suspend,
+       .resume                 = at803x_resume,
+       .features               = PHY_GBIT_FEATURES,
+       .flags                  = PHY_HAS_INTERRUPT,
+       .config_aneg            = genphy_config_aneg,
+       .read_status            = genphy_read_status,
+       .driver                 = {
                .owner = THIS_MODULE,
        },
 }, {
        /* ATHEROS 8030 */
-       .phy_id         = 0x004dd076,
-       .name           = "Atheros 8030 ethernet",
-       .phy_id_mask    = 0xffffffef,
-       .config_init    = at803x_config_init,
-       .set_wol        = at803x_set_wol,
-       .get_wol        = at803x_get_wol,
-       .suspend        = at803x_suspend,
-       .resume         = at803x_resume,
-       .features       = PHY_GBIT_FEATURES,
-       .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .driver         = {
+       .phy_id                 = ATH8030_PHY_ID,
+       .name                   = "Atheros 8030 ethernet",
+       .phy_id_mask            = 0xffffffef,
+       .probe                  = at803x_probe,
+       .config_init            = at803x_config_init,
+       .link_change_notify     = at803x_link_change_notify,
+       .set_wol                = at803x_set_wol,
+       .get_wol                = at803x_get_wol,
+       .suspend                = at803x_suspend,
+       .resume                 = at803x_resume,
+       .features               = PHY_GBIT_FEATURES,
+       .flags                  = PHY_HAS_INTERRUPT,
+       .config_aneg            = genphy_config_aneg,
+       .read_status            = genphy_read_status,
+       .driver                 = {
                .owner = THIS_MODULE,
        },
 }, {
        /* ATHEROS 8031 */
-       .phy_id         = 0x004dd074,
-       .name           = "Atheros 8031 ethernet",
-       .phy_id_mask    = 0xffffffef,
-       .config_init    = at803x_config_init,
-       .set_wol        = at803x_set_wol,
-       .get_wol        = at803x_get_wol,
-       .suspend        = at803x_suspend,
-       .resume         = at803x_resume,
-       .features       = PHY_GBIT_FEATURES,
-       .flags          = PHY_HAS_INTERRUPT,
-       .config_aneg    = genphy_config_aneg,
-       .read_status    = genphy_read_status,
-       .ack_interrupt  = &at803x_ack_interrupt,
-       .config_intr    = &at803x_config_intr,
-       .driver         = {
+       .phy_id                 = ATH8031_PHY_ID,
+       .name                   = "Atheros 8031 ethernet",
+       .phy_id_mask            = 0xffffffef,
+       .probe                  = at803x_probe,
+       .config_init            = at803x_config_init,
+       .link_change_notify     = at803x_link_change_notify,
+       .set_wol                = at803x_set_wol,
+       .get_wol                = at803x_get_wol,
+       .suspend                = at803x_suspend,
+       .resume                 = at803x_resume,
+       .features               = PHY_GBIT_FEATURES,
+       .flags                  = PHY_HAS_INTERRUPT,
+       .config_aneg            = genphy_config_aneg,
+       .read_status            = genphy_read_status,
+       .ack_interrupt          = &at803x_ack_interrupt,
+       .config_intr            = &at803x_config_intr,
+       .driver                 = {
                .owner = THIS_MODULE,
        },
 } };
@@ -260,9 +367,9 @@ module_init(atheros_init);
 module_exit(atheros_exit);
 
 static struct mdio_device_id __maybe_unused atheros_tbl[] = {
-       { 0x004dd076, 0xffffffef },
-       { 0x004dd074, 0xffffffef },
-       { 0x004dd072, 0xffffffef },
+       { ATH8030_PHY_ID, 0xffffffef },
+       { ATH8031_PHY_ID, 0xffffffef },
+       { ATH8035_PHY_ID, 0xffffffef },
        { }
 };
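The __maybe_unused annotation is there because the table is consumed by module autoloading rather than referenced from code; in the driver that hookup is the usual one-liner (a sketch; the line itself sits outside this hunk):

        MODULE_DEVICE_TABLE(mdio, atheros_tbl);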
 
index 6a999e6..c301e4c 100644
@@ -40,6 +40,7 @@
 #define LAYER2         0x01
 #define MAX_RXTS       64
 #define N_EXT_TS       6
+#define N_PER_OUT      7
 #define PSF_PTPVER     2
 #define PSF_EVNT       0x4000
 #define PSF_RX         0x2000
@@ -47,7 +48,6 @@
 #define EXT_EVENT      1
 #define CAL_EVENT      7
 #define CAL_TRIGGER    7
-#define PER_TRIGGER    6
 #define DP83640_N_PINS 12
 
 #define MII_DP83640_MICR 0x11
 #define ENDIAN_FLAG    PSF_ENDIAN
 #endif
 
-#define SKB_PTP_TYPE(__skb) (*(unsigned int *)((__skb)->cb))
+struct dp83640_skb_info {
+       int ptp_type;
+       unsigned long tmo;
+};
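This per-packet state now travels in skb->cb[], which offers 48 bytes; a compile-time guard in the usual style, placed in some init path, would catch any growth past that (a sketch, not part of the patch):

        BUILD_BUG_ON(sizeof(struct dp83640_skb_info) >
                     FIELD_SIZEOF(struct sk_buff, cb));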
 
 struct phy_rxts {
        u16 ns_lo;   /* ns[15:0] */
@@ -300,23 +303,23 @@ static u64 phy2txts(struct phy_txts *p)
 }
 
 static int periodic_output(struct dp83640_clock *clock,
-                          struct ptp_clock_request *clkreq, bool on)
+                          struct ptp_clock_request *clkreq, bool on,
+                          int trigger)
 {
        struct dp83640_private *dp83640 = clock->chosen;
        struct phy_device *phydev = dp83640->phydev;
        u32 sec, nsec, pwidth;
-       u16 gpio, ptp_trig, trigger, val;
+       u16 gpio, ptp_trig, val;
 
        if (on) {
-               gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT, 0);
+               gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PEROUT,
+                                       trigger);
                if (gpio < 1)
                        return -EINVAL;
        } else {
                gpio = 0;
        }
 
-       trigger = PER_TRIGGER;
-
        ptp_trig = TRIG_WR |
                (trigger & TRIG_CSEL_MASK) << TRIG_CSEL_SHIFT |
                (gpio & TRIG_GPIO_MASK) << TRIG_GPIO_SHIFT |
@@ -353,6 +356,11 @@ static int periodic_output(struct dp83640_clock *clock,
        ext_write(0, phydev, PAGE4, PTP_TDR, sec >> 16);       /* sec[31:16] */
        ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff); /* ns[15:0] */
        ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);    /* ns[31:16] */
+       /* Triggers 0 and 1 have a programmable pulsewidth2 */
+       if (trigger < 2) {
+               ext_write(0, phydev, PAGE4, PTP_TDR, pwidth & 0xffff);
+               ext_write(0, phydev, PAGE4, PTP_TDR, pwidth >> 16);
+       }
 
        /* enable trigger */
        val &= ~TRIG_LOAD;
@@ -491,9 +499,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
-               if (rq->perout.index != 0)
+               if (rq->perout.index >= N_PER_OUT)
                        return -EINVAL;
-               return periodic_output(clock, rq, on);
+               return periodic_output(clock, rq, on, rq->perout.index);
 
        default:
                break;
@@ -505,6 +513,16 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
 static int ptp_dp83640_verify(struct ptp_clock_info *ptp, unsigned int pin,
                              enum ptp_pin_function func, unsigned int chan)
 {
+       struct dp83640_clock *clock =
+               container_of(ptp, struct dp83640_clock, caps);
+
+       if (clock->caps.pin_config[pin].func == PTP_PF_PHYSYNC &&
+           !list_empty(&clock->phylist))
+               return 1;
+
+       if (func == PTP_PF_PHYSYNC)
+               return 1;
+
        return 0;
 }
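The PTP core invokes ->verify() from ptp_set_pinfunc() and rejects the requested pin reassignment whenever it returns nonzero, so the two checks above keep a PHYSYNC pin locked down while other PHYs on the bus still rely on it, and forbid userspace from assigning PTP_PF_PHYSYNC at all.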
 
@@ -594,7 +612,11 @@ static void recalibrate(struct dp83640_clock *clock)
        u16 cal_gpio, cfg0, evnt, ptp_trig, trigger, val;
 
        trigger = CAL_TRIGGER;
-       cal_gpio = gpio_tab[CALIBRATE_GPIO];
+       cal_gpio = 1 + ptp_find_pin(clock->ptp_clock, PTP_PF_PHYSYNC, 0);
+       if (cal_gpio < 1) {
+               pr_err("PHY calibration pin not avaible - PHY is not calibrated.");
+               return;
+       }
 
        mutex_lock(&clock->extreg_lock);
 
@@ -736,6 +758,9 @@ static int decode_evnt(struct dp83640_private *dp83640,
        event.type = PTP_CLOCK_EXTTS;
        event.timestamp = phy2txts(&dp83640->edata);
 
+       /* Compensate for input path and synchronization delays */
+       event.timestamp -= 35;
+
        for (i = 0; i < N_EXT_TS; i++) {
                if (ext_status & exts_chan_to_edata(i)) {
                        event.index = i;
@@ -746,10 +771,51 @@ static int decode_evnt(struct dp83640_private *dp83640,
        return parsed * sizeof(u16);
 }
 
+static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
+{
+       u16 *seqid;
+       unsigned int offset = 0;
+       u8 *msgtype, *data = skb_mac_header(skb);
+
+       /* check sequenceID, messageType, 12 bit hash of offset 20-29 */
+
+       if (type & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (type & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
+               break;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
+               break;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
+               break;
+       default:
+               return 0;
+       }
+
+       if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
+               return 0;
+
+       if (unlikely(type & PTP_CLASS_V1))
+               msgtype = data + offset + OFF_PTP_CONTROL;
+       else
+               msgtype = data + offset;
+
+       seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
+
+       return rxts->msgtype == (*msgtype & 0xf) &&
+               rxts->seqid   == ntohs(*seqid);
+}
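As a worked example of the offset arithmetic: for an untagged PTPv2 event message over IPv4/UDP, offset = ETH_HLEN (14) + an option-less IPv4 header (20) + UDP_HLEN (8) = 42 bytes from the MAC header, so the sequenceID is read at data + 42 + OFF_PTP_SEQUENCE_ID (30) = data + 72.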
+
 static void decode_rxts(struct dp83640_private *dp83640,
                        struct phy_rxts *phy_rxts)
 {
        struct rxts *rxts;
+       struct skb_shared_hwtstamps *shhwtstamps = NULL;
+       struct sk_buff *skb;
        unsigned long flags;
 
        spin_lock_irqsave(&dp83640->rx_lock, flags);
@@ -763,7 +829,26 @@ static void decode_rxts(struct dp83640_private *dp83640,
        rxts = list_first_entry(&dp83640->rxpool, struct rxts, list);
        list_del_init(&rxts->list);
        phy2rxts(phy_rxts, rxts);
-       list_add_tail(&rxts->list, &dp83640->rxts);
+
+       spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
+       skb_queue_walk(&dp83640->rx_queue, skb) {
+               struct dp83640_skb_info *skb_info;
+
+               skb_info = (struct dp83640_skb_info *)skb->cb;
+               if (match(skb, skb_info->ptp_type, rxts)) {
+                       __skb_unlink(skb, &dp83640->rx_queue);
+                       shhwtstamps = skb_hwtstamps(skb);
+                       memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+                       shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
+                       netif_rx_ni(skb);
+                       list_add(&rxts->list, &dp83640->rxpool);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
+
+       if (!shhwtstamps)
+               list_add_tail(&rxts->list, &dp83640->rxts);
 out:
        spin_unlock_irqrestore(&dp83640->rx_lock, flags);
 }
@@ -837,20 +922,18 @@ static int is_sync(struct sk_buff *skb, int type)
        u8 *data = skb->data, *msgtype;
        unsigned int offset = 0;
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
+       if (type & PTP_CLASS_VLAN)
+               offset += VLAN_HLEN;
+
+       switch (type & PTP_CLASS_PMASK) {
+       case PTP_CLASS_IPV4:
+               offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
+       case PTP_CLASS_IPV6:
+               offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
                break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
+       case PTP_CLASS_L2:
+               offset += ETH_HLEN;
                break;
        default:
                return 0;
@@ -867,47 +950,6 @@ static int is_sync(struct sk_buff *skb, int type)
        return (*msgtype & 0xf) == 0;
 }
 
-static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
-{
-       u16 *seqid;
-       unsigned int offset;
-       u8 *msgtype, *data = skb_mac_header(skb);
-
-       /* check sequenceID, messageType, 12 bit hash of offset 20-29 */
-
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V2_IPV4:
-               offset = ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN;
-               break;
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV6:
-               offset = OFF_PTP6;
-               break;
-       case PTP_CLASS_V2_L2:
-               offset = ETH_HLEN;
-               break;
-       case PTP_CLASS_V2_VLAN:
-               offset = ETH_HLEN + VLAN_HLEN;
-               break;
-       default:
-               return 0;
-       }
-
-       if (skb->len + ETH_HLEN < offset + OFF_PTP_SEQUENCE_ID + sizeof(*seqid))
-               return 0;
-
-       if (unlikely(type & PTP_CLASS_V1))
-               msgtype = data + offset + OFF_PTP_CONTROL;
-       else
-               msgtype = data + offset;
-
-       seqid = (u16 *)(data + offset + OFF_PTP_SEQUENCE_ID);
-
-       return rxts->msgtype == (*msgtype & 0xf) &&
-               rxts->seqid   == ntohs(*seqid);
-}
-
 static void dp83640_free_clocks(void)
 {
        struct dp83640_clock *clock;
@@ -944,7 +986,7 @@ static void dp83640_clock_init(struct dp83640_clock *clock, struct mii_bus *bus)
        clock->caps.max_adj     = 1953124;
        clock->caps.n_alarm     = 0;
        clock->caps.n_ext_ts    = N_EXT_TS;
-       clock->caps.n_per_out   = 1;
+       clock->caps.n_per_out   = N_PER_OUT;
        clock->caps.n_pins      = DP83640_N_PINS;
        clock->caps.pps         = 0;
        clock->caps.adjfreq     = ptp_dp83640_adjfreq;
@@ -1284,47 +1326,34 @@ static void rx_timestamp_work(struct work_struct *work)
 {
        struct dp83640_private *dp83640 =
                container_of(work, struct dp83640_private, ts_work);
-       struct list_head *this, *next;
-       struct rxts *rxts;
-       struct skb_shared_hwtstamps *shhwtstamps;
        struct sk_buff *skb;
-       unsigned int type;
-       unsigned long flags;
 
-       /* Deliver each deferred packet, with or without a time stamp. */
-
-       while ((skb = skb_dequeue(&dp83640->rx_queue)) != NULL) {
-               type = SKB_PTP_TYPE(skb);
-               spin_lock_irqsave(&dp83640->rx_lock, flags);
-               list_for_each_safe(this, next, &dp83640->rxts) {
-                       rxts = list_entry(this, struct rxts, list);
-                       if (match(skb, type, rxts)) {
-                               shhwtstamps = skb_hwtstamps(skb);
-                               memset(shhwtstamps, 0, sizeof(*shhwtstamps));
-                               shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
-                               list_del_init(&rxts->list);
-                               list_add(&rxts->list, &dp83640->rxpool);
-                               break;
-                       }
+       /* Deliver expired packets. */
+       while ((skb = skb_dequeue(&dp83640->rx_queue))) {
+               struct dp83640_skb_info *skb_info;
+
+               skb_info = (struct dp83640_skb_info *)skb->cb;
+               if (!time_after(jiffies, skb_info->tmo)) {
+                       skb_queue_head(&dp83640->rx_queue, skb);
+                       break;
                }
-               spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+
                netif_rx_ni(skb);
        }
 
-       /* Clear out expired time stamps. */
-
-       spin_lock_irqsave(&dp83640->rx_lock, flags);
-       prune_rx_ts(dp83640);
-       spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+       if (!skb_queue_empty(&dp83640->rx_queue))
+               schedule_work(&dp83640->ts_work);
 }
 
 static bool dp83640_rxtstamp(struct phy_device *phydev,
                             struct sk_buff *skb, int type)
 {
        struct dp83640_private *dp83640 = phydev->priv;
-
-       if (!dp83640->hwts_rx_en)
-               return false;
+       struct dp83640_skb_info *skb_info = (struct dp83640_skb_info *)skb->cb;
+       struct list_head *this, *next;
+       struct rxts *rxts;
+       struct skb_shared_hwtstamps *shhwtstamps = NULL;
+       unsigned long flags;
 
        if (is_status_frame(skb, type)) {
                decode_status_frame(dp83640, skb);
@@ -1332,9 +1361,30 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
                return true;
        }
 
-       SKB_PTP_TYPE(skb) = type;
-       skb_queue_tail(&dp83640->rx_queue, skb);
-       schedule_work(&dp83640->ts_work);
+       if (!dp83640->hwts_rx_en)
+               return false;
+
+       spin_lock_irqsave(&dp83640->rx_lock, flags);
+       list_for_each_safe(this, next, &dp83640->rxts) {
+               rxts = list_entry(this, struct rxts, list);
+               if (match(skb, type, rxts)) {
+                       shhwtstamps = skb_hwtstamps(skb);
+                       memset(shhwtstamps, 0, sizeof(*shhwtstamps));
+                       shhwtstamps->hwtstamp = ns_to_ktime(rxts->ns);
+                       netif_rx_ni(skb);
+                       list_del_init(&rxts->list);
+                       list_add(&rxts->list, &dp83640->rxpool);
+                       break;
+               }
+       }
+       spin_unlock_irqrestore(&dp83640->rx_lock, flags);
+
+       if (!shhwtstamps) {
+               skb_info->ptp_type = type;
+               skb_info->tmo = jiffies + 2;
+               skb_queue_tail(&dp83640->rx_queue, skb);
+               schedule_work(&dp83640->ts_work);
+       }
 
        return true;
 }
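The skb_info->tmo deadline of jiffies + 2 bounds how long a packet may wait for its timestamp: two jiffies is 2 ms at HZ=1000 and 20 ms at HZ=100. rx_timestamp_work() above then releases any queued packet whose deadline expired without a matching timestamp arriving.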
@@ -1355,7 +1405,6 @@ static void dp83640_txtstamp(struct phy_device *phydev,
        case HWTSTAMP_TX_ON:
                skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
                skb_queue_tail(&dp83640->tx_queue, skb);
-               schedule_work(&dp83640->ts_work);
                break;
 
        case HWTSTAMP_TX_OFF:
index 2e58aa5..203651e 100644
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
        return d ? to_mii_bus(d) : NULL;
 }
 EXPORT_SYMBOL(of_mdio_find_bus);
+
+/* Walk the list of subnodes of an MDIO bus and look for a node that matches the
+ * PHY's address with its 'reg' property. If found, set the of_node pointer for
+ * the PHY. This allows auto-probed PHY devices to be supplied with information
+ * passed in via DT.
+ */
+static void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                  struct phy_device *phydev)
+{
+       struct device *dev = &phydev->dev;
+       struct device_node *child;
+
+       if (dev->of_node || !mdio->dev.of_node)
+               return;
+
+       for_each_available_child_of_node(mdio->dev.of_node, child) {
+               u32 addr;
+               int ret;
+
+               ret = of_property_read_u32(child, "reg", &addr);
+               if (ret < 0) {
+                       dev_err(dev, "%s has invalid PHY address\n",
+                               child->full_name);
+                       continue;
+               }
+
+               /* A PHY must have a reg property in the range [0-31] */
+               if (addr >= PHY_MAX_ADDR) {
+                       dev_err(dev, "%s PHY address %i is too large\n",
+                               child->full_name, addr);
+                       continue;
+               }
+
+               if (addr == phydev->addr) {
+                       dev->of_node = child;
+                       return;
+               }
+       }
+}
+#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
+static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
+                                         struct phy_device *phydev)
+{
+}
 #endif
 
 /**
@@ -211,6 +255,7 @@ int mdiobus_register(struct mii_bus *bus)
 
        bus->dev.parent = bus->parent;
        bus->dev.class = &mdio_bus_class;
+       bus->dev.driver = bus->parent->driver;
        bus->dev.groups = NULL;
        dev_set_name(&bus->dev, "%s", bus->id);
 
index bc7c7d2..fd0ea7c 100644
@@ -420,6 +420,26 @@ static int ksz8873mll_config_aneg(struct phy_device *phydev)
        return 0;
 }
 
+/* This routine returns -1 as an indication to the caller that the
+ * Micrel ksz9021 10/100/1000 PHY does not support standard IEEE
+ * MMD extended PHY registers.
+ */
+static int
+ksz9021_rd_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
+                     int regnum)
+{
+       return -1;
+}
+
+/* This routine does nothing since the Micrel ksz9021 does not support
+ * standard IEEE MMD extended PHY registers.
+ */
+static void
+ksz9021_wr_mmd_phyreg(struct phy_device *phydev, int ptrad, int devnum,
+                     int regnum, u32 val)
+{
+}
+
 static struct phy_driver ksphy_driver[] = {
 {
        .phy_id         = PHY_ID_KS8737,
@@ -565,6 +585,8 @@ static struct phy_driver ksphy_driver[] = {
        .config_intr    = ksz9021_config_intr,
        .suspend        = genphy_suspend,
        .resume         = genphy_resume,
+       .read_mmd_indirect = ksz9021_rd_mmd_phyreg,
+       .write_mmd_indirect = ksz9021_wr_mmd_phyreg,
        .driver         = { .owner = THIS_MODULE, },
 }, {
        .phy_id         = PHY_ID_KSZ9031,
index 3bc079a..c94e2a2 100644
@@ -138,10 +138,30 @@ struct phy_setting {
 /* A mapping of all SUPPORTED settings to speed/duplex */
 static const struct phy_setting settings[] = {
        {
-               .speed = 10000,
+               .speed = SPEED_10000,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_10000baseKR_Full,
+       },
+       {
+               .speed = SPEED_10000,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_10000baseKX4_Full,
+       },
+       {
+               .speed = SPEED_10000,
                .duplex = DUPLEX_FULL,
                .setting = SUPPORTED_10000baseT_Full,
        },
+       {
+               .speed = SPEED_2500,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_2500baseX_Full,
+       },
+       {
+               .speed = SPEED_1000,
+               .duplex = DUPLEX_FULL,
+               .setting = SUPPORTED_1000baseKX_Full,
+       },
        {
                .speed = SPEED_1000,
                .duplex = DUPLEX_FULL,
@@ -720,6 +740,9 @@ void phy_state_machine(struct work_struct *work)
 
        mutex_lock(&phydev->lock);
 
+       if (phydev->drv->link_change_notify)
+               phydev->drv->link_change_notify(phydev);
+
        switch (phydev->state) {
        case PHY_DOWN:
        case PHY_STARTING:
@@ -919,7 +942,7 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
 
 /**
  * phy_read_mmd_indirect - reads data from the MMD registers
- * @bus: the target MII bus
+ * @phydev: The PHY device
  * @prtad: MMD Address
  * @devad: MMD DEVAD
  * @addr: PHY address on the MII bus
@@ -932,18 +955,26 @@ static inline void mmd_phy_indirect(struct mii_bus *bus, int prtad, int devad,
  * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Read  reg 14 // Read MMD data
  */
-static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
-                                int addr)
+static int phy_read_mmd_indirect(struct phy_device *phydev, int prtad,
+                                int devad, int addr)
 {
-       mmd_phy_indirect(bus, prtad, devad, addr);
+       struct phy_driver *phydrv = phydev->drv;
+       int value = -1;
 
-       /* Read the content of the MMD's selected register */
-       return bus->read(bus, addr, MII_MMD_DATA);
+       if (phydrv->read_mmd_indirect == NULL) {
+               mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+
+               /* Read the content of the MMD's selected register */
+               value = phydev->bus->read(phydev->bus, addr, MII_MMD_DATA);
+       } else {
+               value = phydrv->read_mmd_indirect(phydev, prtad, devad, addr);
+       }
+       return value;
 }
 
 /**
  * phy_write_mmd_indirect - writes data to the MMD registers
- * @bus: the target MII bus
+ * @phydev: The PHY device
  * @prtad: MMD Address
  * @devad: MMD DEVAD
  * @addr: PHY address on the MII bus
@@ -957,13 +988,19 @@ static int phy_read_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
  * 3) Write reg 13 // MMD Data Command for MMD DEVAD
 * 4) Write reg 14 // Write MMD data
  */
-static void phy_write_mmd_indirect(struct mii_bus *bus, int prtad, int devad,
-                                  int addr, u32 data)
+static void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
+                                  int devad, int addr, u32 data)
 {
-       mmd_phy_indirect(bus, prtad, devad, addr);
+       struct phy_driver *phydrv = phydev->drv;
 
-       /* Write the data into MMD's selected register */
-       bus->write(bus, addr, MII_MMD_DATA, data);
+       if (phydrv->write_mmd_indirect == NULL) {
+               mmd_phy_indirect(phydev->bus, prtad, devad, addr);
+
+               /* Write the data into MMD's selected register */
+               phydev->bus->write(phydev->bus, addr, MII_MMD_DATA, data);
+       } else {
+               phydrv->write_mmd_indirect(phydev, prtad, devad, addr, data);
+       }
 }
 
 /**
@@ -997,7 +1034,7 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                        return status;
 
                /* First check if the EEE ability is supported */
-               eee_cap = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+               eee_cap = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
                                                MDIO_MMD_PCS, phydev->addr);
                if (eee_cap < 0)
                        return eee_cap;
@@ -1009,12 +1046,12 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                /* Check which link settings negotiated and verify it in
                 * the EEE advertising registers.
                 */
-               eee_lp = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+               eee_lp = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
                                               MDIO_MMD_AN, phydev->addr);
                if (eee_lp < 0)
                        return eee_lp;
 
-               eee_adv = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+               eee_adv = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
                                                MDIO_MMD_AN, phydev->addr);
                if (eee_adv < 0)
                        return eee_adv;
@@ -1029,15 +1066,16 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
                        /* Configure the PHY to stop receiving xMII
                         * clock while it is signaling LPI.
                         */
-                       int val = phy_read_mmd_indirect(phydev->bus, MDIO_CTRL1,
+                       int val = phy_read_mmd_indirect(phydev, MDIO_CTRL1,
                                                        MDIO_MMD_PCS,
                                                        phydev->addr);
                        if (val < 0)
                                return val;
 
                        val |= MDIO_PCS_CTRL1_CLKSTOP_EN;
-                       phy_write_mmd_indirect(phydev->bus, MDIO_CTRL1,
-                                              MDIO_MMD_PCS, phydev->addr, val);
+                       phy_write_mmd_indirect(phydev, MDIO_CTRL1,
+                                              MDIO_MMD_PCS, phydev->addr,
+                                              val);
                }
 
                return 0; /* EEE supported */
@@ -1056,7 +1094,7 @@ EXPORT_SYMBOL(phy_init_eee);
  */
 int phy_get_eee_err(struct phy_device *phydev)
 {
-       return phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_WK_ERR,
+       return phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_WK_ERR,
                                     MDIO_MMD_PCS, phydev->addr);
 }
 EXPORT_SYMBOL(phy_get_eee_err);
@@ -1074,21 +1112,21 @@ int phy_ethtool_get_eee(struct phy_device *phydev, struct ethtool_eee *data)
        int val;
 
        /* Get Supported EEE */
-       val = phy_read_mmd_indirect(phydev->bus, MDIO_PCS_EEE_ABLE,
+       val = phy_read_mmd_indirect(phydev, MDIO_PCS_EEE_ABLE,
                                    MDIO_MMD_PCS, phydev->addr);
        if (val < 0)
                return val;
        data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
 
        /* Get advertisement EEE */
-       val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV,
+       val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_ADV,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
        data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
 
        /* Get LP advertisement EEE */
-       val = phy_read_mmd_indirect(phydev->bus, MDIO_AN_EEE_LPABLE,
+       val = phy_read_mmd_indirect(phydev, MDIO_AN_EEE_LPABLE,
                                    MDIO_MMD_AN, phydev->addr);
        if (val < 0)
                return val;
@@ -1109,7 +1147,7 @@ int phy_ethtool_set_eee(struct phy_device *phydev, struct ethtool_eee *data)
 {
        int val = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
 
-       phy_write_mmd_indirect(phydev->bus, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
+       phy_write_mmd_indirect(phydev, MDIO_AN_EEE_ADV, MDIO_MMD_AN,
                               phydev->addr, val);
 
        return 0;
index 35d753d..ca5ec3e 100644
@@ -355,7 +355,7 @@ int phy_device_register(struct phy_device *phydev)
        phydev->bus->phy_map[phydev->addr] = phydev;
 
        /* Run all of the fixups for this PHY */
-       err = phy_init_hw(phydev);
+       err = phy_scan_fixups(phydev);
        if (err) {
                pr_err("PHY %d failed to initialize\n", phydev->addr);
                goto out;
@@ -575,6 +575,7 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                      u32 flags, phy_interface_t interface)
 {
        struct device *d = &phydev->dev;
+       struct module *bus_module;
        int err;
 
        /* Assume that if there is no driver, that it doesn't
@@ -599,6 +600,14 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev,
                return -EBUSY;
        }
 
+       /* Increment the bus module reference count */
+       bus_module = phydev->bus->dev.driver ?
+                    phydev->bus->dev.driver->owner : NULL;
+       if (!try_module_get(bus_module)) {
+               dev_err(&dev->dev, "failed to get the bus module\n");
+               return -EIO;
+       }
+
        phydev->attached_dev = dev;
        dev->phydev = phydev;
 
@@ -664,6 +673,10 @@ EXPORT_SYMBOL(phy_attach);
 void phy_detach(struct phy_device *phydev)
 {
        int i;
+
+       if (phydev->bus->dev.driver)
+               module_put(phydev->bus->dev.driver->owner);
+
        phydev->attached_dev->phydev = NULL;
        phydev->attached_dev = NULL;
        phy_suspend(phydev);
@@ -696,6 +709,7 @@ int phy_suspend(struct phy_device *phydev)
                return phydrv->suspend(phydev);
        return 0;
 }
+EXPORT_SYMBOL(phy_suspend);
 
 int phy_resume(struct phy_device *phydev)
 {
@@ -705,6 +719,7 @@ int phy_resume(struct phy_device *phydev)
                return phydrv->resume(phydev);
        return 0;
 }
+EXPORT_SYMBOL(phy_resume);
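With both symbols exported, a MAC driver can park its PHY across system sleep; a hypothetical sketch (foo_priv and its phydev field are illustrative, not from this patch):

        static int foo_suspend(struct device *dev)
        {
                struct foo_priv *priv = dev_get_drvdata(dev);

                if (priv->phydev)
                        phy_suspend(priv->phydev);  /* genphy sets BMCR_PDOWN */
                return 0;
        }

        static int foo_resume(struct device *dev)
        {
                struct foo_priv *priv = dev_get_drvdata(dev);

                if (priv->phydev)
                        phy_resume(priv->phydev);   /* clears the power-down bit */
                return 0;
        }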
 
 /* Generic PHY support and helper functions */
 
index 22b047f..eab57fc 100644
@@ -277,7 +277,7 @@ static int ks8995_probe(struct spi_device *spi)
        /* Chip description */
        pdata = spi->dev.platform_data;
 
-       ks = kzalloc(sizeof(*ks), GFP_KERNEL);
+       ks = devm_kzalloc(&spi->dev, sizeof(*ks), GFP_KERNEL);
        if (!ks)
                return -ENOMEM;
 
@@ -291,14 +291,14 @@ static int ks8995_probe(struct spi_device *spi)
        err = spi_setup(spi);
        if (err) {
                dev_err(&spi->dev, "spi_setup failed, err=%d\n", err);
-               goto err_drvdata;
+               return err;
        }
 
        err = ks8995_read(ks, ids, KS8995_REG_ID0, sizeof(ids));
        if (err < 0) {
                dev_err(&spi->dev, "unable to read id registers, err=%d\n",
                                err);
-               goto err_drvdata;
+               return err;
        }
 
        switch (ids[0]) {
@@ -306,8 +306,7 @@ static int ks8995_probe(struct spi_device *spi)
                break;
        default:
                dev_err(&spi->dev, "unknown family id:%02x\n", ids[0]);
-               err = -ENODEV;
-               goto err_drvdata;
+               return -ENODEV;
        }
 
        memcpy(&ks->regs_attr, &ks8995_registers_attr, sizeof(ks->regs_attr));
@@ -320,24 +319,24 @@ static int ks8995_probe(struct spi_device *spi)
                        dev_err(&spi->dev,
                                "unable to read chip id register, err=%d\n",
                                err);
-                       goto err_drvdata;
+                       return err;
                }
                if ((val & 0x80) == 0) {
                        dev_err(&spi->dev, "unknown chip:%02x,0\n", ids[1]);
-                       goto err_drvdata;
+                       return -ENODEV;
                }
                ks->regs_attr.size = KSZ8864_REGS_SIZE;
        }
 
        err = ks8995_reset(ks);
        if (err)
-               goto err_drvdata;
+               return err;
 
        err = sysfs_create_bin_file(&spi->dev.kobj, &ks->regs_attr);
        if (err) {
                dev_err(&spi->dev, "unable to create sysfs file, err=%d\n",
                                    err);
-               goto err_drvdata;
+               return err;
        }
 
        if (get_chip_id(ids[1]) == CHIPID_M) {
@@ -350,21 +349,12 @@ static int ks8995_probe(struct spi_device *spi)
        }
 
        return 0;
-
-err_drvdata:
-       kfree(ks);
-       return err;
 }
 
 static int ks8995_remove(struct spi_device *spi)
 {
-       struct ks8995_data      *ks8995;
-
-       ks8995 = spi_get_drvdata(spi);
        sysfs_remove_bin_file(&spi->dev.kobj, &ks8995_registers_attr);
 
-       kfree(ks8995);
-
        return 0;
 }
 
index 91d6c12..fa0d717 100644
@@ -143,8 +143,8 @@ struct ppp {
        struct sk_buff_head mrq;        /* MP: receive reconstruction queue */
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
-       struct sk_filter *pass_filter;  /* filter for packets to pass */
-       struct sk_filter *active_filter;/* filter for pkts to reset idle */
+       struct bpf_prog *pass_filter;   /* filter for packets to pass */
+       struct bpf_prog *active_filter; /* filter for pkts to reset idle */
 #endif /* CONFIG_PPP_FILTER */
        struct net      *ppp_net;       /* the net we belong to */
        struct ppp_link_stats stats64;  /* 64 bit network stats */
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
 {
        struct sock_fprog uprog;
        struct sock_filter *code = NULL;
-       int len, err;
+       int len;
 
        if (copy_from_user(&uprog, arg, sizeof(uprog)))
                return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
        if (IS_ERR(code))
                return PTR_ERR(code);
 
-       err = sk_chk_filter(code, uprog.len);
-       if (err) {
-               kfree(code);
-               return err;
-       }
-
        *p = code;
        return uprog.len;
 }
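The sk_chk_filter() call can go because bpf_prog_create() runs the classic BPF checker on the program itself before converting it; a minimal sketch of the caller side, using the names from this function (the filter array stays owned by the caller either way):

        struct sock_fprog_kern fprog = {
                .len    = uprog.len,
                .filter = code,
        };
        struct bpf_prog *fp;
        int err;

        err = bpf_prog_create(&fp, &fprog); /* validates and copies 'code' */
        kfree(code);                        /* caller frees its array regardless */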
@@ -661,6 +655,10 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        break;
                ppp_lock(ppp);
                cflags = ppp->flags & ~val;
+#ifdef CONFIG_PPP_MULTILINK
+               if (!(ppp->flags & SC_MULTILINK) && (val & SC_MULTILINK))
+                       ppp->nextseq = 0;
+#endif
                ppp->flags = val & SC_FLAG_BITS;
                ppp_unlock(ppp);
                if (cflags & SC_CCP_OPEN)
@@ -763,10 +761,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->pass_filter)
-                               sk_unattached_filter_destroy(ppp->pass_filter);
-                       err = sk_unattached_filter_create(&ppp->pass_filter,
-                                                         &fprog);
+                       if (ppp->pass_filter) {
+                               bpf_prog_destroy(ppp->pass_filter);
+                               ppp->pass_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = bpf_prog_create(&ppp->pass_filter,
+                                                     &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
@@ -784,10 +787,15 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
                        };
 
                        ppp_lock(ppp);
-                       if (ppp->active_filter)
-                               sk_unattached_filter_destroy(ppp->active_filter);
-                       err = sk_unattached_filter_create(&ppp->active_filter,
-                                                         &fprog);
+                       if (ppp->active_filter) {
+                               bpf_prog_destroy(ppp->active_filter);
+                               ppp->active_filter = NULL;
+                       }
+                       if (fprog.filter != NULL)
+                               err = bpf_prog_create(&ppp->active_filter,
+                                                     &fprog);
+                       else
+                               err = 0;
                        kfree(code);
                        ppp_unlock(ppp);
                }
@@ -1197,7 +1205,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                   a four-byte PPP header on each packet */
                *skb_push(skb, 2) = 1;
                if (ppp->pass_filter &&
-                   SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
+                   BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
                        if (ppp->debug & 1)
                                netdev_printk(KERN_DEBUG, ppp->dev,
                                              "PPP: outbound frame "
@@ -1207,7 +1215,7 @@ ppp_send_frame(struct ppp *ppp, struct sk_buff *skb)
                }
                /* if this packet passes the active filter, record the time */
                if (!(ppp->active_filter &&
-                     SK_RUN_FILTER(ppp->active_filter, skb) == 0))
+                     BPF_PROG_RUN(ppp->active_filter, skb) == 0))
                        ppp->last_xmit = jiffies;
                skb_pull(skb, 2);
 #else
@@ -1831,7 +1839,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
 
                        *skb_push(skb, 2) = 0;
                        if (ppp->pass_filter &&
-                           SK_RUN_FILTER(ppp->pass_filter, skb) == 0) {
+                           BPF_PROG_RUN(ppp->pass_filter, skb) == 0) {
                                if (ppp->debug & 1)
                                        netdev_printk(KERN_DEBUG, ppp->dev,
                                                      "PPP: inbound frame "
@@ -1840,7 +1848,7 @@ ppp_receive_nonmp_frame(struct ppp *ppp, struct sk_buff *skb)
                                return;
                        }
                        if (!(ppp->active_filter &&
-                             SK_RUN_FILTER(ppp->active_filter, skb) == 0))
+                             BPF_PROG_RUN(ppp->active_filter, skb) == 0))
                                ppp->last_recv = jiffies;
                        __skb_pull(skb, 2);
                } else
@@ -2665,7 +2673,8 @@ ppp_create_interface(struct net *net, int unit, int *retp)
        int ret = -ENOMEM;
        int i;
 
-       dev = alloc_netdev(sizeof(struct ppp), "", ppp_setup);
+       dev = alloc_netdev(sizeof(struct ppp), "", NET_NAME_UNKNOWN,
+                          ppp_setup);
        if (!dev)
                goto out1;
 
@@ -2820,12 +2829,12 @@ static void ppp_destroy_interface(struct ppp *ppp)
 #endif /* CONFIG_PPP_MULTILINK */
 #ifdef CONFIG_PPP_FILTER
        if (ppp->pass_filter) {
-               sk_unattached_filter_destroy(ppp->pass_filter);
+               bpf_prog_destroy(ppp->pass_filter);
                ppp->pass_filter = NULL;
        }
 
        if (ppp->active_filter) {
-               sk_unattached_filter_destroy(ppp->active_filter);
+               bpf_prog_destroy(ppp->active_filter);
                ppp->active_filter = NULL;
        }
 #endif /* CONFIG_PPP_FILTER */
index 2ea7efd..6c9c16d 100644
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
                po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
                                   dev->hard_header_len);
 
-               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr);
+               po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
                po->chan.private = sk;
                po->chan.ops = &pppoe_chan_ops;
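The two extra bytes subtracted here reserve room for the PPP protocol field: multilink PPP sizes its fragments from chan->mtu, and without this headroom a maximum-size fragment plus the PPPoE header and protocol word could exceed the underlying device MTU.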
 
index 1252d9c..079f7ad 100644
@@ -396,7 +396,6 @@ found:
                   ntohs(cs->cs_ip.tot_len) == hlen)
                        break;
                goto uncompressed;
-               break;
        case SPECIAL_I:
        case SPECIAL_D:
                /* actual changes match one of our special case encodings --
index ad4a94e..05387b1 100644
@@ -83,6 +83,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/workqueue.h>
 #include "slip.h"
 #ifdef CONFIG_INET
 #include <linux/ip.h>
@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
 #endif
 }
 
-/*
- * Called by the driver when there's room for more data.  If we have
- * more packets to send, we send them here.
- */
-static void slip_write_wakeup(struct tty_struct *tty)
+/* Write out any remaining transmit buffer. Scheduled when tty is writable */
+static void slip_transmit(struct work_struct *work)
 {
+       struct slip *sl = container_of(work, struct slip, tx_work);
        int actual;
-       struct slip *sl = tty->disc_data;
 
+       spin_lock_bh(&sl->lock);
        /* First make sure we're connected. */
-       if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev))
+       if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
+               spin_unlock_bh(&sl->lock);
                return;
+       }
 
-       spin_lock_bh(&sl->lock);
        if (sl->xleft <= 0)  {
                /* Now serial buffer is almost free & we can start
                 * transmission of another packet */
                sl->dev->stats.tx_packets++;
-               clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
+               clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
                spin_unlock_bh(&sl->lock);
                sl_unlock(sl);
                return;
        }
 
-       actual = tty->ops->write(tty, sl->xhead, sl->xleft);
+       actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
        sl->xleft -= actual;
        sl->xhead += actual;
        spin_unlock_bh(&sl->lock);
 }
 
+/*
+ * Called by the driver when there's room for more data.
+ * Schedule the transmit.
+ */
+static void slip_write_wakeup(struct tty_struct *tty)
+{
+       struct slip *sl = tty->disc_data;
+
+       schedule_work(&sl->tx_work);
+}
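Deferring the actual write to a work item means tty->ops->write() is no longer invoked from inside the write-wakeup callback, where it could recurse into locks still held by whoever raised the wakeup; slip_transmit() instead runs in process context holding only sl->lock.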
+
 static void sl_tx_timeout(struct net_device *dev)
 {
        struct slip *sl = netdev_priv(dev);
@@ -738,7 +749,7 @@ static struct slip *sl_alloc(dev_t line)
                return NULL;
 
        sprintf(name, "sl%d", i);
-       dev = alloc_netdev(sizeof(*sl), name, sl_setup);
+       dev = alloc_netdev(sizeof(*sl), name, NET_NAME_UNKNOWN, sl_setup);
        if (!dev)
                return NULL;
 
@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
        sl->magic       = SLIP_MAGIC;
        sl->dev         = dev;
        spin_lock_init(&sl->lock);
+       INIT_WORK(&sl->tx_work, slip_transmit);
        sl->mode        = SL_MODE_DEFAULT;
 #ifdef CONFIG_SLIP_SMART
        /* initialize timer_list struct */
@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
        if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
                return;
 
+       spin_lock_bh(&sl->lock);
        tty->disc_data = NULL;
        sl->tty = NULL;
+       spin_unlock_bh(&sl->lock);
+
+       flush_work(&sl->tx_work);
 
        /* VSV = very important to remove timers */
 #ifdef CONFIG_SLIP_SMART
index 67673cf..cf32aad 100644
@@ -53,6 +53,7 @@ struct slip {
   struct tty_struct    *tty;           /* ptr to TTY structure         */
   struct net_device    *dev;           /* easy for intr handling       */
   spinlock_t           lock;
+  struct work_struct   tx_work;        /* Flushes transmit buffer      */
 
 #ifdef SL_INCLUDE_CSLIP
   struct slcompress    *slcomp;        /* for header compression       */
index a58dfeb..a1536d0 100644
@@ -58,7 +58,7 @@ struct lb_priv_ex {
 };
 
 struct lb_priv {
-       struct sk_filter __rcu *fp;
+       struct bpf_prog __rcu *fp;
        lb_select_tx_port_func_t __rcu *select_tx_port_func;
        struct lb_pcpu_stats __percpu *pcpu_stats;
        struct lb_priv_ex *ex; /* priv extension */
@@ -174,14 +174,14 @@ static lb_select_tx_port_func_t *lb_select_tx_port_get_func(const char *name)
 static unsigned int lb_get_skb_hash(struct lb_priv *lb_priv,
                                    struct sk_buff *skb)
 {
-       struct sk_filter *fp;
+       struct bpf_prog *fp;
        uint32_t lhash;
        unsigned char *c;
 
        fp = rcu_dereference_bh(lb_priv->fp);
        if (unlikely(!fp))
                return 0;
-       lhash = SK_RUN_FILTER(fp, skb);
+       lhash = BPF_PROG_RUN(fp, skb);
        c = (char *) &lhash;
        return c[0] ^ c[1] ^ c[2] ^ c[3];
 }
@@ -271,8 +271,8 @@ static void __fprog_destroy(struct sock_fprog_kern *fprog)
 static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
 {
        struct lb_priv *lb_priv = get_lb_priv(team);
-       struct sk_filter *fp = NULL;
-       struct sk_filter *orig_fp;
+       struct bpf_prog *fp = NULL;
+       struct bpf_prog *orig_fp = NULL;
        struct sock_fprog_kern *fprog = NULL;
        int err;
 
@@ -281,7 +281,7 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
                                     ctx->data.bin_val.ptr);
                if (err)
                        return err;
-               err = sk_unattached_filter_create(&fp, fprog);
+               err = bpf_prog_create(&fp, fprog);
                if (err) {
                        __fprog_destroy(fprog);
                        return err;
@@ -293,11 +293,15 @@ static int lb_bpf_func_set(struct team *team, struct team_gsetter_ctx *ctx)
                __fprog_destroy(lb_priv->ex->orig_fprog);
                orig_fp = rcu_dereference_protected(lb_priv->fp,
                                                lockdep_is_held(&team->lock));
-               sk_unattached_filter_destroy(orig_fp);
        }
 
        rcu_assign_pointer(lb_priv->fp, fp);
        lb_priv->ex->orig_fprog = fprog;
+
+       if (orig_fp) {
+               synchronize_rcu();
+               bpf_prog_destroy(orig_fp);
+       }
        return 0;
 }
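lb_get_skb_hash() dereferences lb_priv->fp under the RCU read-side lock (rcu_dereference_bh() above), so the old program may still be executing when the new one is published; deferring bpf_prog_destroy() until after synchronize_rcu() lets those in-flight readers drain before the program is freed.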
 
index 98bad1f..acaaf67 100644
@@ -1633,7 +1633,8 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
                        name = ifr->ifr_name;
 
                dev = alloc_netdev_mqs(sizeof(struct tun_struct), name,
-                                      tun_setup, queues, queues);
+                                      NET_NAME_UNKNOWN, tun_setup, queues,
+                                      queues);
 
                if (!dev)
                        return -ENOMEM;
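The new NET_NAME_UNKNOWN argument, also threaded through the ppp and slip hunks above, is the name_assign_type parameter added to alloc_netdev() and friends this cycle; UNKNOWN is the conservative value for drivers that do not declare how their interface names are chosen.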
index 054e59c..be42757 100644
@@ -23,6 +23,8 @@
 #include <linux/usb.h>
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
+#include <uapi/linux/mdio.h>
+#include <linux/mdio.h>
 
 #define AX88179_PHY_ID                         0x03
 #define AX_EEPROM_LEN                          0x100
 #define GMII_PHY_PAGE_SELECT                   0x1f
        #define GMII_PHY_PGSEL_EXT      0x0007
        #define GMII_PHY_PGSEL_PAGE0    0x0000
+       #define GMII_PHY_PGSEL_PAGE3    0x0003
+       #define GMII_PHY_PGSEL_PAGE5    0x0005
 
 struct ax88179_data {
+       u8  eee_enabled;
+       u8  eee_active;
        u16 rxctl;
        u16 reserved;
 };
@@ -373,6 +379,60 @@ static void ax88179_mdio_write(struct net_device *netdev, int phy_id, int loc,
        ax88179_write_cmd(dev, AX_ACCESS_PHY, phy_id, (__u16)loc, 2, &res);
 }
 
+static inline int ax88179_phy_mmd_indirect(struct usbnet *dev, u16 prtad,
+                                          u16 devad)
+{
+       u16 tmp16;
+       int ret;
+
+       tmp16 = devad;
+       ret = ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                               MII_MMD_CTRL, 2, &tmp16);
+
+       tmp16 = prtad;
+       ret = ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                               MII_MMD_DATA, 2, &tmp16);
+
+       tmp16 = devad | MII_MMD_CTRL_NOINCR;
+       ret = ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                               MII_MMD_CTRL, 2, &tmp16);
+
+       return ret;
+}
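This is the standard Clause 22 indirect MMD access sequence: MII_MMD_CTRL (register 13) selects the MMD device and access mode, and MII_MMD_DATA (register 14) carries first the register address and then, once MII_MMD_CTRL_NOINCR is set, the data. Note that ret reflects only the last write; error codes from the two earlier writes are overwritten.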
+
+static int
+ax88179_phy_read_mmd_indirect(struct usbnet *dev, u16 prtad, u16 devad)
+{
+       int ret;
+       u16 tmp16;
+
+       ax88179_phy_mmd_indirect(dev, prtad, devad);
+
+       ret = ax88179_read_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                              MII_MMD_DATA, 2, &tmp16);
+       if (ret < 0)
+               return ret;
+
+       return tmp16;
+}
+
+static int
+ax88179_phy_write_mmd_indirect(struct usbnet *dev, u16 prtad, u16 devad,
+                              u16 data)
+{
+       int ret;
+
+       ax88179_phy_mmd_indirect(dev, prtad, devad);
+
+       ret = ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                               MII_MMD_DATA, 2, &data);
+
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
 static int ax88179_suspend(struct usb_interface *intf, pm_message_t message)
 {
        struct usbnet *dev = usb_get_intfdata(intf);
@@ -572,6 +632,185 @@ static int ax88179_set_settings(struct net_device *net, struct ethtool_cmd *cmd)
        return mii_ethtool_sset(&dev->mii, cmd);
 }
 
+static int
+ax88179_ethtool_get_eee(struct usbnet *dev, struct ethtool_eee *data)
+{
+       int val;
+
+       /* Get Supported EEE */
+       val = ax88179_phy_read_mmd_indirect(dev, MDIO_PCS_EEE_ABLE,
+                                           MDIO_MMD_PCS);
+       if (val < 0)
+               return val;
+       data->supported = mmd_eee_cap_to_ethtool_sup_t(val);
+
+       /* Get advertisement EEE */
+       val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_ADV,
+                                           MDIO_MMD_AN);
+       if (val < 0)
+               return val;
+       data->advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+
+       /* Get LP advertisement EEE */
+       val = ax88179_phy_read_mmd_indirect(dev, MDIO_AN_EEE_LPABLE,
+                                           MDIO_MMD_AN);
+       if (val < 0)
+               return val;
+       data->lp_advertised = mmd_eee_adv_to_ethtool_adv_t(val);
+
+       return 0;
+}
+
+static int
+ax88179_ethtool_set_eee(struct usbnet *dev, struct ethtool_eee *data)
+{
+       u16 tmp16 = ethtool_adv_to_mmd_eee_adv_t(data->advertised);
+
+       return ax88179_phy_write_mmd_indirect(dev, MDIO_AN_EEE_ADV,
+                                             MDIO_MMD_AN, tmp16);
+}
+
+static int ax88179_chk_eee(struct usbnet *dev)
+{
+       struct ethtool_cmd ecmd = { .cmd = ETHTOOL_GSET };
+       struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+
+       mii_ethtool_gset(&dev->mii, &ecmd);
+
+       if (ecmd.duplex & DUPLEX_FULL) {
+               int eee_lp, eee_cap, eee_adv;
+               u32 lp, cap, adv, supported = 0;
+
+               eee_cap = ax88179_phy_read_mmd_indirect(dev,
+                                                       MDIO_PCS_EEE_ABLE,
+                                                       MDIO_MMD_PCS);
+               if (eee_cap < 0) {
+                       priv->eee_active = 0;
+                       return false;
+               }
+
+               cap = mmd_eee_cap_to_ethtool_sup_t(eee_cap);
+               if (!cap) {
+                       priv->eee_active = 0;
+                       return false;
+               }
+
+               eee_lp = ax88179_phy_read_mmd_indirect(dev,
+                                                      MDIO_AN_EEE_LPABLE,
+                                                      MDIO_MMD_AN);
+               if (eee_lp < 0) {
+                       priv->eee_active = 0;
+                       return false;
+               }
+
+               eee_adv = ax88179_phy_read_mmd_indirect(dev,
+                                                       MDIO_AN_EEE_ADV,
+                                                       MDIO_MMD_AN);
+
+               if (eee_adv < 0) {
+                       priv->eee_active = 0;
+                       return false;
+               }
+
+               adv = mmd_eee_adv_to_ethtool_adv_t(eee_adv);
+               lp = mmd_eee_adv_to_ethtool_adv_t(eee_lp);
+               supported = (ecmd.speed == SPEED_1000) ?
+                            SUPPORTED_1000baseT_Full :
+                            SUPPORTED_100baseT_Full;
+
+               if (!(lp & adv & supported)) {
+                       priv->eee_active = 0;
+                       return false;
+               }
+
+               priv->eee_active = 1;
+               return true;
+       }
+
+       priv->eee_active = 0;
+       return false;
+}
+
+static void ax88179_disable_eee(struct usbnet *dev)
+{
+       u16 tmp16;
+
+       tmp16 = GMII_PHY_PGSEL_PAGE3;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp16);
+
+       tmp16 = 0x3246;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         MII_PHYADDR, 2, &tmp16);
+
+       tmp16 = GMII_PHY_PGSEL_PAGE0;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp16);
+}
+
+static void ax88179_enable_eee(struct usbnet *dev)
+{
+       u16 tmp16;
+
+       tmp16 = GMII_PHY_PGSEL_PAGE3;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp16);
+
+       tmp16 = 0x3247;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         MII_PHYADDR, 2, &tmp16);
+
+       tmp16 = GMII_PHY_PGSEL_PAGE5;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp16);
+
+       tmp16 = 0x0680;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         MII_BMSR, 2, &tmp16);
+
+       tmp16 = GMII_PHY_PGSEL_PAGE0;
+       ax88179_write_cmd(dev, AX_ACCESS_PHY, AX88179_PHY_ID,
+                         GMII_PHY_PAGE_SELECT, 2, &tmp16);
+}
+
+static int ax88179_get_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+
+       edata->eee_enabled = priv->eee_enabled;
+       edata->eee_active = priv->eee_active;
+
+       return ax88179_ethtool_get_eee(dev, edata);
+}
+
+static int ax88179_set_eee(struct net_device *net, struct ethtool_eee *edata)
+{
+       struct usbnet *dev = netdev_priv(net);
+       struct ax88179_data *priv = (struct ax88179_data *)dev->data;
+       int ret = -EOPNOTSUPP;
+
+       priv->eee_enabled = edata->eee_enabled;
+       if (!priv->eee_enabled) {
+               ax88179_disable_eee(dev);
+       } else {
+               priv->eee_enabled = ax88179_chk_eee(dev);
+               if (!priv->eee_enabled)
+                       return -EOPNOTSUPP;
+
+               ax88179_enable_eee(dev);
+       }
+
+       ret = ax88179_ethtool_set_eee(dev, edata);
+       if (ret)
+               return ret;
+
+       mii_nway_restart(&dev->mii);
+
+       usbnet_link_change(dev, 0, 0);
+
+       return ret;
+}
 
 static int ax88179_ioctl(struct net_device *net, struct ifreq *rq, int cmd)
 {
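
ax88179_chk_eee() above boils down to one rule: EEE is active only on a full-duplex link where both link partners advertise EEE for the negotiated speed. A compact sketch of just that resolution step, assuming the raw MDIO_PCS_EEE_ABLE / MDIO_AN_EEE_ADV / MDIO_AN_EEE_LPABLE values have already been fetched as shown above:

#include <linux/mdio.h>
#include <linux/ethtool.h>

static bool eee_is_active(u16 cap, u16 adv, u16 lp, u32 speed, u8 duplex)
{
        u32 want;

        if (duplex != DUPLEX_FULL || !mmd_eee_cap_to_ethtool_sup_t(cap))
                return false;

        want = (speed == SPEED_1000) ? SUPPORTED_1000baseT_Full :
                                       SUPPORTED_100baseT_Full;

        /* EEE runs only on a mode both link partners advertise */
        return !!(mmd_eee_adv_to_ethtool_adv_t(adv) &
                  mmd_eee_adv_to_ethtool_adv_t(lp) & want);
}
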
@@ -589,6 +828,8 @@ static const struct ethtool_ops ax88179_ethtool_ops = {
        .get_eeprom             = ax88179_get_eeprom,
        .get_settings           = ax88179_get_settings,
        .set_settings           = ax88179_set_settings,
+       .get_eee                = ax88179_get_eee,
+       .set_eee                = ax88179_set_eee,
        .nway_reset             = usbnet_nway_reset,
 };
 
@@ -980,6 +1221,7 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
        u16 *tmp16;
        u8 *tmp;
        struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+       struct ethtool_eee eee_data;
 
        usbnet_get_endpoints(dev, intf);
 
@@ -1062,6 +1304,15 @@ static int ax88179_bind(struct usbnet *dev, struct usb_interface *intf)
 
        ax88179_led_setting(dev);
 
+       ax179_data->eee_enabled = 0;
+       ax179_data->eee_active = 0;
+
+       ax88179_disable_eee(dev);
+
+       ax88179_ethtool_get_eee(dev, &eee_data);
+       eee_data.advertised = 0;
+       ax88179_ethtool_set_eee(dev, &eee_data);
+
        /* Restart autoneg */
        mii_nway_restart(&dev->mii);
 
@@ -1261,6 +1512,8 @@ static int ax88179_link_reset(struct usbnet *dev)
        ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_MEDIUM_STATUS_MODE,
                          2, 2, &mode);
 
+       ax179_data->eee_enabled = ax88179_chk_eee(dev);
+
        netif_carrier_on(dev->net);
 
        return 0;
@@ -1271,6 +1524,8 @@ static int ax88179_reset(struct usbnet *dev)
        u8 buf[5];
        u16 *tmp16;
        u8 *tmp;
+       struct ax88179_data *ax179_data = (struct ax88179_data *)dev->data;
+       struct ethtool_eee eee_data;
 
        tmp16 = (u16 *)buf;
        tmp = (u8 *)buf;
@@ -1340,6 +1595,15 @@ static int ax88179_reset(struct usbnet *dev)
 
        ax88179_led_setting(dev);
 
+       ax179_data->eee_enabled = 0;
+       ax179_data->eee_active = 0;
+
+       ax88179_disable_eee(dev);
+
+       ax88179_ethtool_get_eee(dev, &eee_data);
+       eee_data.advertised = 0;
+       ax88179_ethtool_set_eee(dev, &eee_data);
+
        /* Restart autoneg */
        mii_nway_restart(&dev->mii);
 
index 6358d42..2ec1500 100644 (file)
@@ -387,7 +387,7 @@ static int usbpn_probe(struct usb_interface *intf, const struct usb_device_id *i
                return -EINVAL;
 
        dev = alloc_netdev(sizeof(*pnd) + sizeof(pnd->urbs[0]) * rxq_size,
-                               ifname, usbpn_setup);
+                          ifname, NET_NAME_UNKNOWN, usbpn_setup);
        if (!dev)
                return -ENOMEM;
 
index 9ea4bfe..2a32d91 100644 (file)
@@ -341,6 +341,22 @@ next_desc:
                usb_driver_release_interface(driver, info->data);
                return -ENODEV;
        }
+
+       /* Some devices don't initialise properly. In particular,
+        * some fail to reset all the way, leaving the packet filter
+        * in a stale state, so program it to a sane initial value.
+        */
+       usb_control_msg(dev->udev,
+                       usb_sndctrlpipe(dev->udev, 0),
+                       USB_CDC_SET_ETHERNET_PACKET_FILTER,
+                       USB_TYPE_CLASS | USB_RECIP_INTERFACE,
+                       USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
+                       intf->cur_altsetting->desc.bInterfaceNumber,
+                       NULL,
+                       0,
+                       USB_CTRL_SET_TIMEOUT
+               );
        return 0;
 
 bad_desc:
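
The filter value hard-coded above (directed + broadcast + all-multicast) is just a safe bring-up default. For illustration, a hypothetical helper showing how such a CDC SetEthernetPacketFilter bitmap is usually derived from the interface flags at runtime (this helper is not part of cdc_ether):

#include <linux/netdevice.h>
#include <linux/usb/cdc.h>

static u16 cdc_filter_from_flags(const struct net_device *net)
{
        u16 filter = USB_CDC_PACKET_TYPE_DIRECTED |
                     USB_CDC_PACKET_TYPE_BROADCAST;

        if (net->flags & IFF_PROMISC)
                filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
        if (net->flags & IFF_ALLMULTI)
                filter |= USB_CDC_PACKET_TYPE_ALL_MULTICAST;
        else if (!netdev_mc_empty(net))
                filter |= USB_CDC_PACKET_TYPE_MULTICAST;

        return filter;
}
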
index a3a0586..babda7d 100644 (file)
@@ -258,10 +258,8 @@ struct hso_serial {
         * so as not to drop characters on the floor.
         */
        int  curr_rx_urb_idx;
-       u16  curr_rx_urb_offset;
        u8   rx_urb_filled[MAX_RX_URBS];
        struct tasklet_struct unthrottle_tasklet;
-       struct work_struct    retry_unthrottle_workqueue;
 };
 
 struct hso_device {
@@ -469,6 +467,7 @@ static const struct usb_device_id hso_ids[] = {
        {USB_DEVICE(0x0af0, 0x8800)},
        {USB_DEVICE(0x0af0, 0x8900)},
        {USB_DEVICE(0x0af0, 0x9000)},
+       {USB_DEVICE(0x0af0, 0x9200)},           /* Option GTM671WFS */
        {USB_DEVICE(0x0af0, 0xd035)},
        {USB_DEVICE(0x0af0, 0xd055)},
        {USB_DEVICE(0x0af0, 0xd155)},
@@ -1252,14 +1251,6 @@ static   void hso_unthrottle(struct tty_struct *tty)
        tasklet_hi_schedule(&serial->unthrottle_tasklet);
 }
 
-static void hso_unthrottle_workfunc(struct work_struct *work)
-{
-       struct hso_serial *serial =
-           container_of(work, struct hso_serial,
-                        retry_unthrottle_workqueue);
-       hso_unthrottle_tasklet(serial);
-}
-
 /* open the requested serial port */
 static int hso_serial_open(struct tty_struct *tty, struct file *filp)
 {
@@ -1295,8 +1286,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
                tasklet_init(&serial->unthrottle_tasklet,
                             (void (*)(unsigned long))hso_unthrottle_tasklet,
                             (unsigned long)serial);
-               INIT_WORK(&serial->retry_unthrottle_workqueue,
-                         hso_unthrottle_workfunc);
                result = hso_start_serial_device(serial->parent, GFP_KERNEL);
                if (result) {
                        hso_stop_serial_device(serial->parent);
@@ -1345,7 +1334,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
                if (!usb_gone)
                        hso_stop_serial_device(serial->parent);
                tasklet_kill(&serial->unthrottle_tasklet);
-               cancel_work_sync(&serial->retry_unthrottle_workqueue);
        }
 
        if (!usb_gone)
@@ -2013,8 +2001,7 @@ static void ctrl_callback(struct urb *urb)
 static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 {
        struct tty_struct *tty;
-       int write_length_remaining = 0;
-       int curr_write_len;
+       int count;
 
        /* Sanity check */
        if (urb == NULL || serial == NULL) {
@@ -2024,29 +2011,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
 
        tty = tty_port_tty_get(&serial->port);
 
+       if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
+               tty_kref_put(tty);
+               return -1;
+       }
+
        /* Push data to tty */
-       write_length_remaining = urb->actual_length -
-               serial->curr_rx_urb_offset;
        D1("data to push to tty");
-       while (write_length_remaining) {
-               if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
-                       tty_kref_put(tty);
-                       return -1;
-               }
-               curr_write_len = tty_insert_flip_string(&serial->port,
-                       urb->transfer_buffer + serial->curr_rx_urb_offset,
-                       write_length_remaining);
-               serial->curr_rx_urb_offset += curr_write_len;
-               write_length_remaining -= curr_write_len;
+       count = tty_buffer_request_room(&serial->port, urb->actual_length);
+       if (count >= urb->actual_length) {
+               tty_insert_flip_string(&serial->port, urb->transfer_buffer,
+                                      urb->actual_length);
                tty_flip_buffer_push(&serial->port);
+       } else {
+               dev_warn(&serial->parent->usb->dev,
+                        "dropping data, %d bytes lost\n", urb->actual_length);
        }
+
        tty_kref_put(tty);
 
-       if (write_length_remaining == 0) {
-               serial->curr_rx_urb_offset = 0;
-               serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
-       }
-       return write_length_remaining;
+       serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
+
+       return 0;
 }
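
The put_rxbuf_data() rewrite above trades partial-write bookkeeping (curr_rx_urb_offset plus a retry workqueue) for an all-or-nothing push: ask the tty layer up front whether the whole URB fits, and drop it with a warning if not. The core pattern, sketched with stand-in parameters for the URB fields:

#include <linux/errno.h>
#include <linux/tty.h>
#include <linux/tty_flip.h>

static int push_all_or_nothing(struct tty_port *port,
                               const unsigned char *buf, size_t len)
{
        /* either the complete buffer fits, or we drop it */
        if (tty_buffer_request_room(port, len) < (int)len)
                return -ENOMEM;         /* caller warns and discards */

        tty_insert_flip_string(port, buf, len);
        tty_flip_buffer_push(port);
        return 0;
}
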
 
 
@@ -2217,7 +2203,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
                }
        }
        serial->curr_rx_urb_idx = 0;
-       serial->curr_rx_urb_offset = 0;
 
        if (serial->tx_urb)
                usb_kill_urb(serial->tx_urb);
@@ -2520,7 +2505,8 @@ static struct hso_device *hso_create_net_device(struct usb_interface *interface,
 
        /* allocate our network device, then we can put in our private data */
        /* call hso_net_init to do the basic initialization */
-       net = alloc_netdev(sizeof(struct hso_net), "hso%d", hso_net_init);
+       net = alloc_netdev(sizeof(struct hso_net), "hso%d", NET_NAME_UNKNOWN,
+                          hso_net_init);
        if (!net) {
                dev_err(&interface->dev, "Unable to create ethernet device\n");
                goto exit;
index f9822bc..735f7da 100644 (file)
@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
        ctx = drvstate->ctx;
 
        if (usbnet_dev->status)
-               /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256
-                * decimal (0x100)"
+               /* The wMaxCommand buffer must be big enough to hold
+                * any message from the modem. Experience has shown
+                * that some replies are more than 256 bytes long.
                 */
                subdriver = usb_cdc_wdm_register(ctx->control,
                                                 &usbnet_dev->status->desc,
-                                                256, /* wMaxCommand */
+                                                1024, /* wMaxCommand */
                                                 huawei_cdc_ncm_wdm_manage_power);
        if (IS_ERR(subdriver)) {
                ret = PTR_ERR(subdriver);
@@ -193,6 +194,9 @@ static const struct usb_device_id huawei_cdc_ncm_devs[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x02, 0x76),
          .driver_info = (unsigned long)&huawei_cdc_ncm_info,
        },
+       { USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x03, 0x16),
+         .driver_info = (unsigned long)&huawei_cdc_ncm_info,
+       },
 
        /* Terminating entry */
        {
index cf62d7e..22756db 100644 (file)
@@ -667,6 +667,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x05c6, 0x9084, 4)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 0)},
        {QMI_FIXED_INTF(0x05c6, 0x920d, 5)},
+       {QMI_FIXED_INTF(0x0846, 0x68a2, 8)},
        {QMI_FIXED_INTF(0x12d1, 0x140c, 1)},    /* Huawei E173 */
        {QMI_FIXED_INTF(0x12d1, 0x14ac, 1)},    /* Huawei E1820 */
        {QMI_FIXED_INTF(0x16d8, 0x6003, 0)},    /* CMOTech 6003 */
@@ -741,6 +742,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
        {QMI_FIXED_INTF(0x19d2, 0x1426, 2)},    /* ZTE MF91 */
+       {QMI_FIXED_INTF(0x19d2, 0x1428, 2)},    /* Telewell TW-LTE 4G v2 */
        {QMI_FIXED_INTF(0x19d2, 0x2002, 4)},    /* ZTE (Vodafone) K3765-Z */
        {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)},    /* Sierra Wireless MC7700 */
        {QMI_FIXED_INTF(0x114f, 0x68a2, 8)},    /* Sierra Wireless MC7750 */
@@ -756,6 +758,7 @@ static const struct usb_device_id products[] = {
        {QMI_FIXED_INTF(0x1199, 0x9054, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1199, 0x9055, 8)},    /* Netgear AirCard 341U */
        {QMI_FIXED_INTF(0x1199, 0x9056, 8)},    /* Sierra Wireless Modem */
+       {QMI_FIXED_INTF(0x1199, 0x9057, 8)},
        {QMI_FIXED_INTF(0x1199, 0x9061, 8)},    /* Sierra Wireless Modem */
        {QMI_FIXED_INTF(0x1bbb, 0x011e, 4)},    /* Telekom Speedstick LTE II (Alcatel One Touch L100V LTE) */
        {QMI_FIXED_INTF(0x1bbb, 0x0203, 2)},    /* Alcatel L800MA */
index 2543196..87f7104 100644 (file)
@@ -59,6 +59,7 @@
 #define PLA_WDT6_CTRL          0xe428
 #define PLA_TCR0               0xe610
 #define PLA_TCR1               0xe612
+#define PLA_MTPS               0xe615
 #define PLA_TXFIFO_CTRL                0xe618
 #define PLA_RSTTALLY           0xe800
 #define PLA_CR                 0xe813
 /* PLA_TCR1 */
 #define VERSION_MASK           0x7cf0
 
+/* PLA_MTPS */
+#define MTPS_JUMBO             (12 * 1024 / 64)
+#define MTPS_DEFAULT           (6 * 1024 / 64)
+
 /* PLA_RSTTALLY */
 #define TALLY_RESET            0x0001
 
 /* USB_DEV_STAT */
 #define STAT_SPEED_MASK                0x0006
 #define STAT_SPEED_HIGH                0x0000
-#define STAT_SPEED_FULL                0x0001
+#define STAT_SPEED_FULL                0x0002
 
 /* USB_TX_AGG */
 #define TX_AGG_MAX_THRESHOLD   0x03
@@ -440,8 +445,11 @@ enum rtl_register_content {
 #define BYTE_EN_START_MASK     0x0f
 #define BYTE_EN_END_MASK       0xf0
 
+#define RTL8153_MAX_PACKET     9216 /* 9K */
+#define RTL8153_MAX_MTU                (RTL8153_MAX_PACKET - VLAN_ETH_HLEN - VLAN_HLEN)
 #define RTL8152_RMS            (VLAN_ETH_FRAME_LEN + VLAN_HLEN)
-#define RTL8152_TX_TIMEOUT     (HZ)
+#define RTL8153_RMS            RTL8153_MAX_PACKET
+#define RTL8152_TX_TIMEOUT     (5 * HZ)
 
 /* rtl8152 flags */
 enum rtl8152_flags {
@@ -1359,7 +1367,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
                struct sk_buff_head seg_list;
                struct sk_buff *segs, *nskb;
 
-               features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO);
+               features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
                segs = skb_gso_segment(skb, features);
                if (IS_ERR(segs) || !segs)
                        goto drop;
@@ -2292,9 +2300,8 @@ static void r8152b_exit_oob(struct r8152 *tp)
        /* rx share fifo credit full threshold */
        ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL0, RXFIFO_THR1_NORMAL);
 
-       ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_DEV_STAT);
-       ocp_data &= STAT_SPEED_MASK;
-       if (ocp_data == STAT_SPEED_FULL) {
+       if (tp->udev->speed == USB_SPEED_FULL ||
+           tp->udev->speed == USB_SPEED_LOW) {
                /* rx share fifo credit near full threshold */
                ocp_write_dword(tp, MCU_TYPE_PLA, PLA_RXFIFO_CTRL1,
                                RXFIFO_THR2_FULL);
@@ -2522,7 +2529,8 @@ static void r8153_first_init(struct r8152 *tp)
        ocp_data &= ~CPCR_RX_VLAN;
        ocp_write_word(tp, MCU_TYPE_PLA, PLA_CPCR, ocp_data);
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
+       ocp_write_byte(tp, MCU_TYPE_PLA, PLA_MTPS, MTPS_JUMBO);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TCR0);
        ocp_data |= TCR0_AUTO_FIFO;
@@ -2572,7 +2580,7 @@ static void r8153_enter_oob(struct r8152 *tp)
                mdelay(1);
        }
 
-       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8152_RMS);
+       ocp_write_word(tp, MCU_TYPE_PLA, PLA_RMS, RTL8153_RMS);
 
        ocp_data = ocp_read_word(tp, MCU_TYPE_PLA, PLA_TEREDO_CFG);
        ocp_data &= ~TEREDO_WAKE_MASK;
@@ -3204,8 +3212,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
        struct r8152 *tp = netdev_priv(dev);
        struct tally_counter tally;
 
+       if (usb_autopm_get_interface(tp->intf) < 0)
+               return;
+
        generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
 
+       usb_autopm_put_interface(tp->intf);
+
        data[0] = le64_to_cpu(tally.tx_packets);
        data[1] = le64_to_cpu(tally.rx_packets);
        data[2] = le64_to_cpu(tally.tx_errors);
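
The usb_autopm_get_interface()/usb_autopm_put_interface() pair added above keeps the device resumed while ethtool reads the tally counters; without it, a runtime-suspended device could be poked while powered down. The pattern, as a sketch (read_hw_counters() is a stand-in for the generic_ocp_read() above):

#include <linux/usb.h>

static int read_hw_counters(struct usb_interface *intf)
{
        return 0;       /* stand-in for the real register read */
}

static int read_stats_powered(struct usb_interface *intf)
{
        int ret = usb_autopm_get_interface(intf);       /* resume if needed */

        if (ret < 0)
                return ret;     /* leave a suspended device alone */

        ret = read_hw_counters(intf);
        usb_autopm_put_interface(intf);                 /* re-allow autosuspend */
        return ret;
}
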
@@ -3284,6 +3297,26 @@ out:
        return res;
 }
 
+static int rtl8152_change_mtu(struct net_device *dev, int new_mtu)
+{
+       struct r8152 *tp = netdev_priv(dev);
+
+       switch (tp->version) {
+       case RTL_VER_01:
+       case RTL_VER_02:
+               return eth_change_mtu(dev, new_mtu);
+       default:
+               break;
+       }
+
+       if (new_mtu < 68 || new_mtu > RTL8153_MAX_MTU)
+               return -EINVAL;
+
+       dev->mtu = new_mtu;
+
+       return 0;
+}
+
 static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_open               = rtl8152_open,
        .ndo_stop               = rtl8152_close,
@@ -3292,8 +3325,7 @@ static const struct net_device_ops rtl8152_netdev_ops = {
        .ndo_tx_timeout         = rtl8152_tx_timeout,
        .ndo_set_rx_mode        = rtl8152_set_rx_mode,
        .ndo_set_mac_address    = rtl8152_set_mac_address,
-
-       .ndo_change_mtu         = eth_change_mtu,
+       .ndo_change_mtu         = rtl8152_change_mtu,
        .ndo_validate_addr      = eth_validate_addr,
 };
 
index 424db65..d07bf4c 100644 (file)
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
        return ret;
 }
 
+static int smsc95xx_reset_resume(struct usb_interface *intf)
+{
+       struct usbnet *dev = usb_get_intfdata(intf);
+       int ret;
+
+       ret = smsc95xx_reset(dev);
+       if (ret < 0)
+               return ret;
+
+       return smsc95xx_resume(intf);
+}
+
 static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
 {
        skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
        .probe          = usbnet_probe,
        .suspend        = smsc95xx_suspend,
        .resume         = smsc95xx_resume,
-       .reset_resume   = smsc95xx_resume,
+       .reset_resume   = smsc95xx_reset_resume,
        .disconnect     = usbnet_disconnect,
        .disable_hub_initiated_lpm = 1,
        .supports_autosuspend = 1,
index b4a10bc..8ad5965 100644 (file)
@@ -248,6 +248,21 @@ static void veth_dev_free(struct net_device *dev)
        free_netdev(dev);
 }
 
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void veth_poll_controller(struct net_device *dev)
+{
+       /* veth only receives frames when its peer sends one.
+        * Since it's a synchronous operation, we are guaranteed
+        * never to have pending data when we poll for it, so
+        * there is nothing to do here.
+        *
+        * We need this though so netpoll recognizes us as an interface that
+        * supports polling, which enables bridge devices in virt setups to
+        * still use netconsole.
+        */
+}
+#endif /* CONFIG_NET_POLL_CONTROLLER */
+
 static const struct net_device_ops veth_netdev_ops = {
        .ndo_init            = veth_dev_init,
        .ndo_open            = veth_open,
@@ -257,6 +272,9 @@ static const struct net_device_ops veth_netdev_ops = {
        .ndo_get_stats64     = veth_get_stats64,
        .ndo_set_rx_mode     = veth_set_multicast_list,
        .ndo_set_mac_address = eth_mac_addr,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+       .ndo_poll_controller    = veth_poll_controller,
+#endif
 };
 
 #define VETH_FEATURES (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_TSO |    \
@@ -317,6 +335,7 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
        struct veth_priv *priv;
        char ifname[IFNAMSIZ];
        struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
+       unsigned char name_assign_type;
        struct ifinfomsg *ifmp;
        struct net *net;
 
@@ -344,16 +363,20 @@ static int veth_newlink(struct net *src_net, struct net_device *dev,
                tbp = tb;
        }
 
-       if (tbp[IFLA_IFNAME])
+       if (tbp[IFLA_IFNAME]) {
                nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
-       else
+               name_assign_type = NET_NAME_USER;
+       } else {
                snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
+               name_assign_type = NET_NAME_ENUM;
+       }
 
        net = rtnl_link_get_net(src_net, tbp);
        if (IS_ERR(net))
                return PTR_ERR(net);
 
-       peer = rtnl_create_link(net, ifname, &veth_link_ops, tbp);
+       peer = rtnl_create_link(net, ifname, name_assign_type,
+                               &veth_link_ops, tbp);
        if (IS_ERR(peer)) {
                put_net(net);
                return PTR_ERR(peer);
index 7d9f84a..59caa06 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/slab.h>
 #include <linux/cpu.h>
 #include <linux/average.h>
+#include <net/busy_poll.h>
 
 static int napi_weight = NAPI_POLL_WEIGHT;
 module_param(napi_weight, int, 0444);
@@ -521,6 +522,8 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
                skb_shinfo(skb)->gso_segs = 0;
        }
 
+       skb_mark_napi_id(skb, &rq->napi);
+
        netif_receive_skb(skb);
        return;
 
@@ -725,15 +728,12 @@ static void refill_work(struct work_struct *work)
        }
 }
 
-static int virtnet_poll(struct napi_struct *napi, int budget)
+static int virtnet_receive(struct receive_queue *rq, int budget)
 {
-       struct receive_queue *rq =
-               container_of(napi, struct receive_queue, napi);
        struct virtnet_info *vi = rq->vq->vdev->priv;
+       unsigned int len, received = 0;
        void *buf;
-       unsigned int r, len, received = 0;
 
-again:
        while (received < budget &&
               (buf = virtqueue_get_buf(rq->vq, &len)) != NULL) {
                receive_buf(rq, buf, len);
@@ -745,6 +745,18 @@ again:
                        schedule_delayed_work(&vi->refill, 0);
        }
 
+       return received;
+}
+
+static int virtnet_poll(struct napi_struct *napi, int budget)
+{
+       struct receive_queue *rq =
+               container_of(napi, struct receive_queue, napi);
+       unsigned int r, received = 0;
+
+again:
+       received += virtnet_receive(rq, budget - received);
+
        /* Out of packets? */
        if (received < budget) {
                r = virtqueue_enable_cb_prepare(rq->vq);
@@ -760,6 +772,43 @@ again:
        return received;
 }
 
+#ifdef CONFIG_NET_RX_BUSY_POLL
+/* must be called with local_bh_disable()d */
+static int virtnet_busy_poll(struct napi_struct *napi)
+{
+       struct receive_queue *rq =
+               container_of(napi, struct receive_queue, napi);
+       struct virtnet_info *vi = rq->vq->vdev->priv;
+       int r, received = 0, budget = 4;
+
+       if (!(vi->status & VIRTIO_NET_S_LINK_UP))
+               return LL_FLUSH_FAILED;
+
+       if (!napi_schedule_prep(napi))
+               return LL_FLUSH_BUSY;
+
+       virtqueue_disable_cb(rq->vq);
+
+again:
+       received += virtnet_receive(rq, budget);
+
+       r = virtqueue_enable_cb_prepare(rq->vq);
+       clear_bit(NAPI_STATE_SCHED, &napi->state);
+       if (unlikely(virtqueue_poll(rq->vq, r)) &&
+           napi_schedule_prep(napi)) {
+               virtqueue_disable_cb(rq->vq);
+               if (received < budget) {
+                       budget -= received;
+                       goto again;
+               } else {
+                       __napi_schedule(napi);
+               }
+       }
+
+       return received;
+}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
+
 static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
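
virtnet_busy_poll() above follows the ndo_busy_poll contract: called with bottom halves disabled, it must take NAPI ownership, drain a few packets, release ownership, and report either the packet count or LL_FLUSH_BUSY/LL_FLUSH_FAILED. A stripped-down sketch of that contract, where rx_poll_once() is a stand-in for the virtnet_receive() call above and the link-state check and callback re-arm details are omitted:

#include <linux/netdevice.h>
#include <net/busy_poll.h>

static int rx_poll_once(struct napi_struct *napi)
{
        return 0;       /* stand-in: drain a few descriptors */
}

static int example_busy_poll(struct napi_struct *napi)
{
        int received;

        if (!napi_schedule_prep(napi))          /* NAPI owned elsewhere */
                return LL_FLUSH_BUSY;

        received = rx_poll_once(napi);

        clear_bit(NAPI_STATE_SCHED, &napi->state);      /* drop ownership */
        return received;
}
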
@@ -1347,6 +1396,9 @@ static const struct net_device_ops virtnet_netdev = {
 #ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = virtnet_netpoll,
 #endif
+#ifdef CONFIG_NET_RX_BUSY_POLL
+       .ndo_busy_poll          = virtnet_busy_poll,
+#endif
 };
 
 static void virtnet_config_changed_work(struct work_struct *work)
@@ -1552,6 +1604,7 @@ static int virtnet_alloc_queues(struct virtnet_info *vi)
                vi->rq[i].pages = NULL;
                netif_napi_add(vi->dev, &vi->rq[i].napi, virtnet_poll,
                               napi_weight);
+               napi_hash_add(&vi->rq[i].napi);
 
                sg_init_table(vi->rq[i].sg, ARRAY_SIZE(vi->rq[i].sg));
                ewma_init(&vi->rq[i].mrg_avg_pkt_len, 1, RECEIVE_AVG_WEIGHT);
@@ -1853,11 +1906,13 @@ static int virtnet_freeze(struct virtio_device *vdev)
        netif_device_detach(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
-       if (netif_running(vi->dev))
+       if (netif_running(vi->dev)) {
                for (i = 0; i < vi->max_queue_pairs; i++) {
                        napi_disable(&vi->rq[i].napi);
+                       napi_hash_del(&vi->rq[i].napi);
                        netif_napi_del(&vi->rq[i].napi);
                }
+       }
 
        remove_vq_common(vi);
 
index 9739434..b76f7dc 100644 (file)
@@ -2589,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev)
        for (i = 0; i < adapter->num_tx_queues; i++)
                spin_lock_init(&adapter->tx_queue[i].tx_lock);
 
-       err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
-                                   VMXNET3_DEF_RX_RING_SIZE,
+       err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
+                                   adapter->rx_ring_size,
                                    VMXNET3_DEF_RX_RING_SIZE);
        if (err)
                goto queue_err;
@@ -2968,6 +2968,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
        adapter->netdev = netdev;
        adapter->pdev = pdev;
 
+       adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
+       adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+
        spin_lock_init(&adapter->cmd_lock);
        adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
                                             sizeof(struct vmxnet3_adapter),
index 40c1c7b..b725fd9 100644 (file)
@@ -449,8 +449,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
        param->rx_mini_max_pending = 0;
        param->rx_jumbo_max_pending = 0;
 
-       param->rx_pending = adapter->rx_queue[0].rx_ring[0].size;
-       param->tx_pending = adapter->tx_queue[0].tx_ring.size;
+       param->rx_pending = adapter->rx_ring_size;
+       param->tx_pending = adapter->tx_ring_size;
        param->rx_mini_pending = 0;
        param->rx_jumbo_pending = 0;
 }
@@ -529,9 +529,11 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                         * size */
                        netdev_err(netdev, "failed to apply new sizes, "
                                   "try the default ones\n");
+                       new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
+                       new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
                        err = vmxnet3_create_queues(adapter,
-                                                   VMXNET3_DEF_TX_RING_SIZE,
-                                                   VMXNET3_DEF_RX_RING_SIZE,
+                                                   new_tx_ring_size,
+                                                   new_rx_ring_size,
                                                    VMXNET3_DEF_RX_RING_SIZE);
                        if (err) {
                                netdev_err(netdev, "failed to create queues "
@@ -545,6 +547,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
                        netdev_err(netdev, "failed to re-activate, error %d."
                                   " Closing it\n", err);
        }
+       adapter->tx_ring_size = new_tx_ring_size;
+       adapter->rx_ring_size = new_rx_ring_size;
 
 out:
        clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
index 190569d..29ee77f 100644 (file)
@@ -349,6 +349,11 @@ struct vmxnet3_adapter {
        u32     link_speed; /* in mbps */
 
        u64     tx_timeout_count;
+
+       /* Ring sizes */
+       u32 tx_ring_size;
+       u32 rx_ring_size;
+
        struct work_struct work;
 
        unsigned long  state;    /* VMXNET3_STATE_BIT_xxx */
index ade33ef..1fb7b37 100644 (file)
@@ -33,6 +33,7 @@
 #include <net/ip_tunnels.h>
 #include <net/icmp.h>
 #include <net/udp.h>
+#include <net/udp_tunnel.h>
 #include <net/rtnetlink.h>
 #include <net/route.h>
 #include <net/dsfield.h>
@@ -339,7 +340,7 @@ static int vxlan_fdb_info(struct sk_buff *skb, struct vxlan_dev *vxlan,
        ndm->ndm_state = fdb->state;
        ndm->ndm_ifindex = vxlan->dev->ifindex;
        ndm->ndm_flags = fdb->flags;
-       ndm->ndm_type = NDA_DST;
+       ndm->ndm_type = RTN_UNICAST;
 
        if (send_eth && nla_put(skb, NDA_LLADDR, ETH_ALEN, &fdb->eth_addr))
                goto nla_put_failure;
@@ -933,7 +934,8 @@ out:
 
 /* Dump forwarding table */
 static int vxlan_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-                         struct net_device *dev, int idx)
+                         struct net_device *dev,
+                         struct net_device *filter_dev, int idx)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
@@ -1570,25 +1572,6 @@ static bool route_shortcircuit(struct net_device *dev, struct sk_buff *skb)
        return false;
 }
 
-/* Compute source port for outgoing packet
- *   first choice to use L4 flow hash since it will spread
- *     better and maybe available from hardware
- *   secondary choice is to use jhash on the Ethernet header
- */
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb)
-{
-       unsigned int range = (port_max - port_min) + 1;
-       u32 hash;
-
-       hash = skb_get_hash(skb);
-       if (!hash)
-               hash = jhash(skb->data, 2 * ETH_ALEN,
-                            (__force u32) skb->protocol);
-
-       return htons((((u64) hash * range) >> 32) + port_min);
-}
-EXPORT_SYMBOL_GPL(vxlan_src_port);
-
 static inline struct sk_buff *vxlan_handle_offloads(struct sk_buff *skb,
                                                    bool udp_csum)
 {
@@ -1807,7 +1790,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
        if (tos == 1)
                tos = ip_tunnel_get_dsfield(old_iph, skb);
 
-       src_port = vxlan_src_port(vxlan->port_min, vxlan->port_max, skb);
+       src_port = udp_flow_src_port(dev_net(dev), skb, vxlan->port_min,
+                                    vxlan->port_max, true);
 
        if (dst->sa.sa_family == AF_INET) {
                memset(&fl4, 0, sizeof(fl4));
@@ -2235,7 +2219,6 @@ static void vxlan_setup(struct net_device *dev)
 {
        struct vxlan_dev *vxlan = netdev_priv(dev);
        unsigned int h;
-       int low, high;
 
        eth_hw_addr_random(dev);
        ether_setup(dev);
@@ -2272,9 +2255,6 @@ static void vxlan_setup(struct net_device *dev)
        vxlan->age_timer.function = vxlan_cleanup;
        vxlan->age_timer.data = (unsigned long) vxlan;
 
-       inet_get_local_port_range(dev_net(dev), &low, &high);
-       vxlan->port_min = low;
-       vxlan->port_max = high;
        vxlan->dst_port = htons(vxlan_port);
 
        vxlan->dev = dev;
@@ -2360,102 +2340,37 @@ static void vxlan_del_work(struct work_struct *work)
        kfree_rcu(vs, rcu);
 }
 
-#if IS_ENABLED(CONFIG_IPV6)
-/* Create UDP socket for encapsulation receive. AF_INET6 socket
- * could be used for both IPv4 and IPv6 communications, but
- * users may set bindv6only=1.
- */
-static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
+static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
+                                       __be16 port, u32 flags)
 {
-       struct sock *sk;
        struct socket *sock;
-       struct sockaddr_in6 vxlan_addr = {
-               .sin6_family = AF_INET6,
-               .sin6_port = port,
-       };
-       int rc, val = 1;
-
-       rc = sock_create_kern(AF_INET6, SOCK_DGRAM, IPPROTO_UDP, &sock);
-       if (rc < 0) {
-               pr_debug("UDPv6 socket create failed\n");
-               return ERR_PTR(rc);
-       }
-
-       /* Put in proper namespace */
-       sk = sock->sk;
-       sk_change_net(sk, net);
-
-       kernel_setsockopt(sock, SOL_IPV6, IPV6_V6ONLY,
-                         (char *)&val, sizeof(val));
-       rc = kernel_bind(sock, (struct sockaddr *)&vxlan_addr,
-                        sizeof(struct sockaddr_in6));
-       if (rc < 0) {
-               pr_debug("bind for UDPv6 socket %pI6:%u (%d)\n",
-                        &vxlan_addr.sin6_addr, ntohs(vxlan_addr.sin6_port), rc);
-               sk_release_kernel(sk);
-               return ERR_PTR(rc);
-       }
-       /* At this point, IPv6 module should have been loaded in
-        * sock_create_kern().
-        */
-       BUG_ON(!ipv6_stub);
-
-       /* Disable multicast loopback */
-       inet_sk(sk)->mc_loop = 0;
-
-       if (flags & VXLAN_F_UDP_ZERO_CSUM6_TX)
-               udp_set_no_check6_tx(sk, true);
-
-       if (flags & VXLAN_F_UDP_ZERO_CSUM6_RX)
-               udp_set_no_check6_rx(sk, true);
-
-       return sock;
-}
-
-#else
-
-static struct socket *create_v6_sock(struct net *net, __be16 port, u32 flags)
-{
-               return ERR_PTR(-EPFNOSUPPORT);
-}
-#endif
+       struct udp_port_cfg udp_conf;
+       int err;
 
-static struct socket *create_v4_sock(struct net *net, __be16 port, u32 flags)
-{
-       struct sock *sk;
-       struct socket *sock;
-       struct sockaddr_in vxlan_addr = {
-               .sin_family = AF_INET,
-               .sin_addr.s_addr = htonl(INADDR_ANY),
-               .sin_port = port,
-       };
-       int rc;
+       memset(&udp_conf, 0, sizeof(udp_conf));
 
-       /* Create UDP socket for encapsulation receive. */
-       rc = sock_create_kern(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock);
-       if (rc < 0) {
-               pr_debug("UDP socket create failed\n");
-               return ERR_PTR(rc);
+       if (ipv6) {
+               udp_conf.family = AF_INET6;
+               udp_conf.use_udp6_tx_checksums =
+                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
+               udp_conf.use_udp6_rx_checksums =
+                   !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
+       } else {
+               udp_conf.family = AF_INET;
+               udp_conf.local_ip.s_addr = INADDR_ANY;
+               udp_conf.use_udp_checksums =
+                   !!(flags & VXLAN_F_UDP_CSUM);
        }
 
-       /* Put in proper namespace */
-       sk = sock->sk;
-       sk_change_net(sk, net);
+       udp_conf.local_udp_port = port;
 
-       rc = kernel_bind(sock, (struct sockaddr *) &vxlan_addr,
-                        sizeof(vxlan_addr));
-       if (rc < 0) {
-               pr_debug("bind for UDP socket %pI4:%u (%d)\n",
-                        &vxlan_addr.sin_addr, ntohs(vxlan_addr.sin_port), rc);
-               sk_release_kernel(sk);
-               return ERR_PTR(rc);
-       }
+       /* Open UDP socket */
+       err = udp_sock_create(net, &udp_conf, &sock);
+       if (err < 0)
+               return ERR_PTR(err);
 
        /* Disable multicast loopback */
-       inet_sk(sk)->mc_loop = 0;
-
-       if (!(flags & VXLAN_F_UDP_CSUM))
-               sock->sk->sk_no_check_tx = 1;
+       inet_sk(sock->sk)->mc_loop = 0;
 
        return sock;
 }
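
vxlan_create_sock() above replaces roughly a hundred lines of hand-rolled IPv4/IPv6 socket setup with the new udp_tunnel helper: describe the endpoint in a struct udp_port_cfg and let udp_sock_create() handle creation, namespace placement and binding. A minimal sketch for a caller that only needs an IPv4 tunnel socket:

#include <linux/err.h>
#include <net/udp_tunnel.h>

static struct socket *open_tunnel_sock(struct net *net, __be16 port)
{
        struct udp_port_cfg cfg = {
                .family                 = AF_INET,
                .local_udp_port         = port,
                .use_udp_checksums      = true,
        };
        struct socket *sock;
        int err;

        /* INADDR_ANY is the zeroed default, so local_ip can stay unset */
        err = udp_sock_create(net, &cfg, &sock);
        return err < 0 ? ERR_PTR(err) : sock;
}
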
@@ -2481,10 +2396,7 @@ static struct vxlan_sock *vxlan_socket_create(struct net *net, __be16 port,
 
        INIT_WORK(&vs->del_work, vxlan_del_work);
 
-       if (ipv6)
-               sock = create_v6_sock(net, port, flags);
-       else
-               sock = create_v4_sock(net, port, flags);
+       sock = vxlan_create_sock(net, ipv6, port, flags);
        if (IS_ERR(sock)) {
                kfree(vs);
                return ERR_CAST(sock);
index 19f7cb2..43c9960 100644 (file)
@@ -255,7 +255,6 @@ static int dlci_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
                                return -EINVAL;
 
                        return dlci_config(dev, ifr->ifr_data, cmd == DLCI_GET_CONF);
-                       break;
 
                default: 
                        return -EOPNOTSUPP;
@@ -327,8 +326,8 @@ static int dlci_add(struct dlci_add *dlci)
                goto err1;
 
        /* create device name */
-       master = alloc_netdev( sizeof(struct dlci_local), "dlci%d",
-                             dlci_setup);
+       master = alloc_netdev(sizeof(struct dlci_local), "dlci%d",
+                             NET_NAME_UNKNOWN, dlci_setup);
        if (!master) {
                err = -ENOMEM;
                goto err1;
index 93ace04..1f04127 100644 (file)
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
        "FarSync TE1"
 };
 
-static void
+static int
 fst_init_card(struct fst_card_info *card)
 {
        int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
         * we'll have to revise it in some way then.
         */
        for (i = 0; i < card->nports; i++) {
-                err = register_hdlc_device(card->ports[i].dev);
-                if (err < 0) {
-                       int j;
+               err = register_hdlc_device(card->ports[i].dev);
+               if (err < 0) {
                        pr_err("Cannot register HDLC device for port %d (errno %d)\n",
-                              i, -err);
-                       for (j = i; j < card->nports; j++) {
-                               free_netdev(card->ports[j].dev);
-                               card->ports[j].dev = NULL;
-                       }
-                        card->nports = i;
-                        break;
-                }
+                               i, -err);
+                       while (i--)
+                               unregister_hdlc_device(card->ports[i].dev);
+                       return err;
+               }
        }
 
        pr_info("%s-%s: %s IRQ%d, %d ports\n",
                port_to_dev(&card->ports[0])->name,
                port_to_dev(&card->ports[card->nports - 1])->name,
                type_strings[card->type], card->irq, card->nports);
+       return 0;
 }
 
 static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        /* Try to enable the device */
        if ((err = pci_enable_device(pdev)) != 0) {
                pr_err("Failed to enable card. Err %d\n", -err);
-               kfree(card);
-               return err;
+               goto enable_fail;
        }
 
        if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
                pr_err("Failed to allocate regions. Err %d\n", -err);
-               pci_disable_device(pdev);
-               kfree(card);
-               return err;
+               goto regions_fail;
        }
 
        /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        card->phys_ctlmem = pci_resource_start(pdev, 3);
        if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
                pr_err("Physical memory remap failed\n");
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto ioremap_physmem_fail;
        }
        if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
                pr_err("Control memory remap failed\n");
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               iounmap(card->mem);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto ioremap_ctlmem_fail;
        }
        dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
 
        /* Register the interrupt handler */
        if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
                pr_err("Unable to register interrupt %d\n", card->irq);
-               pci_release_regions(pdev);
-               pci_disable_device(pdev);
-               iounmap(card->ctlmem);
-               iounmap(card->mem);
-               kfree(card);
-               return -ENODEV;
+               err = -ENODEV;
+               goto irq_fail;
        }
 
        /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                        while (i--)
                                free_netdev(card->ports[i].dev);
                        pr_err("FarSync: out of memory\n");
-                        free_irq(card->irq, card);
-                        pci_release_regions(pdev);
-                        pci_disable_device(pdev);
-                        iounmap(card->ctlmem);
-                        iounmap(card->mem);
-                        kfree(card);
-                        return -ENODEV;
+                       err = -ENOMEM;
+                       goto hdlcdev_fail;
                }
                card->ports[i].dev    = dev;
                 card->ports[i].card   = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        pci_set_drvdata(pdev, card);
 
        /* Remainder of card setup */
+       if (no_of_cards_added >= FST_MAX_CARDS) {
+               pr_err("FarSync: too many cards\n");
+               err = -ENOMEM;
+               goto card_array_fail;
+       }
        fst_card_array[no_of_cards_added] = card;
        card->card_no = no_of_cards_added++;    /* Record instance and bump it */
-       fst_init_card(card);
+       err = fst_init_card(card);
+       if (err)
+               goto init_card_fail;
        if (card->family == FST_FAMILY_TXU) {
                /*
                 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                                         &card->rx_dma_handle_card);
                if (card->rx_dma_handle_host == NULL) {
                        pr_err("Could not allocate rx dma buffer\n");
-                       fst_disable_intr(card);
-                       pci_release_regions(pdev);
-                       pci_disable_device(pdev);
-                       iounmap(card->ctlmem);
-                       iounmap(card->mem);
-                       kfree(card);
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto rx_dma_fail;
                }
                card->tx_dma_handle_host =
                    pci_alloc_consistent(card->device, FST_MAX_MTU,
                                         &card->tx_dma_handle_card);
                if (card->tx_dma_handle_host == NULL) {
                        pr_err("Could not allocate tx dma buffer\n");
-                       fst_disable_intr(card);
-                       pci_release_regions(pdev);
-                       pci_disable_device(pdev);
-                       iounmap(card->ctlmem);
-                       iounmap(card->mem);
-                       kfree(card);
-                       return -ENOMEM;
+                       err = -ENOMEM;
+                       goto tx_dma_fail;
                }
        }
        return 0;               /* Success */
+
+tx_dma_fail:
+       pci_free_consistent(card->device, FST_MAX_MTU,
+                           card->rx_dma_handle_host,
+                           card->rx_dma_handle_card);
+rx_dma_fail:
+       fst_disable_intr(card);
+       for (i = 0 ; i < card->nports ; i++)
+               unregister_hdlc_device(card->ports[i].dev);
+init_card_fail:
+       fst_card_array[card->card_no] = NULL;
+card_array_fail:
+       for (i = 0 ; i < card->nports ; i++)
+               free_netdev(card->ports[i].dev);
+hdlcdev_fail:
+       free_irq(card->irq, card);
+irq_fail:
+       iounmap(card->ctlmem);
+ioremap_ctlmem_fail:
+       iounmap(card->mem);
+ioremap_physmem_fail:
+       pci_release_regions(pdev);
+regions_fail:
+       pci_disable_device(pdev);
+enable_fail:
+       kfree(card);
+       return err;
 }
 
 /*
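
The fst_add_one() rework above collapses six copies of teardown code into the standard kernel goto ladder: acquire resources in order, give each step a label, and on failure jump to the label that unwinds everything acquired so far, in reverse order. A minimal sketch with hypothetical resources:

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static void release_a(void) { }

static int probe_sketch(void)
{
        int err;

        err = acquire_a();
        if (err)
                goto a_fail;
        err = acquire_b();
        if (err)
                goto b_fail;
        /* ... more steps, each with its own unwind label ... */
        return 0;

b_fail:
        release_a();
a_fail:
        return err;
}
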
index 9c33ca9..51f6cee 100644 (file)
@@ -256,7 +256,8 @@ static void hdlc_setup(struct net_device *dev)
 struct net_device *alloc_hdlcdev(void *priv)
 {
        struct net_device *dev;
-       dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d", hdlc_setup);
+       dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d",
+                          NET_NAME_UNKNOWN, hdlc_setup);
        if (dev)
                dev_to_hdlc(dev)->priv = priv;
        return dev;
index 7c6cb4f..7cc64ea 100644 (file)
@@ -1075,10 +1075,11 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
        used = pvc_is_used(pvc);
 
        if (type == ARPHRD_ETHER) {
-               dev = alloc_netdev(0, "pvceth%d", ether_setup);
+               dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
+                                  ether_setup);
                dev->priv_flags &= ~IFF_TX_SKB_SHARING;
        } else
-               dev = alloc_netdev(0, "pvc%d", pvc_setup);
+               dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
 
        if (!dev) {
                netdev_warn(frad, "Memory squeeze on fr_pvc()\n");
index a33a46f..2f5eda8 100644 (file)
@@ -325,8 +325,8 @@ static int lapbeth_new_device(struct net_device *dev)
 
        ASSERT_RTNL();
 
-       ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", 
-                          lapbeth_setup);
+       ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
+                           lapbeth_setup);
        if (!ndev)
                goto out;
 
index 1b89ecf..758c4ba 100644 (file)
@@ -227,7 +227,8 @@ int __init sbni_probe(int unit)
        struct net_device *dev;
        int err;
 
-       dev = alloc_netdev(sizeof(struct net_local), "sbni", sbni_devsetup);
+       dev = alloc_netdev(sizeof(struct net_local), "sbni",
+                          NET_NAME_UNKNOWN, sbni_devsetup);
        if (!dev)
                return -ENOMEM;
 
@@ -1477,8 +1478,8 @@ int __init init_module( void )
        int err;
 
        while( num < SBNI_MAX_NUM_CARDS ) {
-               dev = alloc_netdev(sizeof(struct net_local), 
-                                  "sbni%d", sbni_devsetup);
+               dev = alloc_netdev(sizeof(struct net_local), "sbni%d",
+                                  NET_NAME_UNKNOWN, sbni_devsetup);
                if( !dev)
                        break;
 
index cdd45fb..421ac5f 100644 (file)
@@ -1631,7 +1631,8 @@ static int __init init_sdla(void)
 
        printk("%s.\n", version);
 
-       sdla = alloc_netdev(sizeof(struct frad_local), "sdla0", setup_sdla);
+       sdla = alloc_netdev(sizeof(struct frad_local), "sdla0",
+                           NET_NAME_UNKNOWN, setup_sdla);
        if (!sdla) 
                return -ENOMEM;
 
index 5895f19..5c47b01 100644 (file)
@@ -81,8 +81,8 @@ static struct x25_asy *x25_asy_alloc(void)
                char name[IFNAMSIZ];
                sprintf(name, "x25asy%d", i);
 
-               dev = alloc_netdev(sizeof(struct x25_asy),
-                                  name, x25_asy_setup);
+               dev = alloc_netdev(sizeof(struct x25_asy), name,
+                                  NET_NAME_UNKNOWN, x25_asy_setup);
                if (!dev)
                        return NULL;
 
@@ -122,8 +122,12 @@ static int x25_asy_change_mtu(struct net_device *dev, int newmtu)
 {
        struct x25_asy *sl = netdev_priv(dev);
        unsigned char *xbuff, *rbuff;
-       int len = 2 * newmtu;
+       int len;
 
+       if (newmtu > 65534)
+               return -EINVAL;
+
+       len = 2 * newmtu;
        xbuff = kmalloc(len + 4, GFP_ATOMIC);
        rbuff = kmalloc(len + 4, GFP_ATOMIC);
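
The new 65534 cap above bounds newmtu before len = 2 * newmtu feeds kmalloc(), so an absurd MTU can no longer overflow the integer arithmetic that sizes the buffers. The overflow-safe shape of that check, as a sketch:

#include <linux/slab.h>

static void *alloc_frame_buf(int mtu)
{
        /* validate before deriving sizes: keeps 2 * mtu + 4 inside
         * int range and within what the X.25 framing can address
         */
        if (mtu < 1 || mtu > 65534)
                return NULL;

        return kmalloc(2 * mtu + 4, GFP_ATOMIC);
}
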
 
index cd15a93..e7f5910 100644 (file)
@@ -472,7 +472,7 @@ int i2400mu_probe(struct usb_interface *iface,
 
        /* Allocate instance [calls i2400m_netdev_setup() on it]. */
        result = -ENOMEM;
-       net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d",
+       net_dev = alloc_netdev(sizeof(*i2400mu), "wmx%d", NET_NAME_UNKNOWN,
                               i2400mu_netdev_setup);
        if (net_dev == NULL) {
                dev_err(dev, "no memory for network device instance\n");
index 64747d4..b398075 100644 (file)
@@ -2685,7 +2685,8 @@ static struct net_device *init_wifidev(struct airo_info *ai,
                                        struct net_device *ethdev)
 {
        int err;
-       struct net_device *dev = alloc_netdev(0, "wifi%d", wifi_setup);
+       struct net_device *dev = alloc_netdev(0, "wifi%d", NET_NAME_UNKNOWN,
+                                             wifi_setup);
        if (!dev)
                return NULL;
        dev->ml_priv = ethdev->ml_priv;
@@ -2785,7 +2786,7 @@ static struct net_device *_init_airo_card( unsigned short irq, int port,
        CapabilityRid cap_rid;
 
        /* Create the network device object. */
-       dev = alloc_netdev(sizeof(*ai), "", ether_setup);
+       dev = alloc_netdev(sizeof(*ai), "", NET_NAME_UNKNOWN, ether_setup);
        if (!dev) {
                airo_print_err("", "Couldn't alloc_etherdev");
                return NULL;
@@ -7817,7 +7818,6 @@ static int readrids(struct net_device *dev, aironet_ioctl *comp) {
        case AIRORRID:      ridcode = comp->ridnum;     break;
        default:
                return -EINVAL;
-               break;
        }
 
        if ((iobuf = kmalloc(RIDSIZE, GFP_KERNEL)) == NULL)
index d139f2a..e535807 100644 (file)
@@ -3637,7 +3637,7 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
        struct net_device *ndev;
        struct ath6kl_vif *vif;
 
-       ndev = alloc_netdev(sizeof(*vif), name, ether_setup);
+       ndev = alloc_netdev(sizeof(*vif), name, NET_NAME_UNKNOWN, ether_setup);
        if (!ndev)
                return NULL;
 
index a611184..fffd523 100644 (file)
@@ -1226,7 +1226,6 @@ static int ath6kl_upload_board_file(struct ath6kl *ar)
        default:
                WARN_ON(1);
                return -EINVAL;
-               break;
        }
 
        if (board_ext_address &&
index ab4ee7d..b80b213 100644 (file)
@@ -1139,7 +1139,6 @@ static int carl9170_set_freq_cal_data(struct ar9170 *ar,
 
        default:
                return -EINVAL;
-               break;
        }
 
        for (; i >= 0; i--) {
index 4ab5370..b71d2b3 100644 (file)
@@ -488,7 +488,6 @@ static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
                wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
                ret = -EOPNOTSUPP;
                goto out;
-               break;
        }
 
 out:
index 106b6dc..7afce6e 100644 (file)
@@ -132,7 +132,7 @@ void *wil_if_alloc(struct device *dev, void __iomem *csr)
        ch = wdev->wiphy->bands[IEEE80211_BAND_60GHZ]->channels;
        cfg80211_chandef_create(&wdev->preset_chandef, ch, NL80211_CHAN_NO_HT);
 
-       ndev = alloc_netdev(0, "wlan%d", ether_setup);
+       ndev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
        if (!ndev) {
                dev_err(dev, "alloc_netdev_mqs failed\n");
                rc = -ENOMEM;
index b456bcb..fb10439 100644 (file)
@@ -810,7 +810,8 @@ struct brcmf_if *brcmf_add_if(struct brcmf_pub *drvr, s32 bssidx, s32 ifidx,
        } else {
                brcmf_dbg(INFO, "allocate netdev interface\n");
                /* Allocate netdev, including space for private structure */
-               ndev = alloc_netdev(sizeof(*ifp), name, ether_setup);
+               ndev = alloc_netdev(sizeof(*ifp), name, NET_NAME_UNKNOWN,
+                                   ether_setup);
                if (!ndev)
                        return ERR_PTR(-ENOMEM);
 
index 588fdbd..057b982 100644 (file)
@@ -2364,7 +2364,6 @@ int brcmf_p2p_del_vif(struct wiphy *wiphy, struct wireless_dev *wdev)
                return 0;
        default:
                return -ENOTSUPP;
-               break;
        }
 
        clear_bit(BRCMF_P2P_STATUS_GO_NEG_PHASE, &p2p->status);
index 3e9f5b2..93869e8 100644 (file)
@@ -22916,7 +22916,6 @@ static void wlc_phy_rssi_cal_nphy_rev2(struct brcms_phy *pi, u8 rssi_type)
                break;
        default:
                return;
-               break;
        }
 
        classif_state = wlc_phy_classifier_nphy(pi, 0, 0);
index e23d67e..6f1b9aa 100644 (file)
@@ -290,7 +290,6 @@ static int config_reg_write(struct cw1200_common *priv, u32 val)
        case HIF_8601_SILICON:
        default:
                return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val);
-               break;
        }
        return 0;
 }
index 0c02f04..569b64e 100644 (file)
@@ -981,7 +981,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
                goto err_wdev;
        }
 
-       dev = alloc_netdev(0, "wlan%d", ether_setup);
+       dev = alloc_netdev(0, "wlan%d", NET_NAME_UNKNOWN, ether_setup);
        if (!dev) {
                dev_err(dmdev, "no memory for network device instance\n");
                goto err_adapter;
index 6fef746..01a67f6 100644 (file)
@@ -1000,7 +1000,7 @@ static int lbs_add_mesh(struct lbs_private *priv)
                goto done;
        }
 
-       mesh_dev = alloc_netdev(0, "msh%d", ether_setup);
+       mesh_dev = alloc_netdev(0, "msh%d", NET_NAME_UNKNOWN, ether_setup);
        if (!mesh_dev) {
                lbs_deb_mesh("init mshX device failed\n");
                ret = -ENOMEM;
index f39f504..1326f61 100644 (file)
@@ -2681,7 +2681,8 @@ static int __init init_mac80211_hwsim(void)
                        goto out_free_radios;
        }
 
-       hwsim_mon = alloc_netdev(0, "hwsim%d", hwsim_mon_setup);
+       hwsim_mon = alloc_netdev(0, "hwsim%d", NET_NAME_UNKNOWN,
+                                hwsim_mon_setup);
        if (hwsim_mon == NULL) {
                err = -ENOMEM;
                goto out_free_radios;
index a738cc9..e2e6bf1 100644 (file)
@@ -2234,7 +2234,8 @@ struct wireless_dev *mwifiex_add_virtual_intf(struct wiphy *wiphy,
        }
 
        dev = alloc_netdev_mqs(sizeof(struct mwifiex_private *), name,
-                              ether_setup, IEEE80211_NUM_ACS, 1);
+                              NET_NAME_UNKNOWN, ether_setup,
+                              IEEE80211_NUM_ACS, 1);
        if (!dev) {
                wiphy_err(wiphy, "no memory available for netdevice\n");
                priv->bss_mode = NL80211_IFTYPE_UNSPECIFIED;
index 3c0a0a8..9a3d4d6 100644 (file)
@@ -1633,22 +1633,17 @@ static int mwl8k_tid_queue_mapping(u8 tid)
        case 0:
        case 3:
                return IEEE80211_AC_BE;
-               break;
        case 1:
        case 2:
                return IEEE80211_AC_BK;
-               break;
        case 4:
        case 5:
                return IEEE80211_AC_VI;
-               break;
        case 6:
        case 7:
                return IEEE80211_AC_VO;
-               break;
        default:
                return -1;
-               break;
        }
 }
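
The mwl8k hunk above is the clearest instance of another recurring cleanup in this batch: a break placed directly after a return (or goto) is unreachable, so removing it changes nothing at runtime. Condensed illustration of the resulting shape:

    /* every arm exits via return; a break after it could never run */
    static int example_tid_to_ac(u8 tid)
    {
            switch (tid) {
            case 0:
            case 3:
                    return IEEE80211_AC_BE;
            default:
                    return -1;
            }
    }
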
 
index c90939c..d3cf7c3 100644 (file)
@@ -921,7 +921,6 @@ static int ezusb_access_ltv(struct ezusb_priv *upriv,
                        retval = -EFAULT;
                }
                goto exit;
-               break;
        }
        if (ctx->in_rid) {
                struct ezusb_packet *ans = ctx->buf;
index 47b34bf..3a8d2db 100644 (file)
@@ -793,7 +793,6 @@ mgt_response_to_str(enum oid_num_t n, union oid_res_t *r, char *str)
        switch (isl_oid[n].flags & OID_FLAG_TYPE) {
        case OID_TYPE_U32:
                return snprintf(str, PRIV_STR_SIZE, "%u\n", r->u);
-               break;
        case OID_TYPE_BUFFER:{
                        struct obj_buffer *buff = r->ptr;
                        return snprintf(str, PRIV_STR_SIZE,
index b1ed6d0..56e218e 100644 (file)
@@ -1064,7 +1064,6 @@ static int rtl_op_ampdu_action(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_MAC80211, DBG_TRACE,
                         "IEEE80211_AMPDU_TX_START: TID:%d\n", tid);
                return rtl_tx_agg_start(hw, sta, tid, ssn);
-               break;
        case IEEE80211_AMPDU_TX_STOP_CONT:
        case IEEE80211_AMPDU_TX_STOP_FLUSH:
        case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
index a9cfa13..0f93142 100644 (file)
@@ -125,7 +125,6 @@ bool rtl88_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
                                RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
                                         "rtl88_hal_pwrseqcmdparsing(): PWR_CMD_END\n");
                                return true;
-                               break;
                        default:
                                RT_ASSERT(false,
                                          "rtl88_hal_pwrseqcmdparsing(): Unknown CMD!!\n");
index e273692..df98a5e 100644 (file)
@@ -1200,7 +1200,6 @@ static int _rtl92ce_set_media_status(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Network type %d not supported!\n", type);
                return 1;
-               break;
 
        }
 
index 380e7d4..331b158 100644 (file)
@@ -112,13 +112,10 @@ static u8 _rtl92s_firmware_header_map_rftype(struct ieee80211_hw *hw)
        switch (rtlphy->rf_type) {
        case RF_1T1R:
                return 0x11;
-               break;
        case RF_1T2R:
                return 0x12;
-               break;
        case RF_2T2R:
                return 0x22;
-               break;
        default:
                RT_TRACE(rtlpriv, COMP_INIT, DBG_EMERG, "Unknown RF type(%x)\n",
                         rtlphy->rf_type);
@@ -438,7 +435,6 @@ int rtl92s_download_fw(struct ieee80211_hw *hw)
                        RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                                 "Unexpected Download step!!\n");
                        goto fail;
-                       break;
                }
 
                /* <2> Download image file */
index 1c7101b..00e0670 100644 (file)
@@ -1198,7 +1198,6 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
                RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
                         "Network type %d not supported!\n", type);
                return 1;
-               break;
 
        }
 
index 539e539..662a079 100644 (file)
@@ -1103,7 +1103,6 @@ static int _rtl8723ae_set_media_status(struct ieee80211_hw *hw,
                         "Network type %d not supported!\n",
                         type);
                return 1;
-               break;
 
        }
 
index e4a507a..4573310 100644 (file)
@@ -124,7 +124,6 @@ bool rtlbe_hal_pwrseqcmdparsing(struct rtl_priv *rtlpriv, u8 cut_version,
                                         "rtlbe_hal_pwrseqcmdparsing(): "
                                         "PWR_CMD_END\n");
                                return true;
-                               break;
                        default:
                                RT_ASSERT(false,
                                          "rtlbe_hal_pwrseqcmdparsing(): "
index 4dd7c4a..28c9822 100644 (file)
@@ -44,6 +44,7 @@
 #include <xen/interface/grant_table.h>
 #include <xen/grant_table.h>
 #include <xen/xenbus.h>
+#include <linux/debugfs.h>
 
 typedef unsigned int pending_ring_idx_t;
 #define INVALID_PENDING_RING_IDX (~0U)
@@ -222,6 +223,11 @@ struct xenvif {
 
        /* Queues */
        struct xenvif_queue *queues;
+       unsigned int num_queues; /* active queues, resource allocated */
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *xenvif_dbg_root;
+#endif
 
        /* Miscellaneous private stuff. */
        struct net_device *dev;
@@ -296,10 +302,16 @@ static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
 /* Callback from stack when TX packet can be released */
 void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);
 
+irqreturn_t xenvif_interrupt(int irq, void *dev_id);
+
 extern bool separate_tx_rx_irq;
 
 extern unsigned int rx_drain_timeout_msecs;
 extern unsigned int rx_drain_timeout_jiffies;
 extern unsigned int xenvif_max_queues;
 
+#ifdef CONFIG_DEBUG_FS
+extern struct dentry *xen_netback_dbg_root;
+#endif
+
 #endif /* __XEN_NETBACK__COMMON_H__ */
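
xen-netback used to keep its queue count in dev->real_num_tx_queues, which belongs to the netdev core and cannot safely double as backend bookkeeping across register/unregister; the new vif->num_queues is owned by the driver and simply counts queues whose resources are allocated. Illustrative walk over the active queues, in the style of the converted call sites (the helper name is made up):

    static void example_walk_queues(struct xenvif *vif)
    {
            unsigned int i;

            /* num_queues counts queues with resources allocated */
            for (i = 0; i < vif->num_queues; ++i)
                    napi_enable(&vif->queues[i].napi);
    }
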
index 852da34..bd59d9d 100644 (file)
@@ -102,7 +102,7 @@ static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
        return IRQ_HANDLED;
 }
 
-static irqreturn_t xenvif_interrupt(int irq, void *dev_id)
+irqreturn_t xenvif_interrupt(int irq, void *dev_id)
 {
        xenvif_tx_interrupt(irq, dev_id);
        xenvif_rx_interrupt(irq, dev_id);
@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
        }
 }
 
-static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
-                              void *accel_priv, select_queue_fallback_t fallback)
-{
-       unsigned int num_queues = dev->real_num_tx_queues;
-       u32 hash;
-       u16 queue_index;
-
-       /* First, check if there is only one queue to optimise the
-        * single-queue or old frontend scenario.
-        */
-       if (num_queues == 1) {
-               queue_index = 0;
-       } else {
-               /* Use skb_get_hash to obtain an L4 hash if available */
-               hash = skb_get_hash(skb);
-               queue_index = hash % num_queues;
-       }
-
-       return queue_index;
-}
-
 static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        u16 index;
        int min_slots_needed;
 
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
 {
        struct xenvif *vif = netdev_priv(dev);
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned long rx_bytes = 0;
        unsigned long rx_packets = 0;
        unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
 static void xenvif_up(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
 
        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
 static void xenvif_down(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
 
        for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
                                     struct ethtool_stats *stats, u64 * data)
 {
        struct xenvif *vif = netdev_priv(dev);
-       unsigned int num_queues = dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        int i;
        unsigned int queue_index;
        struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
        .ndo_fix_features = xenvif_fix_features,
        .ndo_set_mac_address = eth_mac_addr,
        .ndo_validate_addr   = eth_validate_addr,
-       .ndo_select_queue = xenvif_select_queue,
 };
 
 struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,10 +416,10 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
        /* Allocate a netdev with the max. supported number of queues.
         * When the guest selects the desired number, it will be updated
-        * via netif_set_real_num_tx_queues().
+        * via netif_set_real_num_*_queues().
         */
-       dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
-                             xenvif_max_queues);
+       dev = alloc_netdev_mq(sizeof(struct xenvif), name, NET_NAME_UNKNOWN,
+                             ether_setup, xenvif_max_queues);
        if (dev == NULL) {
                pr_warn("Could not allocate netdev for %s\n", name);
                return ERR_PTR(-ENOMEM);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
        vif->dev = dev;
        vif->disabled = false;
 
-       /* Start out with no queues. The call below does not require
-        * rtnl_lock() as it happens before register_netdev().
-        */
+       /* Start out with no queues. */
        vif->queues = NULL;
-       netif_set_real_num_tx_queues(dev, 0);
+       vif->num_queues = 0;
 
        dev->netdev_ops = &xenvif_netdev_ops;
        dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
 void xenvif_disconnect(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
 
        if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
 void xenvif_free(struct xenvif *vif)
 {
        struct xenvif_queue *queue = NULL;
-       unsigned int num_queues = vif->dev->real_num_tx_queues;
+       unsigned int num_queues = vif->num_queues;
        unsigned int queue_index;
        /* Here we want to avoid timeout messages if an skb can be legitimately
         * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
                xenvif_deinit_queue(queue);
        }
 
-       /* Free the array of queues. The call below does not require
-        * rtnl_lock() because it happens after unregister_netdev().
-        */
-       netif_set_real_num_tx_queues(vif->dev, 0);
        vfree(vif->queues);
        vif->queues = NULL;
+       vif->num_queues = 0;
 
        free_netdev(vif->dev);
 
index 1844a47..769e553 100644 (file)
@@ -1030,14 +1030,21 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
 {
        struct gnttab_map_grant_ref *gop_map = *gopp_map;
        u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
+       /* This always points to the shinfo of the skb being checked, which
+        * could be either the first or the one on the frag_list
+        */
        struct skb_shared_info *shinfo = skb_shinfo(skb);
+       /* If this is non-NULL, we are currently checking the frag_list skb, and
+        * this points to the shinfo of the first one
+        */
+       struct skb_shared_info *first_shinfo = NULL;
        int nr_frags = shinfo->nr_frags;
+       const bool sharedslot = nr_frags &&
+                               frag_get_pending_idx(&shinfo->frags[0]) == pending_idx;
        int i, err;
-       struct sk_buff *first_skb = NULL;
 
        /* Check status of header. */
        err = (*gopp_copy)->status;
-       (*gopp_copy)++;
        if (unlikely(err)) {
                if (net_ratelimit())
                        netdev_dbg(queue->vif->dev,
@@ -1045,8 +1052,12 @@ static int xenvif_tx_check_gop(struct xenvif_queue *queue,
                                   (*gopp_copy)->status,
                                   pending_idx,
                                   (*gopp_copy)->source.u.ref);
-               xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
+               /* The first frag might still have this slot mapped */
+               if (!sharedslot)
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_ERROR);
        }
+       (*gopp_copy)++;
 
 check_frags:
        for (i = 0; i < nr_frags; i++, gop_map++) {
@@ -1062,8 +1073,19 @@ check_frags:
                                                pending_idx,
                                                gop_map->handle);
                        /* Had a previous error? Invalidate this fragment. */
-                       if (unlikely(err))
+                       if (unlikely(err)) {
                                xenvif_idx_unmap(queue, pending_idx);
+                               /* If the mapping of the first frag was OK, but
+                                * the header's copy failed, and they are
+                                * sharing a slot, send an error
+                                */
+                               if (i == 0 && sharedslot)
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_ERROR);
+                               else
+                                       xenvif_idx_release(queue, pending_idx,
+                                                          XEN_NETIF_RSP_OKAY);
+                       }
                        continue;
                }
 
@@ -1075,42 +1097,53 @@ check_frags:
                                   gop_map->status,
                                   pending_idx,
                                   gop_map->ref);
+
                xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_ERROR);
 
                /* Not the first error? Preceding frags already invalidated. */
                if (err)
                        continue;
-               /* First error: invalidate preceding fragments. */
+
+               /* First error: if the header hasn't shared a slot with the
+                * first frag, release it as well.
+                */
+               if (!sharedslot)
+                       xenvif_idx_release(queue,
+                                          XENVIF_TX_CB(skb)->pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+
+               /* Invalidate preceding fragments of this skb. */
                for (j = 0; j < i; j++) {
                        pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
                        xenvif_idx_unmap(queue, pending_idx);
+                       xenvif_idx_release(queue, pending_idx,
+                                          XEN_NETIF_RSP_OKAY);
+               }
+
+               /* And if we found the error while checking the frag_list, unmap
+                * the first skb's frags
+                */
+               if (first_shinfo) {
+                       for (j = 0; j < first_shinfo->nr_frags; j++) {
+                               pending_idx = frag_get_pending_idx(&first_shinfo->frags[j]);
+                               xenvif_idx_unmap(queue, pending_idx);
+                               xenvif_idx_release(queue, pending_idx,
+                                                  XEN_NETIF_RSP_OKAY);
+                       }
                }
 
                /* Remember the error: invalidate all subsequent fragments. */
                err = newerr;
        }
 
-       if (skb_has_frag_list(skb)) {
-               first_skb = skb;
-               skb = shinfo->frag_list;
-               shinfo = skb_shinfo(skb);
+       if (skb_has_frag_list(skb) && !first_shinfo) {
+               first_shinfo = skb_shinfo(skb);
+               shinfo = skb_shinfo(skb_shinfo(skb)->frag_list);
                nr_frags = shinfo->nr_frags;
 
                goto check_frags;
        }
 
-       /* There was a mapping error in the frag_list skb. We have to unmap
-        * the first skb's frags
-        */
-       if (first_skb && err) {
-               int j;
-               shinfo = skb_shinfo(first_skb);
-               for (j = 0; j < shinfo->nr_frags; j++) {
-                       pending_idx = frag_get_pending_idx(&shinfo->frags[j]);
-                       xenvif_idx_unmap(queue, pending_idx);
-               }
-       }
-
        *gopp_map = gop_map;
        return err;
 }
@@ -1518,7 +1551,16 @@ static int xenvif_tx_submit(struct xenvif_queue *queue)
 
                /* Check the remap error code. */
                if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
+                       /* If there was an error, xenvif_tx_check_gop is
+                        * expected to release all the frags which were mapped,
+                        * so kfree_skb shouldn't do it again
+                        */
                        skb_shinfo(skb)->nr_frags = 0;
+                       if (skb_has_frag_list(skb)) {
+                               struct sk_buff *nskb =
+                                               skb_shinfo(skb)->frag_list;
+                               skb_shinfo(nskb)->nr_frags = 0;
+                       }
                        kfree_skb(skb);
                        continue;
                }
@@ -1822,8 +1864,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
                           tx_unmap_op.status);
                BUG();
        }
-
-       xenvif_idx_release(queue, pending_idx, XEN_NETIF_RSP_OKAY);
 }
 
 static inline int rx_work_todo(struct xenvif_queue *queue)
@@ -1987,6 +2027,13 @@ static int __init netback_init(void)
 
        rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
 
+#ifdef CONFIG_DEBUG_FS
+       xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
+       if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+               pr_warn("Init of debugfs returned %ld!\n",
+                       PTR_ERR(xen_netback_dbg_root));
+#endif /* CONFIG_DEBUG_FS */
+
        return 0;
 
 failed_init:
@@ -1997,6 +2044,10 @@ module_init(netback_init);
 
 static void __exit netback_fini(void)
 {
+#ifdef CONFIG_DEBUG_FS
+       if (!IS_ERR_OR_NULL(xen_netback_dbg_root))
+               debugfs_remove_recursive(xen_netback_dbg_root);
+#endif /* CONFIG_DEBUG_FS */
        xenvif_xenbus_fini();
 }
 module_exit(netback_fini);
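
The xenvif_tx_check_gop() rework above exists because the header's grant-copy and the first frag's grant-map can share one pending slot, and that slot must be released exactly once whichever of the two operations fails. The submit path carries the matching half of the contract, condensed here from the hunk above: after an error the checker has already released every mapped frag, so the skb (and any frag_list skb) is made to look frag-less before kfree_skb():

    if (unlikely(xenvif_tx_check_gop(queue, skb, &gop_map, &gop_copy))) {
            /* frags were released by the checker; don't free them twice */
            skb_shinfo(skb)->nr_frags = 0;
            if (skb_has_frag_list(skb))
                    skb_shinfo(skb_shinfo(skb)->frag_list)->nr_frags = 0;
            kfree_skb(skb);
    }
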
index 96c63dc..580517d 100644 (file)
@@ -44,6 +44,175 @@ static void unregister_hotplug_status_watch(struct backend_info *be);
 static void set_backend_state(struct backend_info *be,
                              enum xenbus_state state);
 
+#ifdef CONFIG_DEBUG_FS
+struct dentry *xen_netback_dbg_root = NULL;
+
+static int xenvif_read_io_ring(struct seq_file *m, void *v)
+{
+       struct xenvif_queue *queue = m->private;
+       struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
+       struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
+
+       if (tx_ring->sring) {
+               struct xen_netif_tx_sring *sring = tx_ring->sring;
+
+               seq_printf(m, "Queue %d\nTX: nr_ents %u\n", queue->id,
+                          tx_ring->nr_ents);
+               seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
+                          sring->req_prod,
+                          sring->req_prod - sring->rsp_prod,
+                          tx_ring->req_cons,
+                          tx_ring->req_cons - sring->rsp_prod,
+                          sring->req_event,
+                          sring->req_event - sring->rsp_prod);
+               seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n",
+                          sring->rsp_prod,
+                          tx_ring->rsp_prod_pvt,
+                          tx_ring->rsp_prod_pvt - sring->rsp_prod,
+                          sring->rsp_event,
+                          sring->rsp_event - sring->rsp_prod);
+               seq_printf(m, "pending prod %u pending cons %u nr_pending_reqs %u\n",
+                          queue->pending_prod,
+                          queue->pending_cons,
+                          nr_pending_reqs(queue));
+               seq_printf(m, "dealloc prod %u dealloc cons %u dealloc_queue %u\n\n",
+                          queue->dealloc_prod,
+                          queue->dealloc_cons,
+                          queue->dealloc_prod - queue->dealloc_cons);
+       }
+
+       if (rx_ring->sring) {
+               struct xen_netif_rx_sring *sring = rx_ring->sring;
+
+               seq_printf(m, "RX: nr_ents %u\n", rx_ring->nr_ents);
+               seq_printf(m, "req prod %u (%d) cons %u (%d) event %u (%d)\n",
+                          sring->req_prod,
+                          sring->req_prod - sring->rsp_prod,
+                          rx_ring->req_cons,
+                          rx_ring->req_cons - sring->rsp_prod,
+                          sring->req_event,
+                          sring->req_event - sring->rsp_prod);
+               seq_printf(m, "rsp prod %u (base) pvt %u (%d) event %u (%d)\n\n",
+                          sring->rsp_prod,
+                          rx_ring->rsp_prod_pvt,
+                          rx_ring->rsp_prod_pvt - sring->rsp_prod,
+                          sring->rsp_event,
+                          sring->rsp_event - sring->rsp_prod);
+       }
+
+       seq_printf(m, "NAPI state: %lx NAPI weight: %d TX queue len %u\n"
+                  "Credit timer_pending: %d, credit: %lu, usec: %lu\n"
+                  "remaining: %lu, expires: %lu, now: %lu\n",
+                  queue->napi.state, queue->napi.weight,
+                  skb_queue_len(&queue->tx_queue),
+                  timer_pending(&queue->credit_timeout),
+                  queue->credit_bytes,
+                  queue->credit_usec,
+                  queue->remaining_credit,
+                  queue->credit_timeout.expires,
+                  jiffies);
+
+       return 0;
+}
+
+#define XENVIF_KICK_STR "kick"
+
+static ssize_t
+xenvif_write_io_ring(struct file *filp, const char __user *buf, size_t count,
+                    loff_t *ppos)
+{
+       struct xenvif_queue *queue =
+               ((struct seq_file *)filp->private_data)->private;
+       int len;
+       char write[sizeof(XENVIF_KICK_STR)];
+
+       /* don't allow partial writes and check the length */
+       if (*ppos != 0)
+               return 0;
+       if (count < sizeof(XENVIF_KICK_STR) - 1)
+               return -ENOSPC;
+
+       len = simple_write_to_buffer(write,
+                                    sizeof(write),
+                                    ppos,
+                                    buf,
+                                    count);
+       if (len < 0)
+               return len;
+
+       if (!strncmp(write, XENVIF_KICK_STR, sizeof(XENVIF_KICK_STR) - 1))
+               xenvif_interrupt(0, (void *)queue);
+       else {
+               pr_warn("Unknown command to io_ring_q%d. Available: kick\n",
+                       queue->id);
+               count = -EINVAL;
+       }
+       return count;
+}
+
+static int xenvif_dump_open(struct inode *inode, struct file *filp)
+{
+       int ret;
+       void *queue = NULL;
+
+       if (inode->i_private)
+               queue = inode->i_private;
+       ret = single_open(filp, xenvif_read_io_ring, queue);
+       filp->f_mode |= FMODE_PWRITE;
+       return ret;
+}
+
+static const struct file_operations xenvif_dbg_io_ring_ops_fops = {
+       .owner = THIS_MODULE,
+       .open = xenvif_dump_open,
+       .read = seq_read,
+       .llseek = seq_lseek,
+       .release = single_release,
+       .write = xenvif_write_io_ring,
+};
+
+static void xenvif_debugfs_addif(struct xenvif_queue *queue)
+{
+       struct dentry *pfile;
+       struct xenvif *vif = queue->vif;
+       int i;
+
+       if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+               return;
+
+       vif->xenvif_dbg_root = debugfs_create_dir(vif->dev->name,
+                                                 xen_netback_dbg_root);
+       if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root)) {
+               for (i = 0; i < vif->num_queues; ++i) {
+                       char filename[sizeof("io_ring_q") + 4];
+
+                       snprintf(filename, sizeof(filename), "io_ring_q%d", i);
+                       pfile = debugfs_create_file(filename,
+                                                   S_IRUSR | S_IWUSR,
+                                                   vif->xenvif_dbg_root,
+                                                   &vif->queues[i],
+                                                   &xenvif_dbg_io_ring_ops_fops);
+                       if (IS_ERR_OR_NULL(pfile))
+                               pr_warn("Creation of io_ring file returned %ld!\n",
+                                       PTR_ERR(pfile));
+               }
+       } else
+               netdev_warn(vif->dev,
+                           "Creation of vif debugfs dir returned %ld!\n",
+                           PTR_ERR(vif->xenvif_dbg_root));
+}
+
+static void xenvif_debugfs_delif(struct xenvif *vif)
+{
+       if (IS_ERR_OR_NULL(xen_netback_dbg_root))
+               return;
+
+       if (!IS_ERR_OR_NULL(vif->xenvif_dbg_root))
+               debugfs_remove_recursive(vif->xenvif_dbg_root);
+       vif->xenvif_dbg_root = NULL;
+}
+#endif /* CONFIG_DEBUG_FS */
+
 static int netback_remove(struct xenbus_device *dev)
 {
        struct backend_info *be = dev_get_drvdata(&dev->dev);
@@ -246,8 +415,12 @@ static void backend_create_xenvif(struct backend_info *be)
 
 static void backend_disconnect(struct backend_info *be)
 {
-       if (be->vif)
+       if (be->vif) {
+#ifdef CONFIG_DEBUG_FS
+               xenvif_debugfs_delif(be->vif);
+#endif /* CONFIG_DEBUG_FS */
                xenvif_disconnect(be->vif);
+       }
 }
 
 static void backend_connect(struct backend_info *be)
@@ -527,9 +700,7 @@ static void connect(struct backend_info *be)
        /* Use the number of queues requested by the frontend */
        be->vif->queues = vzalloc(requested_num_queues *
                                  sizeof(struct xenvif_queue));
-       rtnl_lock();
-       netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
-       rtnl_unlock();
+       be->vif->num_queues = requested_num_queues;
 
        for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
                queue = &be->vif->queues[queue_index];
@@ -546,9 +717,7 @@ static void connect(struct backend_info *be)
                         * earlier queues can be destroyed using the regular
                         * disconnect logic.
                         */
-                       rtnl_lock();
-                       netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-                       rtnl_unlock();
+                       be->vif->num_queues = queue_index;
                        goto err;
                }
 
@@ -561,13 +730,22 @@ static void connect(struct backend_info *be)
                         * and also clean up any previously initialised queues.
                         */
                        xenvif_deinit_queue(queue);
-                       rtnl_lock();
-                       netif_set_real_num_tx_queues(be->vif->dev, queue_index);
-                       rtnl_unlock();
+                       be->vif->num_queues = queue_index;
                        goto err;
                }
+#ifdef CONFIG_DEBUG_FS
+               xenvif_debugfs_addif(queue);
+#endif /* CONFIG_DEBUG_FS */
        }
 
+       /* Initialisation completed, tell core driver the number of
+        * active queues.
+        */
+       rtnl_lock();
+       netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
+       netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
+       rtnl_unlock();
+
        xenvif_carrier_on(be->vif);
 
        unregister_hotplug_status_watch(be);
@@ -582,13 +760,11 @@ static void connect(struct backend_info *be)
        return;
 
 err:
-       if (be->vif->dev->real_num_tx_queues > 0)
+       if (be->vif->num_queues > 0)
                xenvif_disconnect(be->vif); /* Clean up existing queues */
        vfree(be->vif->queues);
        be->vif->queues = NULL;
-       rtnl_lock();
-       netif_set_real_num_tx_queues(be->vif->dev, 0);
-       rtnl_unlock();
+       be->vif->num_queues = 0;
        return;
 }
 
@@ -596,7 +772,7 @@ err:
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
 {
        struct xenbus_device *dev = be->dev;
-       unsigned int num_queues = queue->vif->dev->real_num_tx_queues;
+       unsigned int num_queues = queue->vif->num_queues;
        unsigned long tx_ring_ref, rx_ring_ref;
        unsigned int tx_evtchn, rx_evtchn;
        int err;
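
With the pieces above, each vif gets a directory named after its netdev under the xen-netback debugfs root, holding one io_ring_qN file per queue: reading it dumps TX/RX ring indices plus NAPI and credit state, and writing "kick" invokes xenvif_interrupt() by hand. A small userspace sketch; the vif1.0 path is an example following the vif%u.%u naming, and debugfs is assumed mounted at the usual place:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            char buf[4096];
            ssize_t n;
            int fd = open("/sys/kernel/debug/xen-netback/vif1.0/io_ring_q0",
                          O_RDWR);

            if (fd < 0)
                    return 1;
            n = read(fd, buf, sizeof(buf) - 1);     /* ring/NAPI/credit dump */
            if (n > 0)
                    fwrite(buf, 1, n, stdout);
            lseek(fd, 0, SEEK_SET);                 /* writer rejects pos != 0 */
            if (write(fd, "kick", 4) < 0)           /* simulate an interrupt */
                    perror("kick");
            close(fd);
            return 0;
    }
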
index 5a7872a..055222b 100644 (file)
@@ -1287,7 +1287,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
 
        if (likely(netif_carrier_ok(dev) &&
                   RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
-                       napi_schedule(&queue->napi);
+               napi_schedule(&queue->napi);
 
        return IRQ_HANDLED;
 }
@@ -1437,16 +1437,12 @@ static void xennet_end_access(int ref, void *page)
 static void xennet_disconnect_backend(struct netfront_info *info)
 {
        unsigned int i = 0;
-       struct netfront_queue *queue = NULL;
        unsigned int num_queues = info->netdev->real_num_tx_queues;
 
+       netif_carrier_off(info->netdev);
+
        for (i = 0; i < num_queues; ++i) {
-               /* Stop old i/f to prevent errors whilst we rebuild the state. */
-               spin_lock_bh(&queue->rx_lock);
-               spin_lock_irq(&queue->tx_lock);
-               netif_carrier_off(queue->info->netdev);
-               spin_unlock_irq(&queue->tx_lock);
-               spin_unlock_bh(&queue->rx_lock);
+               struct netfront_queue *queue = &info->queues[i];
 
                if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
                        unbind_from_irqhandler(queue->tx_irq, queue);
@@ -1457,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
                queue->tx_evtchn = queue->rx_evtchn = 0;
                queue->tx_irq = queue->rx_irq = 0;
 
+               napi_synchronize(&queue->napi);
+
                /* End access and free the pages */
                xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
                xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1698,8 +1696,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
                goto exit_free_tx;
        }
 
-       netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
-
        return 0;
 
  exit_free_tx:
@@ -1790,6 +1786,70 @@ error:
        return err;
 }
 
+static void xennet_destroy_queues(struct netfront_info *info)
+{
+       unsigned int i;
+
+       rtnl_lock();
+
+       for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               if (netif_running(info->netdev))
+                       napi_disable(&queue->napi);
+               netif_napi_del(&queue->napi);
+       }
+
+       rtnl_unlock();
+
+       kfree(info->queues);
+       info->queues = NULL;
+}
+
+static int xennet_create_queues(struct netfront_info *info,
+                               unsigned int num_queues)
+{
+       unsigned int i;
+       int ret;
+
+       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
+                              GFP_KERNEL);
+       if (!info->queues)
+               return -ENOMEM;
+
+       rtnl_lock();
+
+       for (i = 0; i < num_queues; i++) {
+               struct netfront_queue *queue = &info->queues[i];
+
+               queue->id = i;
+               queue->info = info;
+
+               ret = xennet_init_queue(queue);
+               if (ret < 0) {
+                       dev_warn(&info->netdev->dev, "only created %d queues\n",
+                                i);
+                       num_queues = i;
+                       break;
+               }
+
+               netif_napi_add(queue->info->netdev, &queue->napi,
+                              xennet_poll, 64);
+               if (netif_running(info->netdev))
+                       napi_enable(&queue->napi);
+       }
+
+       netif_set_real_num_tx_queues(info->netdev, num_queues);
+
+       rtnl_unlock();
+
+       if (num_queues == 0) {
+               dev_err(&info->netdev->dev, "no queues\n");
+               return -EINVAL;
+       }
+       return 0;
+}
+
 /* Common code used when first setting up, and when resuming. */
 static int talk_to_netback(struct xenbus_device *dev,
                           struct netfront_info *info)
@@ -1826,42 +1886,20 @@ static int talk_to_netback(struct xenbus_device *dev,
                goto out;
        }
 
-       /* Allocate array of queues */
-       info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL);
-       if (!info->queues) {
-               err = -ENOMEM;
-               goto out;
-       }
-       rtnl_lock();
-       netif_set_real_num_tx_queues(info->netdev, num_queues);
-       rtnl_unlock();
+       if (info->queues)
+               xennet_destroy_queues(info);
+
+       err = xennet_create_queues(info, num_queues);
+       if (err < 0)
+               goto destroy_ring;
 
        /* Create shared ring, alloc event channel -- for each queue */
        for (i = 0; i < num_queues; ++i) {
                queue = &info->queues[i];
-               queue->id = i;
-               queue->info = info;
-               err = xennet_init_queue(queue);
-               if (err) {
-                       /* xennet_init_queue() cleans up after itself on failure,
-                        * but we still have to clean up any previously initialised
-                        * queues. If i > 0, set num_queues to i, then goto
-                        * destroy_ring, which calls xennet_disconnect_backend()
-                        * to tidy up.
-                        */
-                       if (i > 0) {
-                               rtnl_lock();
-                               netif_set_real_num_tx_queues(info->netdev, i);
-                               rtnl_unlock();
-                               goto destroy_ring;
-                       } else {
-                               goto out;
-                       }
-               }
                err = setup_netfront(dev, queue, feature_split_evtchn);
                if (err) {
-                       /* As for xennet_init_queue(), setup_netfront() will tidy
-                        * up the current queue on error, but we need to clean up
+                       /* setup_netfront() will tidy up the current
+                        * queue on error, but we need to clean up
                         * those already allocated.
                         */
                        if (i > 0) {
@@ -2005,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
        /* By now, the queue structures have been set up */
        for (j = 0; j < num_queues; ++j) {
                queue = &np->queues[j];
-               spin_lock_bh(&queue->rx_lock);
-               spin_lock_irq(&queue->tx_lock);
 
                /* Step 1: Discard all pending TX packet fragments. */
+               spin_lock_irq(&queue->tx_lock);
                xennet_release_tx_bufs(queue);
+               spin_unlock_irq(&queue->tx_lock);
 
                /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
+               spin_lock_bh(&queue->rx_lock);
+
                for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
                        skb_frag_t *frag;
                        const struct page *page;
@@ -2035,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
                }
 
                queue->rx.req_prod_pvt = requeue_idx;
+
+               spin_unlock_bh(&queue->rx_lock);
        }
 
        /*
@@ -2046,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
        netif_carrier_on(np->netdev);
        for (j = 0; j < num_queues; ++j) {
                queue = &np->queues[j];
+
                notify_remote_via_irq(queue->tx_irq);
                if (queue->tx_irq != queue->rx_irq)
                        notify_remote_via_irq(queue->rx_irq);
-               xennet_tx_buf_gc(queue);
-               xennet_alloc_rx_buffers(queue);
 
+               spin_lock_irq(&queue->tx_lock);
+               xennet_tx_buf_gc(queue);
                spin_unlock_irq(&queue->tx_lock);
+
+               spin_lock_bh(&queue->rx_lock);
+               xennet_alloc_rx_buffers(queue);
                spin_unlock_bh(&queue->rx_lock);
        }
 
index 8368d96..b986480 100644 (file)
@@ -227,7 +227,8 @@ static int __of_node_add(struct device_node *np)
        np->kobj.kset = of_kset;
        if (!np->parent) {
                /* Nodes without parents are new top level trees */
-               rc = kobject_add(&np->kobj, NULL, safe_name(&of_kset->kobj, "base"));
+               rc = kobject_add(&np->kobj, NULL, "%s",
+                                safe_name(&of_kset->kobj, "base"));
        } else {
                name = safe_name(&np->parent->kobj, kbasename(np->full_name));
                if (!name || !name[0])
@@ -1960,9 +1961,9 @@ int of_attach_node(struct device_node *np)
 
        raw_spin_lock_irqsave(&devtree_lock, flags);
        np->sibling = np->parent->child;
-       np->allnext = of_allnodes;
+       np->allnext = np->parent->allnext;
+       np->parent->allnext = np;
        np->parent->child = np;
-       of_allnodes = np;
        of_node_clear_flag(np, OF_DETACHED);
        raw_spin_unlock_irqrestore(&devtree_lock, flags);
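
The of_attach_node() change keeps the global allnodes chain ordered parents-before-children: pushing every new node at the list head put a child ahead of its parent, which breaks walkers that expect to meet a parent first. The splice, assuming that ordering requirement:

    /* insert np directly after its parent instead of at the head */
    np->allnext = np->parent->allnext;
    np->parent->allnext = np;
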
 
index c4cddf0..9aa012e 100644 (file)
 #include <asm/setup.h>  /* for COMMAND_LINE_SIZE */
 #include <asm/page.h>
 
+/**
+ * of_fdt_limit_memory - limit the number of regions in the /memory node
+ * @limit: maximum entries
+ *
+ * Adjust the flattened device tree to have at most 'limit' memory
+ * entries in the /memory node. This function may be called any time
+ * after initial_boot_params is set.
+ */
+void of_fdt_limit_memory(int limit)
+{
+       int memory;
+       int len;
+       const void *val;
+       int nr_address_cells = OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
+       int nr_size_cells = OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
+       const uint32_t *addr_prop;
+       const uint32_t *size_prop;
+       int root_offset;
+       int cell_size;
+
+       root_offset = fdt_path_offset(initial_boot_params, "/");
+       if (root_offset < 0)
+               return;
+
+       addr_prop = fdt_getprop(initial_boot_params, root_offset,
+                               "#address-cells", NULL);
+       if (addr_prop)
+               nr_address_cells = fdt32_to_cpu(*addr_prop);
+
+       size_prop = fdt_getprop(initial_boot_params, root_offset,
+                               "#size-cells", NULL);
+       if (size_prop)
+               nr_size_cells = fdt32_to_cpu(*size_prop);
+
+       cell_size = sizeof(uint32_t)*(nr_address_cells + nr_size_cells);
+
+       memory = fdt_path_offset(initial_boot_params, "/memory");
+       if (memory > 0) {
+               val = fdt_getprop(initial_boot_params, memory, "reg", &len);
+               if (len > limit*cell_size) {
+                       len = limit*cell_size;
+                       pr_debug("Limiting number of entries to %d\n", limit);
+                       fdt_setprop(initial_boot_params, memory, "reg", val,
+                                       len);
+               }
+       }
+}
+
 /**
  * of_fdt_is_compatible - Return true if given node from the given blob has
  * compat in its compatible list
@@ -880,6 +928,21 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
        const u64 phys_offset = __pa(PAGE_OFFSET);
        base &= PAGE_MASK;
        size &= PAGE_MASK;
+
+       if (sizeof(phys_addr_t) < sizeof(u64)) {
+               if (base > ULONG_MAX) {
+                       pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
+                                       base, base + size);
+                       return;
+               }
+
+               if (base + size > ULONG_MAX) {
+                       pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
+                                       ULONG_MAX, base + size);
+                       size = ULONG_MAX - base;
+               }
+       }
+
        if (base + size < phys_offset) {
                pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
                           base, base + size);
@@ -922,7 +985,7 @@ int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
 }
 #endif
 
-bool __init early_init_dt_scan(void *params)
+bool __init early_init_dt_verify(void *params)
 {
        if (!params)
                return false;
@@ -936,6 +999,12 @@ bool __init early_init_dt_scan(void *params)
                return false;
        }
 
+       return true;
+}
+
+
+void __init early_init_dt_scan_nodes(void)
+{
        /* Retrieve various information from the /chosen node */
        of_scan_flat_dt(early_init_dt_scan_chosen, boot_command_line);
 
@@ -944,7 +1013,17 @@ bool __init early_init_dt_scan(void *params)
 
        /* Setup memory, calling early_init_dt_add_memory_arch */
        of_scan_flat_dt(early_init_dt_scan_memory, NULL);
+}
+
+bool __init early_init_dt_scan(void *params)
+{
+       bool status;
+
+       status = early_init_dt_verify(params);
+       if (!status)
+               return false;
 
+       early_init_dt_scan_nodes();
        return true;
 }
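
Splitting early_init_dt_scan() into early_init_dt_verify() plus early_init_dt_scan_nodes() gives architectures a window to edit the verified blob before its contents are consumed, and of_fdt_limit_memory() is one such edit. A sketch of an early boot path under those assumptions (the function name and the limit of 8 are illustrative):

    void __init example_setup_fdt(void *dt_virt)
    {
            if (!early_init_dt_verify(dt_virt))
                    panic("invalid device tree blob");

            /* trim /memory before the ranges are registered */
            of_fdt_limit_memory(8);

            early_init_dt_scan_nodes();
    }
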
 
index fb4a598..401b245 100644 (file)
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 }
 EXPORT_SYMBOL(of_mdiobus_register);
 
-/**
- * of_mdiobus_link_phydev - Find a device node for a phy
- * @mdio: pointer to mii_bus structure
- * @phydev: phydev for which the of_node pointer should be set
- *
- * Walk the list of subnodes of a mdio bus and look for a node that matches the
- * phy's address with its 'reg' property. If found, set the of_node pointer for
- * the phy. This allows auto-probed phy devices to be supplied with information
- * passed in via DT.
- */
-void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                           struct phy_device *phydev)
-{
-       struct device *dev = &phydev->dev;
-       struct device_node *child;
-
-       if (dev->of_node || !mdio->dev.of_node)
-               return;
-
-       for_each_available_child_of_node(mdio->dev.of_node, child) {
-               int addr;
-
-               addr = of_mdio_parse_addr(&mdio->dev, child);
-               if (addr < 0)
-                       continue;
-
-               if (addr == phydev->addr) {
-                       dev->of_node = child;
-                       return;
-               }
-       }
-}
-EXPORT_SYMBOL(of_mdiobus_link_phydev);
-
 /* Helper function for of_phy_find_device */
 static int of_phy_match(struct device *dev, void *phy_np)
 {
@@ -323,11 +289,13 @@ int of_phy_register_fixed_link(struct device_node *np)
        fixed_link_node = of_get_child_by_name(np, "fixed-link");
        if (fixed_link_node) {
                status.link = 1;
-               status.duplex = of_property_read_bool(np, "full-duplex");
+               status.duplex = of_property_read_bool(fixed_link_node,
+                                                     "full-duplex");
                if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
                        return -EINVAL;
-               status.pause = of_property_read_bool(np, "pause");
-               status.asym_pause = of_property_read_bool(np, "asym-pause");
+               status.pause = of_property_read_bool(fixed_link_node, "pause");
+               status.asym_pause = of_property_read_bool(fixed_link_node,
+                                                         "asym-pause");
                of_node_put(fixed_link_node);
                return fixed_phy_register(PHY_POLL, &status, np);
        }
index 6c48d73..500436f 100644 (file)
@@ -166,10 +166,6 @@ static void of_dma_configure(struct platform_device *pdev)
        int ret;
        struct device *dev = &pdev->dev;
 
-#if defined(CONFIG_MICROBLAZE)
-       pdev->archdata.dma_mask = 0xffffffffUL;
-#endif
-
        /*
         * Set default dma-mask to 32 bit. Drivers are expected to setup
         * the correct supported dma_mask.
index 2872ece..44333bd 100644 (file)
@@ -5,6 +5,12 @@
 # Parport configuration.
 #
 
+config ARCH_MIGHT_HAVE_PC_PARPORT
+       bool
+       help
+         Select this config option from the architecture Kconfig if
+         the architecture might have PC parallel port hardware.
+
 menuconfig PARPORT
        tristate "Parallel port support"
        depends on HAS_IOMEM
@@ -31,12 +37,6 @@ menuconfig PARPORT
 
          If unsure, say Y.
 
-config ARCH_MIGHT_HAVE_PC_PARPORT
-       bool
-       help
-         Select this config option from the architecture Kconfig if
-         the architecture might have PC parallel port hardware.
-
 if PARPORT
 
 config PARPORT_PC
index 63a54a3..1c8592b 100644 (file)
@@ -3135,8 +3135,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
        if (probe)
                return 0;
 
-       /* Wait for Transaction Pending bit clean */
-       if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP))
+       /*
+        * Wait for Transaction Pending bit to clear.  A word-aligned test
+        * is used, so we use the control offset rather than status and shift
+        * the test bit to match.
+        */
+       if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
+                                PCI_AF_STATUS_TP << 8))
                goto clear;
 
        dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
index 16a2f06..64b98d2 100644 (file)
@@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA
 config PHY_SUN4I_USB
        tristate "Allwinner sunxi SoC USB PHY driver"
        depends on ARCH_SUNXI && HAS_IOMEM && OF
+       depends on RESET_CONTROLLER
        select GENERIC_PHY
        help
          Enable this to support the transceiver that is part of Allwinner
@@ -122,6 +123,7 @@ config PHY_SUN4I_USB
 
 config PHY_SAMSUNG_USB2
        tristate "Samsung USB 2.0 PHY driver"
+       depends on HAS_IOMEM
        select GENERIC_PHY
        select MFD_SYSCON
        help
index c64a2f3..49c4465 100644 (file)
@@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
        return phy;
 
 put_dev:
-       put_device(&phy->dev);
-       ida_remove(&phy_ida, phy->id);
+       put_device(&phy->dev);  /* calls phy_release() which frees resources */
+       return ERR_PTR(ret);
+
 free_phy:
        kfree(phy);
        return ERR_PTR(ret);
@@ -799,7 +800,7 @@ static void phy_release(struct device *dev)
 
        phy = to_phy(dev);
        dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
-       ida_remove(&phy_ida, phy->id);
+       ida_simple_remove(&phy_ida, phy->id);
        kfree(phy);
 }
 
index 7007c11..34b3961 100644 (file)
@@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev)
        if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
                res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
                phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
-               if (!phy->phy_base)
-                       return -ENOMEM;
+               if (IS_ERR(phy->phy_base))
+                       return PTR_ERR(phy->phy_base);
                phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
        }
 
@@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
        otg->phy                = &phy->phy;
 
        platform_set_drvdata(pdev, phy);
-       pm_runtime_enable(phy->dev);
 
        generic_phy = devm_phy_create(phy->dev, &ops, NULL);
        if (IS_ERR(generic_phy))
@@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
 
        phy_set_drvdata(generic_phy, phy);
 
+       pm_runtime_enable(phy->dev);
        phy_provider = devm_of_phy_provider_register(phy->dev,
                        of_phy_simple_xlate);
-       if (IS_ERR(phy_provider))
+       if (IS_ERR(phy_provider)) {
+               pm_runtime_disable(phy->dev);
                return PTR_ERR(phy_provider);
+       }
 
        phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
        if (IS_ERR(phy->wkupclk)) {
@@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
        if (!IS_ERR(phy->optclk))
                clk_unprepare(phy->optclk);
        usb_remove_phy(&phy->phy);
+       pm_runtime_disable(phy->dev);
 
        return 0;
 }
index 8a8c6bc..1e69a32 100644 (file)
@@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
 #endif
        { },
 };
+MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
 
 static int samsung_usb2_phy_probe(struct platform_device *pdev)
 {
index edf5d2f..86db223 100644 (file)
@@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
 
        regmap = dev_get_regmap(&pdev->dev, NULL);
        if (!regmap)
-               return PTR_ERR(regmap);
+               return -ENODEV;
 
        pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
        if (!pctrl)
index 1bd6363..9f43916 100644 (file)
@@ -1431,7 +1431,7 @@ static void st_gpio_irqmux_handler(unsigned irq, struct irq_desc *desc)
 
        status = readl(info->irqmux_base);
 
-       for_each_set_bit(n, &status, ST_GPIO_PINS_PER_BANK)
+       for_each_set_bit(n, &status, info->nbanks)
                __gpio_irq_handler(&info->banks[n]);
 
        chained_irq_exit(chip, desc);
index f1ca75e..5f38c7f 100644 (file)
@@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
                        configlen++;
 
                pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
+               if (!pinconfig) {
+                       kfree(*map);
+                       return -ENOMEM;
+               }
 
                if (!of_property_read_u32(node, "allwinner,drive", &val)) {
                        u16 strength = (val + 1) * 10;
index 6aea373..ee3de34 100644 (file)
@@ -74,7 +74,7 @@ config DP83640_PHY
 
 config PTP_1588_CLOCK_PCH
        tristate "Intel PCH EG20T as PTP clock"
-       depends on X86 || COMPILE_TEST
+       depends on X86_32 || COMPILE_TEST
        depends on HAS_IOMEM && NET
        select PTP_1588_CLOCK
        help
index 419056d..f8a7609 100644 (file)
@@ -86,17 +86,12 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
                        return -EINVAL;
                break;
        case PTP_PF_PHYSYNC:
-               pr_err("sorry, cannot reassign the calibration pin\n");
-               return -EINVAL;
+               if (chan != 0)
+                       return -EINVAL;
+               break;
        default:
                return -EINVAL;
        }
 
-       if (pin2->func == PTP_PF_PHYSYNC) {
-               pr_err("sorry, cannot reprogram the calibration pin\n");
-               return -EINVAL;
-       }
-
        if (info->verify(info, pin, func, chan)) {
                pr_err("driver cannot use function %u on pin %u\n", func, chan);
                return -EOPNOTSUPP;
index 8558521..ad9e0c9 100644 (file)
@@ -433,6 +433,7 @@ static struct regulator_ops as3722_ldo3_extcntrl_ops = {
 };
 
 static const struct regulator_linear_range as3722_ldo_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
        REGULATOR_LINEAR_RANGE(825000, 0x01, 0x24, 25000),
        REGULATOR_LINEAR_RANGE(1725000, 0x40, 0x7F, 25000),
 };
@@ -609,6 +610,7 @@ static bool as3722_sd0_is_low_voltage(struct as3722_regulators *as3722_regs)
 }
 
 static const struct regulator_linear_range as3722_sd2345_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x00, 0x00, 0),
        REGULATOR_LINEAR_RANGE(612500, 0x01, 0x40, 12500),
        REGULATOR_LINEAR_RANGE(1425000, 0x41, 0x70, 25000),
        REGULATOR_LINEAR_RANGE(2650000, 0x71, 0x7F, 50000),
index 57544e2..58ece59 100644 (file)
@@ -119,6 +119,10 @@ static const unsigned int ldo_c_table[] = {
        2900000, 3000000, 3300000,
 };
 
+static const unsigned int ldo_vbus[] = {
+       5000000,
+};
+
 /* DCDC group CSR: supported voltages in microvolts */
 static const struct regulator_linear_range dcdc_csr_ranges[] = {
        REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
@@ -192,6 +196,7 @@ static struct bcm590xx_info bcm590xx_regs[] = {
        BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
        BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
        BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
+       BCM590XX_REG_TABLE(vbus, ldo_vbus),
 };
 
 struct bcm590xx_reg {
index 110a99e..c810518 100644 (file)
@@ -255,7 +255,7 @@ static int ltc3589_parse_regulators_dt(struct ltc3589 *ltc3589)
        struct device_node *node;
        int i, ret;
 
-       node = of_find_node_by_name(dev->of_node, "regulators");
+       node = of_get_child_by_name(dev->of_node, "regulators");
        if (!node) {
                dev_err(dev, "regulators node not found\n");
                return -EINVAL;
index 864ed02..93b4ad8 100644 (file)
@@ -37,12 +37,14 @@ struct regs_info {
 };
 
 static const struct regulator_linear_range smps_low_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
        REGULATOR_LINEAR_RANGE(500000, 0x1, 0x6, 0),
        REGULATOR_LINEAR_RANGE(510000, 0x7, 0x79, 10000),
        REGULATOR_LINEAR_RANGE(1650000, 0x7A, 0x7f, 0),
 };
 
 static const struct regulator_linear_range smps_high_ranges[] = {
+       REGULATOR_LINEAR_RANGE(0, 0x0, 0x0, 0),
        REGULATOR_LINEAR_RANGE(1000000, 0x1, 0x6, 0),
        REGULATOR_LINEAR_RANGE(1020000, 0x7, 0x79, 20000),
        REGULATOR_LINEAR_RANGE(3300000, 0x7A, 0x7f, 0),
@@ -323,6 +325,10 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
        if (rail_enable)
                palmas_smps_write(pmic->palmas,
                        palmas_regs_info[id].ctrl_addr, reg);
+
+       /* Switch the enable value to ensure this is used for enable */
+       pmic->desc[id].enable_val = pmic->current_reg_mode[id];
+
        return 0;
 }
 
@@ -962,6 +968,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
                                return ret;
                        pmic->current_reg_mode[id] = reg &
                                        PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+
+                       pmic->desc[id].enable_reg =
+                                       PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
+                                               palmas_regs_info[id].ctrl_addr);
+                       pmic->desc[id].enable_mask =
+                                       PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
+                       /* set_mode overrides this value */
+                       pmic->desc[id].enable_val = SMPS_CTRL_MODE_ON;
                }
 
                pmic->desc[id].type = REGULATOR_VOLTAGE;
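
The probe hunk fills in enable_reg/enable_mask/enable_val so the regulator core's regmap helpers can switch the SMPS on and off directly, and the set_mode hunk keeps enable_val pointing at the user-selected mode so a later disable/enable cycle restores that mode instead of forcing MODE_ON. A hedged sketch of what the core's regmap enable helper effectively does with these fields:

    /* Simplified from the core's regulator_enable_regmap() behavior. */
    val = rdev->desc->enable_val;
    if (!val)
            val = rdev->desc->enable_mask;
    regmap_update_bits(rdev->regmap, rdev->desc->enable_reg,
                       rdev->desc->enable_mask, val);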
index 69b4b77..9effe48 100644 (file)
@@ -209,7 +209,7 @@ static const struct regulator_desc regulators[] = {
                           1, -1, -1, TPS65218_REG_ENABLE1,
                           TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0),
        TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
-                          TPS65218_REG_CONTROL_DCDC4,
+                          TPS65218_REG_CONTROL_LDO1,
                           TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
                           TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
                           2, 0),
@@ -240,6 +240,7 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
        config.init_data = init_data;
        config.driver_data = tps;
        config.regmap = tps->regmap;
+       config.of_node = pdev->dev.of_node;
 
        rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
        if (IS_ERR(rdev)) {
index ce1743d..5e343ba 100644 (file)
@@ -44,7 +44,7 @@ config STE_MODEM_RPROC
 config DA8XX_REMOTEPROC
        tristate "DA8xx/OMAP-L13x remoteproc support"
        depends on ARCH_DAVINCI_DA8XX
-       select CMA
+       select CMA if MMU
        select REMOTEPROC
        select RPMSG
        help
index 1ecfe3b..1cff2a2 100644 (file)
@@ -71,7 +71,7 @@ static int puv3_rtc_setpie(struct device *dev, int enabled)
 {
        unsigned int tmp;
 
-       dev_debug(dev, "%s: pie=%d\n", __func__, enabled);
+       dev_dbg(dev, "%s: pie=%d\n", __func__, enabled);
 
        spin_lock_irq(&puv3_rtc_pie_lock);
        tmp = readl(RTC_RTSR) & ~RTC_RTSR_HZE;
@@ -140,7 +140,7 @@ static int puv3_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm)
        rtc_tm_to_time(tm, &rtcalarm_count);
        writel(rtcalarm_count, RTC_RTAR);
 
-       puv3_rtc_setaie(&dev->dev, alrm->enabled);
+       puv3_rtc_setaie(dev, alrm->enabled);
 
        if (alrm->enabled)
                enable_irq_wake(puv3_rtc_alarmno);
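
Two straightforward fixes here: dev_debug() does not exist (the kernel's helper is dev_dbg(), which is compiled away unless DEBUG or dynamic debug is enabled, which is why the typo survived), and puv3_rtc_setalarm() already receives a struct device *, so taking &dev->dev was dereferencing the wrong type.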
index ee0e85a..0f47175 100644 (file)
@@ -593,7 +593,7 @@ dcssblk_add_store(struct device *dev, struct device_attribute *attr, const char
        dev_info->start = dcssblk_find_lowest_addr(dev_info);
        dev_info->end = dcssblk_find_highest_addr(dev_info);
 
-       dev_set_name(&dev_info->dev, dev_info->segment_name);
+       dev_set_name(&dev_info->dev, "%s", dev_info->segment_name);
        dev_info->dev.release = dcssblk_release_segment;
        dev_info->dev.groups = dcssblk_dev_attr_groups;
        INIT_LIST_HEAD(&dev_info->lh);
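
dev_set_name() is printf-like, so passing a name that originates outside the kernel (here a user-supplied segment name) as the format string lets stray % conversions read, or with %n write, through uninitialized varargs. The safe idiom passes the string as data:

    dev_set_name(&dev_info->dev, dev_info->segment_name);       /* unsafe: name is the format */
    dev_set_name(&dev_info->dev, "%s", dev_info->segment_name); /* safe: name is an argument */

The vmlogrdr and zcrypt hunks further down apply the same fix to dev_set_name() and request_module().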
index 629fcc2..78b6ace 100644 (file)
@@ -19,7 +19,6 @@ obj-$(CONFIG_SCLP_VT220_TTY) += sclp_vt220.o
 obj-$(CONFIG_SCLP_CPI) += sclp_cpi.o
 obj-$(CONFIG_SCLP_ASYNC) += sclp_async.o
 
-obj-$(CONFIG_ZVM_WATCHDOG) += vmwatchdog.o
 obj-$(CONFIG_VMLOGRDR) += vmlogrdr.o
 obj-$(CONFIG_VMCP) += vmcp.o
 
index 15b3459..220acb4 100644 (file)
@@ -633,7 +633,6 @@ raw3270_reset_device_cb(struct raw3270_request *rq, void *data)
        } else
                raw3270_writesf_readpart(rp);
        memset(&rp->init_reset, 0, sizeof(rp->init_reset));
-       memset(&rp->init_data, 0, sizeof(rp->init_data));
 }
 
 static int
index cd9c919..b9a9f72 100644 (file)
@@ -838,8 +838,6 @@ sclp_vt220_con_init(void)
 {
        int rc;
 
-       if (!CONSOLE_IS_SCLP)
-               return 0;
        rc = __sclp_vt220_init(sclp_console_pages);
        if (rc)
                return rc;
index cf31d33..a8848db 100644 (file)
@@ -761,7 +761,7 @@ static int vmlogrdr_register_device(struct vmlogrdr_priv_t *priv)
 
        dev = kzalloc(sizeof(struct device), GFP_KERNEL);
        if (dev) {
-               dev_set_name(dev, priv->internal_name);
+               dev_set_name(dev, "%s", priv->internal_name);
                dev->bus = &iucv_bus;
                dev->parent = iucv_root;
                dev->driver = &vmlogrdr_driver;
diff --git a/drivers/s390/char/vmwatchdog.c b/drivers/s390/char/vmwatchdog.c
deleted file mode 100644 (file)
index d5eac98..0000000
+++ /dev/null
@@ -1,338 +0,0 @@
-/*
- * Watchdog implementation based on z/VM Watchdog Timer API
- *
- * Copyright IBM Corp. 2004, 2009
- *
- * The user space watchdog daemon can use this driver as
- * /dev/vmwatchdog to have z/VM execute the specified CP
- * command when the timeout expires. The default command is
- * "IPL", which will cause an immediate reboot.

- */
-#define KMSG_COMPONENT "vmwatchdog"
-#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-
-#include <linux/init.h>
-#include <linux/fs.h>
-#include <linux/kernel.h>
-#include <linux/miscdevice.h>
-#include <linux/module.h>
-#include <linux/moduleparam.h>
-#include <linux/slab.h>
-#include <linux/suspend.h>
-#include <linux/watchdog.h>
-
-#include <asm/ebcdic.h>
-#include <asm/io.h>
-#include <asm/uaccess.h>
-
-#define MAX_CMDLEN 240
-#define MIN_INTERVAL 15
-static char vmwdt_cmd[MAX_CMDLEN] = "IPL";
-static bool vmwdt_conceal;
-
-static bool vmwdt_nowayout = WATCHDOG_NOWAYOUT;
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
-MODULE_DESCRIPTION("z/VM Watchdog Timer");
-module_param_string(cmd, vmwdt_cmd, MAX_CMDLEN, 0644);
-MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers");
-module_param_named(conceal, vmwdt_conceal, bool, 0644);
-MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog "
-               " is active");
-module_param_named(nowayout, vmwdt_nowayout, bool, 0);
-MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"
-               " (default=CONFIG_WATCHDOG_NOWAYOUT)");
-MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-
-static unsigned int vmwdt_interval = 60;
-static unsigned long vmwdt_is_open;
-static int vmwdt_expect_close;
-
-static DEFINE_MUTEX(vmwdt_mutex);
-
-#define VMWDT_OPEN     0       /* devnode is open or suspend in progress */
-#define VMWDT_RUNNING  1       /* The watchdog is armed */
-
-enum vmwdt_func {
-       /* function codes */
-       wdt_init   = 0,
-       wdt_change = 1,
-       wdt_cancel = 2,
-       /* flags */
-       wdt_conceal = 0x80000000,
-};
-
-static int __diag288(enum vmwdt_func func, unsigned int timeout,
-                           char *cmd, size_t len)
-{
-       register unsigned long __func asm("2") = func;
-       register unsigned long __timeout asm("3") = timeout;
-       register unsigned long __cmdp asm("4") = virt_to_phys(cmd);
-       register unsigned long __cmdl asm("5") = len;
-       int err;
-
-       err = -EINVAL;
-       asm volatile(
-               "       diag    %1,%3,0x288\n"
-               "0:     la      %0,0\n"
-               "1:\n"
-               EX_TABLE(0b,1b)
-               : "+d" (err) : "d"(__func), "d"(__timeout),
-                 "d"(__cmdp), "d"(__cmdl) : "1", "cc");
-       return err;
-}
-
-static int vmwdt_keepalive(void)
-{
-       /* we allocate new memory every time to avoid having
-        * to track the state. static allocation is not an
-        * option since that might not be contiguous in real
-        * storage in case of a modular build */
-       static char *ebc_cmd;
-       size_t len;
-       int ret;
-       unsigned int func;
-
-       ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
-       if (!ebc_cmd)
-               return -ENOMEM;
-
-       len = strlcpy(ebc_cmd, vmwdt_cmd, MAX_CMDLEN);
-       ASCEBC(ebc_cmd, MAX_CMDLEN);
-       EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
-
-       func = vmwdt_conceal ? (wdt_init | wdt_conceal) : wdt_init;
-       set_bit(VMWDT_RUNNING, &vmwdt_is_open);
-       ret = __diag288(func, vmwdt_interval, ebc_cmd, len);
-       WARN_ON(ret != 0);
-       kfree(ebc_cmd);
-       return ret;
-}
-
-static int vmwdt_disable(void)
-{
-       char cmd[] = {'\0'};
-       int ret = __diag288(wdt_cancel, 0, cmd, 0);
-       WARN_ON(ret != 0);
-       clear_bit(VMWDT_RUNNING, &vmwdt_is_open);
-       return ret;
-}
-
-static int __init vmwdt_probe(void)
-{
-       /* there is no real way to see if the watchdog is supported,
-        * so we try initializing it with a NOP command ("BEGIN")
-        * that won't cause any harm even if the following disable
-        * fails for some reason */
-       char ebc_begin[] = {
-               194, 197, 199, 201, 213
-       };
-       if (__diag288(wdt_init, 15, ebc_begin, sizeof(ebc_begin)) != 0)
-               return -EINVAL;
-       return vmwdt_disable();
-}
-
-static int vmwdt_open(struct inode *i, struct file *f)
-{
-       int ret;
-       if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open))
-               return -EBUSY;
-       ret = vmwdt_keepalive();
-       if (ret)
-               clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-       return ret ? ret : nonseekable_open(i, f);
-}
-
-static int vmwdt_close(struct inode *i, struct file *f)
-{
-       if (vmwdt_expect_close == 42)
-               vmwdt_disable();
-       vmwdt_expect_close = 0;
-       clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-       return 0;
-}
-
-static struct watchdog_info vmwdt_info = {
-       .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE,
-       .firmware_version = 0,
-       .identity = "z/VM Watchdog Timer",
-};
-
-static int __vmwdt_ioctl(unsigned int cmd, unsigned long arg)
-{
-       switch (cmd) {
-       case WDIOC_GETSUPPORT:
-               if (copy_to_user((void __user *)arg, &vmwdt_info,
-                                       sizeof(vmwdt_info)))
-                       return -EFAULT;
-               return 0;
-       case WDIOC_GETSTATUS:
-       case WDIOC_GETBOOTSTATUS:
-               return put_user(0, (int __user *)arg);
-       case WDIOC_GETTEMP:
-               return -EINVAL;
-       case WDIOC_SETOPTIONS:
-               {
-                       int options, ret;
-                       if (get_user(options, (int __user *)arg))
-                               return -EFAULT;
-                       ret = -EINVAL;
-                       if (options & WDIOS_DISABLECARD) {
-                               ret = vmwdt_disable();
-                               if (ret)
-                                       return ret;
-                       }
-                       if (options & WDIOS_ENABLECARD) {
-                               ret = vmwdt_keepalive();
-                       }
-                       return ret;
-               }
-       case WDIOC_GETTIMEOUT:
-               return put_user(vmwdt_interval, (int __user *)arg);
-       case WDIOC_SETTIMEOUT:
-               {
-                       int interval;
-                       if (get_user(interval, (int __user *)arg))
-                               return -EFAULT;
-                       if (interval < MIN_INTERVAL)
-                               return -EINVAL;
-                       vmwdt_interval = interval;
-               }
-               return vmwdt_keepalive();
-       case WDIOC_KEEPALIVE:
-               return vmwdt_keepalive();
-       }
-       return -EINVAL;
-}
-
-static long vmwdt_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
-{
-       int rc;
-
-       mutex_lock(&vmwdt_mutex);
-       rc = __vmwdt_ioctl(cmd, arg);
-       mutex_unlock(&vmwdt_mutex);
-       return (long) rc;
-}
-
-static ssize_t vmwdt_write(struct file *f, const char __user *buf,
-                               size_t count, loff_t *ppos)
-{
-       if(count) {
-               if (!vmwdt_nowayout) {
-                       size_t i;
-
-                       /* note: just in case someone wrote the magic character
-                        * five months ago... */
-                       vmwdt_expect_close = 0;
-
-                       for (i = 0; i != count; i++) {
-                               char c;
-                               if (get_user(c, buf+i))
-                                       return -EFAULT;
-                               if (c == 'V')
-                                       vmwdt_expect_close = 42;
-                       }
-               }
-               /* someone wrote to us, we should restart timer */
-               vmwdt_keepalive();
-       }
-       return count;
-}
-
-static int vmwdt_resume(void)
-{
-       clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-       return NOTIFY_DONE;
-}
-
-/*
- * It makes no sense to go into suspend while the watchdog is running.
- * Depending on the memory size, the watchdog might trigger, while we
- * are still saving the memory.
- * We reuse the open flag to ensure that suspend and watchdog open are
- * exclusive operations
- */
-static int vmwdt_suspend(void)
-{
-       if (test_and_set_bit(VMWDT_OPEN, &vmwdt_is_open)) {
-               pr_err("The system cannot be suspended while the watchdog"
-                       " is in use\n");
-               return notifier_from_errno(-EBUSY);
-       }
-       if (test_bit(VMWDT_RUNNING, &vmwdt_is_open)) {
-               clear_bit(VMWDT_OPEN, &vmwdt_is_open);
-               pr_err("The system cannot be suspended while the watchdog"
-                       " is running\n");
-               return notifier_from_errno(-EBUSY);
-       }
-       return NOTIFY_DONE;
-}
-
-/*
- * This function is called for suspend and resume.
- */
-static int vmwdt_power_event(struct notifier_block *this, unsigned long event,
-                            void *ptr)
-{
-       switch (event) {
-       case PM_POST_HIBERNATION:
-       case PM_POST_SUSPEND:
-               return vmwdt_resume();
-       case PM_HIBERNATION_PREPARE:
-       case PM_SUSPEND_PREPARE:
-               return vmwdt_suspend();
-       default:
-               return NOTIFY_DONE;
-       }
-}
-
-static struct notifier_block vmwdt_power_notifier = {
-       .notifier_call = vmwdt_power_event,
-};
-
-static const struct file_operations vmwdt_fops = {
-       .open    = &vmwdt_open,
-       .release = &vmwdt_close,
-       .unlocked_ioctl = &vmwdt_ioctl,
-       .write   = &vmwdt_write,
-       .owner   = THIS_MODULE,
-       .llseek  = noop_llseek,
-};
-
-static struct miscdevice vmwdt_dev = {
-       .minor      = WATCHDOG_MINOR,
-       .name       = "watchdog",
-       .fops       = &vmwdt_fops,
-};
-
-static int __init vmwdt_init(void)
-{
-       int ret;
-
-       ret = vmwdt_probe();
-       if (ret)
-               return ret;
-       ret = register_pm_notifier(&vmwdt_power_notifier);
-       if (ret)
-               return ret;
-       /*
-        * misc_register() has to be the last action in module_init(), because
-        * file operations will be available right after this.
-        */
-       ret = misc_register(&vmwdt_dev);
-       if (ret) {
-               unregister_pm_notifier(&vmwdt_power_notifier);
-               return ret;
-       }
-       return 0;
-}
-module_init(vmwdt_init);
-
-static void __exit vmwdt_exit(void)
-{
-       unregister_pm_notifier(&vmwdt_power_notifier);
-       misc_deregister(&vmwdt_dev);
-}
-module_exit(vmwdt_exit);
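
The char driver can go away (along with its CONFIG_ZVM_WATCHDOG Makefile entry above) because the same z/VM DIAG 288 watchdog is reimplemented on top of the generic watchdog framework in this cycle, as drivers/watchdog/diag288_wdt.c.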
index 445564c..00bfbee 100644 (file)
@@ -196,11 +196,11 @@ EXPORT_SYMBOL(airq_iv_release);
  */
 unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
 {
-       unsigned long bit, i;
+       unsigned long bit, i, flags;
 
        if (!iv->avail || num == 0)
                return -1UL;
-       spin_lock(&iv->lock);
+       spin_lock_irqsave(&iv->lock, flags);
        bit = find_first_bit_inv(iv->avail, iv->bits);
        while (bit + num <= iv->bits) {
                for (i = 1; i < num; i++)
@@ -218,9 +218,8 @@ unsigned long airq_iv_alloc(struct airq_iv *iv, unsigned long num)
        }
        if (bit + num > iv->bits)
                bit = -1UL;
-       spin_unlock(&iv->lock);
+       spin_unlock_irqrestore(&iv->lock, flags);
        return bit;
-
 }
 EXPORT_SYMBOL(airq_iv_alloc);
 
@@ -232,11 +231,11 @@ EXPORT_SYMBOL(airq_iv_alloc);
  */
 void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
 {
-       unsigned long i;
+       unsigned long i, flags;
 
        if (!iv->avail || num == 0)
                return;
-       spin_lock(&iv->lock);
+       spin_lock_irqsave(&iv->lock, flags);
        for (i = 0; i < num; i++) {
                /* Clear (possibly left over) interrupt bit */
                clear_bit_inv(bit + i, iv->vector);
@@ -248,7 +247,7 @@ void airq_iv_free(struct airq_iv *iv, unsigned long bit, unsigned long num)
                while (iv->end > 0 && !test_bit_inv(iv->end - 1, iv->avail))
                        iv->end--;
        }
-       spin_unlock(&iv->lock);
+       spin_unlock_irqrestore(&iv->lock, flags);
 }
 EXPORT_SYMBOL(airq_iv_free);
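
The interrupt-vector allocator's lock is evidently also taken from interrupt context, so guarding it with plain spin_lock() invites a self-deadlock: the lock holder is interrupted on its own CPU and the handler spins on the same lock forever. spin_lock_irqsave() disables local interrupts for the critical section and restores the prior state afterwards; a minimal sketch of the pattern:

    unsigned long flags;

    spin_lock_irqsave(&iv->lock, flags);    /* irqs off on this CPU */
    /* ... manipulate the iv->avail / iv->vector bitmaps ... */
    spin_unlock_irqrestore(&iv->lock, flags);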
 
index dfd7bc6..e443b0d 100644 (file)
@@ -184,7 +184,7 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
                                      const char *buf, size_t count)
 {
        struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
-       int rc;
+       int rc = 0;
 
        /* Prevent concurrent online/offline processing and ungrouping. */
        if (atomic_cmpxchg(&gdev->onoff, 0, 1) != 0)
@@ -196,11 +196,12 @@ static ssize_t ccwgroup_ungroup_store(struct device *dev,
 
        if (device_remove_file_self(dev, attr))
                ccwgroup_ungroup(gdev);
+       else
+               rc = -ENODEV;
 out:
        if (rc) {
-               if (rc != -EAGAIN)
-                       /* Release onoff "lock" when ungrouping failed. */
-                       atomic_set(&gdev->onoff, 0);
+               /* Release onoff "lock" when ungrouping failed. */
+               atomic_set(&gdev->onoff, 0);
                return rc;
        }
        return count;
@@ -227,6 +228,7 @@ static void ccwgroup_ungroup_workfn(struct work_struct *work)
                container_of(work, struct ccwgroup_device, ungroup_work);
 
        ccwgroup_ungroup(gdev);
+       put_device(&gdev->dev);
 }
 
 static void ccwgroup_release(struct device *dev)
@@ -412,8 +414,10 @@ static int ccwgroup_notifier(struct notifier_block *nb, unsigned long action,
 {
        struct ccwgroup_device *gdev = to_ccwgroupdev(data);
 
-       if (action == BUS_NOTIFY_UNBIND_DRIVER)
+       if (action == BUS_NOTIFY_UNBIND_DRIVER) {
+               get_device(&gdev->dev);
                schedule_work(&gdev->ungroup_work);
+       }
 
        return NOTIFY_OK;
 }
@@ -582,11 +586,7 @@ void ccwgroup_driver_unregister(struct ccwgroup_driver *cdriver)
                                         __ccwgroup_match_all))) {
                struct ccwgroup_device *gdev = to_ccwgroupdev(dev);
 
-               mutex_lock(&gdev->reg_mutex);
-               __ccwgroup_remove_symlinks(gdev);
-               device_unregister(dev);
-               __ccwgroup_remove_cdev_refs(gdev);
-               mutex_unlock(&gdev->reg_mutex);
+               ccwgroup_ungroup(gdev);
                put_device(dev);
        }
        driver_unregister(&cdriver->driver);
@@ -633,13 +633,7 @@ void ccwgroup_remove_ccwdev(struct ccw_device *cdev)
        get_device(&gdev->dev);
        spin_unlock_irq(cdev->ccwlock);
        /* Unregister group device. */
-       mutex_lock(&gdev->reg_mutex);
-       if (device_is_registered(&gdev->dev)) {
-               __ccwgroup_remove_symlinks(gdev);
-               device_unregister(&gdev->dev);
-               __ccwgroup_remove_cdev_refs(gdev);
-       }
-       mutex_unlock(&gdev->reg_mutex);
+       ccwgroup_ungroup(gdev);
        /* Release ccwgroup device reference for local processing. */
        put_device(&gdev->dev);
 }
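
The notifier hunk pairs with the workfn hunk: ccwgroup_ungroup_workfn() now runs against a device reference taken before schedule_work(), so the group device cannot be released while the work item is still queued. This is the stock pattern for handing an object to asynchronous work:

    /* Caller: pin the object for the work item before queueing it. */
    get_device(&gdev->dev);
    schedule_work(&gdev->ungroup_work);

    /* Work function: drop that pin once the work has run. */
    static void ccwgroup_ungroup_workfn(struct work_struct *work)
    {
            struct ccwgroup_device *gdev =
                    container_of(work, struct ccwgroup_device, ungroup_work);

            ccwgroup_ungroup(gdev);
            put_device(&gdev->dev);
    }

The two later hunks then fold the open-coded unregister sequences into ccwgroup_ungroup() so all teardown paths share one implementation.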
index 77f9c92..2905d8b 100644 (file)
@@ -602,6 +602,7 @@ void __init init_cio_interrupts(void)
 
 #ifdef CONFIG_CCW_CONSOLE
 static struct subchannel *console_sch;
+static struct lock_class_key console_sch_key;
 
 /*
  * Use cio_tsch to update the subchannel status and call the interrupt handler
@@ -686,6 +687,7 @@ struct subchannel *cio_probe_console(void)
        if (IS_ERR(sch))
                return sch;
 
+       lockdep_set_class(sch->lock, &console_sch_key);
        isc_register(CONSOLE_ISC);
        sch->config.isc = CONSOLE_ISC;
        sch->config.intparm = (u32)(addr_t)sch;
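
Giving the console subchannel's lock a dedicated static lock_class_key moves it out of the lockdep class shared by all ordinary subchannel locks; the console path takes this lock in nesting orders normal subchannels never use, so a separate class keeps lockdep from reporting false-positive inversions.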
index d8d9b5b..dfef5e6 100644 (file)
@@ -678,18 +678,11 @@ static const struct attribute_group *ccwdev_attr_groups[] = {
        NULL,
 };
 
-/* this is a simple abstraction for device_register that sets the
- * correct bus type and adds the bus specific files */
-static int ccw_device_register(struct ccw_device *cdev)
+static int ccw_device_add(struct ccw_device *cdev)
 {
        struct device *dev = &cdev->dev;
-       int ret;
 
        dev->bus = &ccw_bus_type;
-       ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
-                          cdev->private->dev_id.devno);
-       if (ret)
-               return ret;
        return device_add(dev);
 }
 
@@ -764,22 +757,46 @@ static void ccw_device_todo(struct work_struct *work);
 static int io_subchannel_initialize_dev(struct subchannel *sch,
                                        struct ccw_device *cdev)
 {
-       cdev->private->cdev = cdev;
-       cdev->private->int_class = IRQIO_CIO;
-       atomic_set(&cdev->private->onoff, 0);
+       struct ccw_device_private *priv = cdev->private;
+       int ret;
+
+       priv->cdev = cdev;
+       priv->int_class = IRQIO_CIO;
+       priv->state = DEV_STATE_NOT_OPER;
+       priv->dev_id.devno = sch->schib.pmcw.dev;
+       priv->dev_id.ssid = sch->schid.ssid;
+       priv->schid = sch->schid;
+
+       INIT_WORK(&priv->todo_work, ccw_device_todo);
+       INIT_LIST_HEAD(&priv->cmb_list);
+       init_waitqueue_head(&priv->wait_q);
+       init_timer(&priv->timer);
+
+       atomic_set(&priv->onoff, 0);
+       cdev->ccwlock = sch->lock;
        cdev->dev.parent = &sch->dev;
        cdev->dev.release = ccw_device_release;
-       INIT_WORK(&cdev->private->todo_work, ccw_device_todo);
        cdev->dev.groups = ccwdev_attr_groups;
        /* Do first half of device_register. */
        device_initialize(&cdev->dev);
+       ret = dev_set_name(&cdev->dev, "0.%x.%04x", cdev->private->dev_id.ssid,
+                          cdev->private->dev_id.devno);
+       if (ret)
+               goto out_put;
        if (!get_device(&sch->dev)) {
-               /* Release reference from device_initialize(). */
-               put_device(&cdev->dev);
-               return -ENODEV;
+               ret = -ENODEV;
+               goto out_put;
        }
-       cdev->private->flags.initialized = 1;
+       priv->flags.initialized = 1;
+       spin_lock_irq(sch->lock);
+       sch_set_cdev(sch, cdev);
+       spin_unlock_irq(sch->lock);
        return 0;
+
+out_put:
+       /* Release reference from device_initialize(). */
+       put_device(&cdev->dev);
+       return ret;
 }
 
 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
@@ -858,7 +875,7 @@ static void io_subchannel_register(struct ccw_device *cdev)
        dev_set_uevent_suppress(&sch->dev, 0);
        kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
        /* make it known to the system */
-       ret = ccw_device_register(cdev);
+       ret = ccw_device_add(cdev);
        if (ret) {
                CIO_MSG_EVENT(0, "Could not register ccw dev 0.%x.%04x: %d\n",
                              cdev->private->dev_id.ssid,
@@ -923,26 +940,11 @@ io_subchannel_recog_done(struct ccw_device *cdev)
 
 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
 {
-       struct ccw_device_private *priv;
-
-       cdev->ccwlock = sch->lock;
-
-       /* Init private data. */
-       priv = cdev->private;
-       priv->dev_id.devno = sch->schib.pmcw.dev;
-       priv->dev_id.ssid = sch->schid.ssid;
-       priv->schid = sch->schid;
-       priv->state = DEV_STATE_NOT_OPER;
-       INIT_LIST_HEAD(&priv->cmb_list);
-       init_waitqueue_head(&priv->wait_q);
-       init_timer(&priv->timer);
-
        /* Increase counter of devices currently in recognition. */
        atomic_inc(&ccw_device_init_count);
 
        /* Start async. device sensing. */
        spin_lock_irq(sch->lock);
-       sch_set_cdev(sch, cdev);
        ccw_device_recognition(cdev);
        spin_unlock_irq(sch->lock);
 }
@@ -1083,7 +1085,7 @@ static int io_subchannel_probe(struct subchannel *sch)
                dev_set_uevent_suppress(&sch->dev, 0);
                kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
                cdev = sch_get_cdev(sch);
-               rc = ccw_device_register(cdev);
+               rc = ccw_device_add(cdev);
                if (rc) {
                        /* Release online reference. */
                        put_device(&cdev->dev);
@@ -1597,7 +1599,6 @@ int __init ccw_device_enable_console(struct ccw_device *cdev)
        if (rc)
                return rc;
        sch->driver = &io_subchannel_driver;
-       sch_set_cdev(sch, cdev);
        io_subchannel_recog(cdev, sch);
        /* Now wait for the async. recognition to come to an end. */
        spin_lock_irq(cdev->ccwlock);
@@ -1639,6 +1640,7 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
                put_device(&sch->dev);
                return ERR_PTR(-ENOMEM);
        }
+       set_io_private(sch, io_priv);
        cdev = io_subchannel_create_ccwdev(sch);
        if (IS_ERR(cdev)) {
                put_device(&sch->dev);
@@ -1646,7 +1648,6 @@ struct ccw_device * __init ccw_device_create_console(struct ccw_driver *drv)
                return cdev;
        }
        cdev->drv = drv;
-       set_io_private(sch, io_priv);
        ccw_device_set_int_class(cdev);
        return cdev;
 }
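
The refactor moves every bit of initialization, dev_set_name() included, to just after device_initialize(), leaving ccw_device_add() as a thin device_add() wrapper. That follows the driver core's two-stage pattern, whose point is that once device_initialize() has run, any failure can be unwound with a plain put_device(), which frees through the release callback. A condensed sketch (ssid/devno stand in for the private dev_id fields):

    device_initialize(&cdev->dev);          /* stage 1: refcounting is live */
    ret = dev_set_name(&cdev->dev, "0.%x.%04x", ssid, devno);
    if (ret)
            goto out_put;                   /* put_device() frees via ->release */

    ret = device_add(&cdev->dev);           /* stage 2: visible to the system */
    if (ret)
            goto out_put;
    return 0;
    out_put:
            put_device(&cdev->dev);
            return ret;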
index 4221b02..f1f3baa 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/debugfs.h>
 #include <linux/uaccess.h>
 #include <linux/export.h>
+#include <linux/slab.h>
 #include <asm/debug.h>
 #include "qdio_debug.h"
 #include "qdio.h"
@@ -16,11 +17,51 @@ debug_info_t *qdio_dbf_error;
 
 static struct dentry *debugfs_root;
 #define QDIO_DEBUGFS_NAME_LEN  10
+#define QDIO_DBF_NAME_LEN      20
 
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+struct qdio_dbf_entry {
+       char dbf_name[QDIO_DBF_NAME_LEN];
+       debug_info_t *dbf_info;
+       struct list_head dbf_list;
+};
+
+static LIST_HEAD(qdio_dbf_list);
+static DEFINE_MUTEX(qdio_dbf_list_mutex);
+
+static debug_info_t *qdio_get_dbf_entry(char *name)
+{
+       struct qdio_dbf_entry *entry;
+       debug_info_t *rc = NULL;
+
+       mutex_lock(&qdio_dbf_list_mutex);
+       list_for_each_entry(entry, &qdio_dbf_list, dbf_list) {
+               if (strcmp(entry->dbf_name, name) == 0) {
+                       rc = entry->dbf_info;
+                       break;
+               }
+       }
+       mutex_unlock(&qdio_dbf_list_mutex);
+       return rc;
+}
+
+static void qdio_clear_dbf_list(void)
+{
+       struct qdio_dbf_entry *entry, *tmp;
+
+       mutex_lock(&qdio_dbf_list_mutex);
+       list_for_each_entry_safe(entry, tmp, &qdio_dbf_list, dbf_list) {
+               list_del(&entry->dbf_list);
+               debug_unregister(entry->dbf_info);
+               kfree(entry);
+       }
+       mutex_unlock(&qdio_dbf_list_mutex);
+}
+
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
                       struct qdio_irq *irq_ptr)
 {
-       char text[20];
+       char text[QDIO_DBF_NAME_LEN];
+       struct qdio_dbf_entry *new_entry;
 
        DBF_EVENT("qfmt:%1d", init_data->q_format);
        DBF_HEX(init_data->adapter_name, 8);
@@ -38,11 +79,34 @@ void qdio_allocate_dbf(struct qdio_initialize *init_data,
        DBF_EVENT("irq:%8lx", (unsigned long)irq_ptr);
 
        /* allocate trace view for the interface */
-       snprintf(text, 20, "qdio_%s", dev_name(&init_data->cdev->dev));
-       irq_ptr->debug_area = debug_register(text, 2, 1, 16);
-       debug_register_view(irq_ptr->debug_area, &debug_hex_ascii_view);
-       debug_set_level(irq_ptr->debug_area, DBF_WARN);
-       DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+       snprintf(text, QDIO_DBF_NAME_LEN, "qdio_%s",
+                                       dev_name(&init_data->cdev->dev));
+       irq_ptr->debug_area = qdio_get_dbf_entry(text);
+       if (irq_ptr->debug_area)
+               DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf reused");
+       else {
+               irq_ptr->debug_area = debug_register(text, 2, 1, 16);
+               if (!irq_ptr->debug_area)
+                       return -ENOMEM;
+               if (debug_register_view(irq_ptr->debug_area,
+                                               &debug_hex_ascii_view)) {
+                       debug_unregister(irq_ptr->debug_area);
+                       return -ENOMEM;
+               }
+               debug_set_level(irq_ptr->debug_area, DBF_WARN);
+               DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf created");
+               new_entry = kzalloc(sizeof(struct qdio_dbf_entry), GFP_KERNEL);
+               if (!new_entry) {
+                       debug_unregister(irq_ptr->debug_area);
+                       return -ENOMEM;
+               }
+               strlcpy(new_entry->dbf_name, text, QDIO_DBF_NAME_LEN);
+               new_entry->dbf_info = irq_ptr->debug_area;
+               mutex_lock(&qdio_dbf_list_mutex);
+               list_add(&new_entry->dbf_list, &qdio_dbf_list);
+               mutex_unlock(&qdio_dbf_list_mutex);
+       }
+       return 0;
 }
 
 static int qstat_show(struct seq_file *m, void *v)
@@ -300,6 +364,7 @@ int __init qdio_debug_init(void)
 
 void qdio_debug_exit(void)
 {
+       qdio_clear_dbf_list();
        debugfs_remove(debugfs_root);
        if (qdio_dbf_setup)
                debug_unregister(qdio_dbf_setup);
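
s390 debug areas created with debug_register() are global, and qdio previously tore one down in qdio_free() only to re-register the same name on the next establish. The patch instead keeps each area on a name-keyed, mutex-protected list, reuses a matching entry on the next qdio_allocate(), and unregisters them all only in qdio_debug_exit(). qdio_allocate_dbf() now also returns -ENOMEM on any registration failure, which the qdio_allocate() hunk further down checks.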
index dfac9bf..f33ce85 100644 (file)
@@ -75,7 +75,7 @@ static inline void DBF_DEV_HEX(struct qdio_irq *dev, void *addr,
        }
 }
 
-void qdio_allocate_dbf(struct qdio_initialize *init_data,
+int qdio_allocate_dbf(struct qdio_initialize *init_data,
                       struct qdio_irq *irq_ptr);
 void qdio_setup_debug_entries(struct qdio_irq *irq_ptr,
                              struct ccw_device *cdev);
index 77466c4..848e3b6 100644 (file)
@@ -409,17 +409,16 @@ static inline void qdio_stop_polling(struct qdio_q *q)
                set_buf_state(q, q->u.in.ack_start, SLSB_P_INPUT_NOT_INIT);
 }
 
-static inline void account_sbals(struct qdio_q *q, int count)
+static inline void account_sbals(struct qdio_q *q, unsigned int count)
 {
-       int pos = 0;
+       int pos;
 
        q->q_stats.nr_sbal_total += count;
        if (count == QDIO_MAX_BUFFERS_MASK) {
                q->q_stats.nr_sbals[7]++;
                return;
        }
-       while (count >>= 1)
-               pos++;
+       pos = ilog2(count);
        q->q_stats.nr_sbals[pos]++;
 }
 
@@ -1234,12 +1233,10 @@ int qdio_free(struct ccw_device *cdev)
                return -ENODEV;
 
        DBF_EVENT("qfree:%4x", cdev->private->schid.sch_no);
+       DBF_DEV_EVENT(DBF_ERR, irq_ptr, "dbf abandoned");
        mutex_lock(&irq_ptr->setup_mutex);
 
-       if (irq_ptr->debug_area != NULL) {
-               debug_unregister(irq_ptr->debug_area);
-               irq_ptr->debug_area = NULL;
-       }
+       irq_ptr->debug_area = NULL;
        cdev->private->qdio_data = NULL;
        mutex_unlock(&irq_ptr->setup_mutex);
 
@@ -1276,7 +1273,8 @@ int qdio_allocate(struct qdio_initialize *init_data)
                goto out_err;
 
        mutex_init(&irq_ptr->setup_mutex);
-       qdio_allocate_dbf(init_data, irq_ptr);
+       if (qdio_allocate_dbf(init_data, irq_ptr))
+               goto out_rel;
 
        /*
         * Allocate a page for the chsc calls in qdio_establish.
index 8eec165..4038437 100644 (file)
@@ -77,12 +77,12 @@ MODULE_ALIAS("z90crypt");
  * Module parameter
  */
 int ap_domain_index = -1;      /* Adjunct Processor Domain Index */
-module_param_named(domain, ap_domain_index, int, 0000);
+module_param_named(domain, ap_domain_index, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(domain, "domain index for ap devices");
 EXPORT_SYMBOL(ap_domain_index);
 
 static int ap_thread_flag = 0;
-module_param_named(poll_thread, ap_thread_flag, int, 0000);
+module_param_named(poll_thread, ap_thread_flag, int, S_IRUSR|S_IRGRP);
 MODULE_PARM_DESC(poll_thread, "Turn on/off poll thread, default is 0 (off).");
 
 static struct device *ap_root_device = NULL;
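
A permission of 0000 keeps a module parameter out of sysfs altogether; S_IRUSR | S_IRGRP (0440) exposes the live values read-only (for a modular build, under /sys/module/ap/parameters/) while still rejecting writes.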
@@ -901,10 +901,15 @@ static int ap_device_probe(struct device *dev)
        int rc;
 
        ap_dev->drv = ap_drv;
+
+       spin_lock_bh(&ap_device_list_lock);
+       list_add(&ap_dev->list, &ap_device_list);
+       spin_unlock_bh(&ap_device_list_lock);
+
        rc = ap_drv->probe ? ap_drv->probe(ap_dev) : -ENODEV;
-       if (!rc) {
+       if (rc) {
                spin_lock_bh(&ap_device_list_lock);
-               list_add(&ap_dev->list, &ap_device_list);
+               list_del_init(&ap_dev->list);
                spin_unlock_bh(&ap_device_list_lock);
        }
        return rc;
index 5222ebe..0e18c5d 100644 (file)
@@ -356,7 +356,7 @@ struct zcrypt_ops *zcrypt_msgtype_request(unsigned char *name, int variant)
 
        zops = __ops_lookup(name, variant);
        if (!zops) {
-               request_module(name);
+               request_module("%s", name);
                zops = __ops_lookup(name, variant);
        }
        if ((!zops) || (!try_module_get(zops->owner)))
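
Same class of fix as the dev_set_name() changes earlier in this batch: request_module() is printf-like, and the msgtype name arrives from the caller, so it must be passed as a "%s" argument rather than as the format string.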
index d837c3c..fbc6701 100644 (file)
@@ -2915,7 +2915,7 @@ claw_new_device(struct ccwgroup_device *cgdev)
                        "failed with error code %d\n", ret);
                goto out;
        }
-       dev = alloc_netdev(0,"claw%d",claw_init_netdevice);
+       dev = alloc_netdev(0, "claw%d", NET_NAME_UNKNOWN, claw_init_netdevice);
        if (!dev) {
                dev_warn(&cgdev->dev,
                        "Activating the CLAW device failed\n");
index 03b6ad0..e056dd4 100644 (file)
@@ -1137,9 +1137,11 @@ static struct net_device *ctcm_init_netdevice(struct ctcm_priv *priv)
                return NULL;
 
        if (IS_MPC(priv))
-               dev = alloc_netdev(0, MPC_DEVICE_GENE, ctcm_dev_setup);
+               dev = alloc_netdev(0, MPC_DEVICE_GENE, NET_NAME_UNKNOWN,
+                                  ctcm_dev_setup);
        else
-               dev = alloc_netdev(0, CTC_DEVICE_GENE, ctcm_dev_setup);
+               dev = alloc_netdev(0, CTC_DEVICE_GENE, NET_NAME_UNKNOWN,
+                                  ctcm_dev_setup);
 
        if (!dev) {
                CTCM_DBF_TEXT_(ERROR, CTC_DBF_CRIT,
index ce16d1b..0a87809 100644 (file)
@@ -2015,7 +2015,7 @@ static struct net_device *netiucv_init_netdevice(char *username, char *userdata)
        struct net_device *dev;
 
        dev = alloc_netdev(sizeof(struct netiucv_priv), "iucv%d",
-                          netiucv_setup_netdevice);
+                          NET_NAME_UNKNOWN, netiucv_setup_netdevice);
        if (!dev)
                return NULL;
        rtnl_lock();
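
All of the alloc_netdev() churn in this batch (claw, ctcm, netiucv, and the qeth hunks below) comes from a 3.17 core change that added a name_assign_type parameter, letting userspace learn how an interface got its name; pre-existing callers were converted mechanically to NET_NAME_UNKNOWN. A hedged sketch of a converted call (my_priv and my_setup are placeholders):

    dev = alloc_netdev(sizeof(struct my_priv),  /* private area size */
                       "iucv%d",                /* name template */
                       NET_NAME_UNKNOWN,        /* new: how the name was assigned */
                       my_setup);               /* setup callback */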
index a2088af..bbafbd0 100644 (file)
@@ -766,6 +766,11 @@ struct carrier_info {
        __u32 port_speed;
 };
 
+struct qeth_switch_info {
+       __u32 capabilities;
+       __u32 settings;
+};
+
 #define QETH_NAPI_WEIGHT NAPI_POLL_WEIGHT
 
 struct qeth_card {
@@ -946,6 +951,8 @@ struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
 int qeth_mdio_read(struct net_device *, int, int);
 int qeth_snmp_command(struct qeth_card *, char __user *);
 int qeth_query_oat_command(struct qeth_card *, char __user *);
+int qeth_query_switch_attributes(struct qeth_card *card,
+                                 struct qeth_switch_info *sw_info);
 int qeth_query_card_info(struct qeth_card *card,
        struct carrier_info *carrier_info);
 int qeth_send_control_data(struct qeth_card *, int, struct qeth_cmd_buffer *,
index f54bec5..71bfacf 100644 (file)
@@ -3037,6 +3037,45 @@ int qeth_query_ipassists(struct qeth_card *card, enum qeth_prot_versions prot)
 }
 EXPORT_SYMBOL_GPL(qeth_query_ipassists);
 
+static int qeth_query_switch_attributes_cb(struct qeth_card *card,
+                               struct qeth_reply *reply, unsigned long data)
+{
+       struct qeth_ipa_cmd *cmd;
+       struct qeth_switch_info *sw_info;
+       struct qeth_query_switch_attributes *attrs;
+
+       QETH_CARD_TEXT(card, 2, "qswiatcb");
+       cmd = (struct qeth_ipa_cmd *) data;
+       sw_info = (struct qeth_switch_info *)reply->param;
+       if (cmd->data.setadapterparms.hdr.return_code == 0) {
+               attrs = &cmd->data.setadapterparms.data.query_switch_attributes;
+               sw_info->capabilities = attrs->capabilities;
+               sw_info->settings = attrs->settings;
+               QETH_CARD_TEXT_(card, 2, "%04x%04x", sw_info->capabilities,
+                                                       sw_info->settings);
+       }
+       qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
+
+       return 0;
+}
+
+int qeth_query_switch_attributes(struct qeth_card *card,
+                                struct qeth_switch_info *sw_info)
+{
+       struct qeth_cmd_buffer *iob;
+
+       QETH_CARD_TEXT(card, 2, "qswiattr");
+       if (!qeth_adp_supported(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES))
+               return -EOPNOTSUPP;
+       if (!netif_carrier_ok(card->dev))
+               return -ENOMEDIUM;
+       iob = qeth_get_adapter_cmd(card, IPA_SETADP_QUERY_SWITCH_ATTRIBUTES,
+                               sizeof(struct qeth_ipacmd_setadpparms_hdr));
+       return qeth_send_ipa_cmd(card, iob,
+                               qeth_query_switch_attributes_cb, sw_info);
+}
+EXPORT_SYMBOL_GPL(qeth_query_switch_attributes);
+
 static int qeth_query_setdiagass_cb(struct qeth_card *card,
                struct qeth_reply *reply, unsigned long data)
 {
index cf6a90e..1558be1 100644 (file)
@@ -242,6 +242,7 @@ enum qeth_ipa_setadp_cmd {
        IPA_SETADP_SET_DIAG_ASSIST              = 0x00002000L,
        IPA_SETADP_SET_ACCESS_CONTROL           = 0x00010000L,
        IPA_SETADP_QUERY_OAT                    = 0x00080000L,
+       IPA_SETADP_QUERY_SWITCH_ATTRIBUTES      = 0x00100000L,
 };
 enum qeth_ipa_mac_ops {
        CHANGE_ADDR_READ_MAC            = 0,
@@ -431,6 +432,21 @@ struct qeth_query_card_info {
        __u32   reserved2;
 };
 
+#define QETH_SWITCH_FORW_802_1         0x00000001
+#define QETH_SWITCH_FORW_REFL_RELAY    0x00000002
+#define QETH_SWITCH_CAP_RTE            0x00000004
+#define QETH_SWITCH_CAP_ECP            0x00000008
+#define QETH_SWITCH_CAP_VDP            0x00000010
+
+struct qeth_query_switch_attributes {
+       __u8  version;
+       __u8  reserved1;
+       __u16 reserved2;
+       __u32 capabilities;
+       __u32 settings;
+       __u8  reserved3[8];
+};
+
 struct qeth_ipacmd_setadpparms_hdr {
        __u32 supp_hw_cmds;
        __u32 reserved1;
@@ -452,6 +468,7 @@ struct qeth_ipacmd_setadpparms {
                struct qeth_set_access_ctrl set_access_ctrl;
                struct qeth_query_oat query_oat;
                struct qeth_query_card_info card_info;
+               struct qeth_query_switch_attributes query_switch_attributes;
                __u32 mode;
        } data;
 } __attribute__ ((packed));
index 8a25a2b..15523f0 100644 (file)
@@ -543,7 +543,42 @@ out:
 }
 
 static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
-                  qeth_dev_isolation_store);
+                       qeth_dev_isolation_store);
+
+static ssize_t qeth_dev_switch_attrs_show(struct device *dev,
+                               struct device_attribute *attr, char *buf)
+{
+       struct qeth_card *card = dev_get_drvdata(dev);
+       struct qeth_switch_info sw_info;
+       int     rc = 0;
+
+       if (!card)
+               return -EINVAL;
+
+       if (card->state != CARD_STATE_SOFTSETUP && card->state != CARD_STATE_UP)
+               return sprintf(buf, "n/a\n");
+
+       rc = qeth_query_switch_attributes(card, &sw_info);
+       if (rc)
+               return rc;
+
+       if (!sw_info.capabilities)
+               rc = sprintf(buf, "unknown");
+
+       if (sw_info.capabilities & QETH_SWITCH_FORW_802_1)
+               rc = sprintf(buf, (sw_info.settings & QETH_SWITCH_FORW_802_1 ?
+                                                       "[802.1]" : "802.1"));
+       if (sw_info.capabilities & QETH_SWITCH_FORW_REFL_RELAY)
+               rc += sprintf(buf + rc,
+                       (sw_info.settings & QETH_SWITCH_FORW_REFL_RELAY ?
+                                                       " [rr]" : " rr"));
+       rc += sprintf(buf + rc, "\n");
+
+       return rc;
+}
+
+static DEVICE_ATTR(switch_attrs, 0444,
+                  qeth_dev_switch_attrs_show, NULL);
 
 static ssize_t qeth_hw_trap_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -728,6 +763,7 @@ static struct attribute *qeth_device_attrs[] = {
        &dev_attr_layer2.attr,
        &dev_attr_isolation.attr,
        &dev_attr_hw_trap.attr,
+       &dev_attr_switch_attrs.attr,
        NULL,
 };
 static struct attribute_group qeth_device_attr_group = {
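
Userspace reads the new attribute from the qeth ccwgroup device (e.g. /sys/bus/ccwgroup/devices/<busid>/switch_attrs); it lists the forwarding modes the attached switch supports, with the currently active one in brackets ("[802.1] rr"), prints "unknown" when no capability bit is set, and "n/a" while the card is not up.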
index 5ef5b4f..c2679bf 100644 (file)
@@ -952,10 +952,12 @@ static int qeth_l2_setup_netdev(struct qeth_card *card)
 {
        switch (card->info.type) {
        case QETH_CARD_TYPE_IQD:
-               card->dev = alloc_netdev(0, "hsi%d", ether_setup);
+               card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
+                                        ether_setup);
                break;
        case QETH_CARD_TYPE_OSN:
-               card->dev = alloc_netdev(0, "osn%d", ether_setup);
+               card->dev = alloc_netdev(0, "osn%d", NET_NAME_UNKNOWN,
+                                        ether_setup);
                card->dev->flags |= IFF_NOARP;
                break;
        default:
index 14e0b58..f8427a2 100644 (file)
@@ -3287,7 +3287,8 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
                        }
                }
        } else if (card->info.type == QETH_CARD_TYPE_IQD) {
-               card->dev = alloc_netdev(0, "hsi%d", ether_setup);
+               card->dev = alloc_netdev(0, "hsi%d", NET_NAME_UNKNOWN,
+                                        ether_setup);
                if (!card->dev)
                        return -ENODEV;
                card->dev->flags |= IFF_NOARP;
index 5543490..56467df 100644 (file)
@@ -4198,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
                kfree(phba->ep_array);
                phba->ep_array = NULL;
                ret = -ENOMEM;
+
+               goto free_memory;
        }
 
        for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
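
Without the added goto, this error path fell through into the loop below after freeing ep_array, so the -ENOMEM case dereferenced the just-freed (and NULLed) array; jumping to the common free_memory label unwinds instead.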
index 6045aa7..07934b0 100644 (file)
@@ -1008,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
                BE2_IPV6 : BE2_IPV4 ;
 
        rc = mgmt_get_if_info(phba, ip_type, &if_info);
-       if (rc) {
-               kfree(if_info);
+       if (rc)
                return rc;
-       }
 
        if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
                if (if_info->dhcp_state) {
index f548430..785d0d7 100644 (file)
@@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        skb_pull(skb, sizeof(struct fcoe_hdr));
        fr_len = skb->len - sizeof(struct fcoe_crc_eof);
 
-       stats = per_cpu_ptr(lport->stats, get_cpu());
-       stats->RxFrames++;
-       stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
-
        fp = (struct fc_frame *)skb;
        fc_frame_init(fp);
        fr_dev(fp) = lport;
        fr_sof(fp) = hp->fcoe_sof;
        if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
-               put_cpu();
                kfree_skb(skb);
                return;
        }
        fr_eof(fp) = crc_eof.fcoe_eof;
        fr_crc(fp) = crc_eof.fcoe_crc32;
        if (pskb_trim(skb, fr_len)) {
-               put_cpu();
                kfree_skb(skb);
                return;
        }
@@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
                port = lport_priv(vn_port);
                if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
                        BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
-                       put_cpu();
                        kfree_skb(skb);
                        return;
                }
@@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
        if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
            fh->fh_type == FC_TYPE_FCP) {
                /* Drop FCP data. We don't handle this in the L2 path */
-               put_cpu();
                kfree_skb(skb);
                return;
        }
@@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
                case ELS_LOGO:
                        if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
                                /* drop non-FIP LOGO */
-                               put_cpu();
                                kfree_skb(skb);
                                return;
                        }
@@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
 
        if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
                /* Drop incoming ABTS */
-               put_cpu();
                kfree_skb(skb);
                return;
        }
 
+       stats = per_cpu_ptr(lport->stats, smp_processor_id());
+       stats->RxFrames++;
+       stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+
        if (le32_to_cpu(fr_crc(fp)) !=
                        ~crc32(~0, skb->data, fr_len)) {
                if (stats->InvalidCRCCount < 5)
                        printk(KERN_WARNING PFX "dropping frame with "
                               "CRC error\n");
                stats->InvalidCRCCount++;
-               put_cpu();
                kfree_skb(skb);
                return;
        }
-       put_cpu();
        fc_exch_recv(lport, fp);
 }
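
Moving the RxFrames/RxWords accounting below all of the drop checks means discarded frames no longer inflate the receive counters, and since no per-CPU data is touched before that point, the get_cpu()/put_cpu() pinning across every early return can go; the one remaining accounting site picks its stats slot with smp_processor_id().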
 
index 32a5e0a..7bc47fc 100644 (file)
@@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
                                       arr_sz, GFP_KERNEL);
        if (!cmgr->free_list_lock) {
                printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
+               kfree(cmgr->free_list);
+               cmgr->free_list = NULL;
                goto mem_err;
        }
 
index e8ee5e5..d31f9e6 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/tcp.h>
 #include <net/dst.h>
 #include <linux/netdevice.h>
+#include <net/addrconf.h>
 
 #include "t4_regs.h"
 #include "t4_msg.h"
@@ -150,6 +151,7 @@ static struct scsi_transport_template *cxgb4i_stt;
 * The section below implements the CPLs related to iSCSI TCP connection
  * open/close/abort and data send/receive.
  */
+
 #define DIV_ROUND_UP(n, d)     (((n) + (d) - 1) / (d))
 #define RCV_BUFSIZ_MASK                0x3FFU
 #define MAX_IMM_TX_PKT_LEN     128
@@ -179,6 +181,7 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
                                struct l2t_entry *e)
 {
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+       int t4 = is_t4(lldi->adapter_type);
        int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
        unsigned long long opt0;
        unsigned int opt2;
@@ -248,6 +251,97 @@ static void send_act_open_req(struct cxgbi_sock *csk, struct sk_buff *skb,
        }
 
        set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+       pr_info_ipaddr("t%d csk 0x%p,%u,0x%lx,%u, rss_qid %u.\n",
+                      (&csk->saddr), (&csk->daddr), t4 ? 4 : 5, csk,
+                      csk->state, csk->flags, csk->atid, csk->rss_qid);
+
+       cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
+}
+
+static void send_act_open_req6(struct cxgbi_sock *csk, struct sk_buff *skb,
+                              struct l2t_entry *e)
+{
+       struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+       int t4 = is_t4(lldi->adapter_type);
+       int wscale = cxgbi_sock_compute_wscale(csk->mss_idx);
+       unsigned long long opt0;
+       unsigned int opt2;
+       unsigned int qid_atid = ((unsigned int)csk->atid) |
+                                (((unsigned int)csk->rss_qid) << 14);
+
+       opt0 = KEEP_ALIVE(1) |
+               WND_SCALE(wscale) |
+               MSS_IDX(csk->mss_idx) |
+               L2T_IDX(((struct l2t_entry *)csk->l2t)->idx) |
+               TX_CHAN(csk->tx_chan) |
+               SMAC_SEL(csk->smac_idx) |
+               ULP_MODE(ULP_MODE_ISCSI) |
+               RCV_BUFSIZ(cxgb4i_rcv_win >> 10);
+
+       opt2 = RX_CHANNEL(0) |
+               RSS_QUEUE_VALID |
+               RX_FC_DISABLE |
+               RSS_QUEUE(csk->rss_qid);
+
+       if (t4) {
+               struct cpl_act_open_req6 *req =
+                           (struct cpl_act_open_req6 *)skb->head;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                           qid_atid));
+               req->local_port = csk->saddr6.sin6_port;
+               req->peer_port = csk->daddr6.sin6_port;
+
+               req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+               req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+                                                                   8);
+               req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+               req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+                                                                   8);
+
+               req->opt0 = cpu_to_be64(opt0);
+
+               opt2 |= RX_FC_VALID;
+               req->opt2 = cpu_to_be32(opt2);
+
+               req->params = cpu_to_be32(cxgb4_select_ntuple(
+                                         csk->cdev->ports[csk->port_id],
+                                         csk->l2t));
+       } else {
+               struct cpl_t5_act_open_req6 *req =
+                               (struct cpl_t5_act_open_req6 *)skb->head;
+
+               INIT_TP_WR(req, 0);
+               OPCODE_TID(req) = cpu_to_be32(MK_OPCODE_TID(CPL_ACT_OPEN_REQ6,
+                                                           qid_atid));
+               req->local_port = csk->saddr6.sin6_port;
+               req->peer_port = csk->daddr6.sin6_port;
+               req->local_ip_hi = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr);
+               req->local_ip_lo = *(__be64 *)(csk->saddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->peer_ip_hi = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr);
+               req->peer_ip_lo = *(__be64 *)(csk->daddr6.sin6_addr.s6_addr +
+                                                                       8);
+               req->opt0 = cpu_to_be64(opt0);
+
+               opt2 |= T5_OPT_2_VALID;
+               req->opt2 = cpu_to_be32(opt2);
+
+               req->params = cpu_to_be64(V_FILTER_TUPLE(cxgb4_select_ntuple(
+                                         csk->cdev->ports[csk->port_id],
+                                         csk->l2t)));
+       }
+
+       set_wr_txq(skb, CPL_PRIORITY_SETUP, csk->port_id);
+
+       pr_info("t%d csk 0x%p,%u,0x%lx,%u, [%pI6]:%u-[%pI6]:%u, rss_qid %u.\n",
+               t4 ? 4 : 5, csk, csk->state, csk->flags, csk->atid,
+               &csk->saddr6.sin6_addr, ntohs(csk->saddr.sin_port),
+               &csk->daddr6.sin6_addr, ntohs(csk->daddr.sin_port),
+               csk->rss_qid);
+
        cxgb4_l2t_send(csk->cdev->ports[csk->port_id], skb, csk->l2t);
 }
 
@@ -586,9 +680,11 @@ static void do_act_establish(struct cxgbi_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx, tid %u, atid %u, rseq %u.\n",
-               csk, csk->state, csk->flags, tid, atid, rcv_isn);
+       pr_info_ipaddr("atid 0x%x, tid 0x%x, csk 0x%p,%u,0x%lx, isn %u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      atid, tid, csk, csk->state, csk->flags, rcv_isn);
+
+       module_put(THIS_MODULE);
 
        cxgbi_sock_get(csk);
        csk->tid = tid;
@@ -663,6 +759,9 @@ static void csk_act_open_retry_timer(unsigned long data)
        struct sk_buff *skb;
        struct cxgbi_sock *csk = (struct cxgbi_sock *)data;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);
+       void (*send_act_open_func)(struct cxgbi_sock *, struct sk_buff *,
+                                  struct l2t_entry *);
+       int t4 = is_t4(lldi->adapter_type), size, size6;
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
@@ -670,20 +769,35 @@ static void csk_act_open_retry_timer(unsigned long data)
 
        cxgbi_sock_get(csk);
        spin_lock_bh(&csk->lock);
-       skb = alloc_wr(is_t4(lldi->adapter_type) ?
-                               sizeof(struct cpl_act_open_req) :
-                               sizeof(struct cpl_t5_act_open_req),
-                       0, GFP_ATOMIC);
+
+       if (t4) {
+               size = sizeof(struct cpl_act_open_req);
+               size6 = sizeof(struct cpl_act_open_req6);
+       } else {
+               size = sizeof(struct cpl_t5_act_open_req);
+               size6 = sizeof(struct cpl_t5_act_open_req6);
+       }
+
+       if (csk->csk_family == AF_INET) {
+               send_act_open_func = send_act_open_req;
+               skb = alloc_wr(size, 0, GFP_ATOMIC);
+       } else {
+               send_act_open_func = send_act_open_req6;
+               skb = alloc_wr(size6, 0, GFP_ATOMIC);
+       }
+
        if (!skb)
                cxgbi_sock_fail_act_open(csk, -ENOMEM);
        else {
                skb->sk = (struct sock *)csk;
                t4_set_arp_err_handler(skb, csk,
-                                       cxgbi_sock_act_open_req_arp_failure);
-               send_act_open_req(csk, skb, csk->l2t);
+                                      cxgbi_sock_act_open_req_arp_failure);
+               send_act_open_func(csk, skb, csk->l2t);
        }
+
        spin_unlock_bh(&csk->lock);
        cxgbi_sock_put(csk);
+
 }
 
 static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
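
The IPv6 work follows one pattern throughout the file: size the work request and pick the request builder by csk->csk_family, with the IPv6 builder carrying each 128-bit address as two big-endian 64-bit halves (local_ip_hi/local_ip_lo). Condensed sketch of the dispatch, as in the retry timer above:

    if (csk->csk_family == AF_INET) {
            skb = alloc_wr(size, 0, GFP_ATOMIC);
            send_act_open_req(csk, skb, csk->l2t);
    } else {
            skb = alloc_wr(size6, 0, GFP_ATOMIC);
            send_act_open_req6(csk, skb, csk->l2t);
    }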
@@ -703,10 +817,9 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       pr_info("%pI4:%u-%pI4:%u, atid %u,%u, status %u, csk 0x%p,%u,0x%lx.\n",
-               &csk->saddr.sin_addr.s_addr, ntohs(csk->saddr.sin_port),
-               &csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
-               atid, tid, status, csk, csk->state, csk->flags);
+       pr_info_ipaddr("tid %u/%u, status %u.\n"
+                      "csk 0x%p,%u,0x%lx. ", (&csk->saddr), (&csk->daddr),
+                      atid, tid, status, csk, csk->state, csk->flags);
 
        if (status == CPL_ERR_RTX_NEG_ADVICE)
                goto rel_skb;
@@ -746,9 +859,9 @@ static void do_peer_close(struct cxgbi_device *cdev, struct sk_buff *skb)
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx,%u.\n",
-               csk, csk->state, csk->flags, csk->tid);
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_peer_close(csk);
 rel_skb:
        __kfree_skb(skb);
@@ -767,9 +880,9 @@ static void do_close_con_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
                pr_err("can't find connection for tid %u.\n", tid);
                goto rel_skb;
        }
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx,%u.\n",
-               csk, csk->state, csk->flags, csk->tid);
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      csk, csk->state, csk->flags, csk->tid);
        cxgbi_sock_rcv_close_conn_rpl(csk, ntohl(rpl->snd_nxt));
 rel_skb:
        __kfree_skb(skb);
@@ -808,9 +921,9 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
                goto rel_skb;
        }
 
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,%u,0x%lx, tid %u, status 0x%x.\n",
-               csk, csk->state, csk->flags, csk->tid, req->status);
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+                      (&csk->saddr), (&csk->daddr),
+                      csk, csk->state, csk->flags, csk->tid, req->status);
 
        if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
            req->status == CPL_ERR_PERSIST_NEG_ADVICE)
@@ -851,10 +964,10 @@ static void do_abort_rpl_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
        if (!csk)
                goto rel_skb;
 
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "status 0x%x, csk 0x%p, s %u, 0x%lx.\n",
-               rpl->status, csk, csk ? csk->state : 0,
-               csk ? csk->flags : 0UL);
+       if (csk)
+               pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u, status %u.\n",
+                              (&csk->saddr), (&csk->daddr), csk,
+                              csk->state, csk->flags, csk->tid, rpl->status);
 
        if (rpl->status == CPL_ERR_ABORT_FAILED)
                goto rel_skb;
@@ -1163,15 +1276,35 @@ static int init_act_open(struct cxgbi_sock *csk)
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);
        struct net_device *ndev = cdev->ports[csk->port_id];
-       struct port_info *pi = netdev_priv(ndev);
        struct sk_buff *skb = NULL;
-       struct neighbour *n;
+       struct neighbour *n = NULL;
+       void *daddr;
        unsigned int step;
+       unsigned int size, size6;
+       int t4 = is_t4(lldi->adapter_type);
 
        log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
                "csk 0x%p,%u,0x%lx,%u.\n",
                csk, csk->state, csk->flags, csk->tid);
 
+       if (csk->csk_family == AF_INET)
+               daddr = &csk->daddr.sin_addr.s_addr;
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (csk->csk_family == AF_INET6)
+               daddr = &csk->daddr6.sin6_addr;
+#endif
+       else {
+               pr_err("address family 0x%x not supported\n", csk->csk_family);
+               goto rel_resource;
+       }
+
+       n = dst_neigh_lookup(csk->dst, daddr);
+
+       if (!n) {
+               pr_err("%s, can't get neighbour of csk->dst.\n", ndev->name);
+               goto rel_resource;
+       }
+
        csk->atid = cxgb4_alloc_atid(lldi->tids, csk);
        if (csk->atid < 0) {
                pr_err("%s, NO atid available.\n", ndev->name);
@@ -1192,10 +1325,19 @@ static int init_act_open(struct cxgbi_sock *csk)
        }
        cxgbi_sock_get(csk);
 
-       skb = alloc_wr(is_t4(lldi->adapter_type) ?
-                               sizeof(struct cpl_act_open_req) :
-                               sizeof(struct cpl_t5_act_open_req),
-                       0, GFP_ATOMIC);
+       if (t4) {
+               size = sizeof(struct cpl_act_open_req);
+               size6 = sizeof(struct cpl_act_open_req6);
+       } else {
+               size = sizeof(struct cpl_t5_act_open_req);
+               size6 = sizeof(struct cpl_t5_act_open_req6);
+       }
+
+       if (csk->csk_family == AF_INET)
+               skb = alloc_wr(size, 0, GFP_NOIO);
+       else
+               skb = alloc_wr(size6, 0, GFP_NOIO);
+
        if (!skb)
                goto rel_resource;
        skb->sk = (struct sock *)csk;
@@ -1211,19 +1353,27 @@ static int init_act_open(struct cxgbi_sock *csk)
        csk->txq_idx = cxgb4_port_idx(ndev) * step;
        step = lldi->nrxq / lldi->nchan;
        csk->rss_qid = lldi->rxq_ids[cxgb4_port_idx(ndev) * step];
-       csk->wr_max_cred = csk->wr_cred = lldi->wr_cred;
+       csk->wr_cred = lldi->wr_cred -
+                      DIV_ROUND_UP(sizeof(struct cpl_abort_req), 16);
+       csk->wr_max_cred = csk->wr_cred;
        csk->wr_una_cred = 0;
        cxgbi_sock_reset_wr_list(csk);
        csk->err = 0;
-       log_debug(1 << CXGBI_DBG_TOE | 1 << CXGBI_DBG_SOCK,
-               "csk 0x%p,p%d,%s, %u,%u,%u, mss %u,%u, smac %u.\n",
-               csk, pi->port_id, ndev->name, csk->tx_chan,
-               csk->txq_idx, csk->rss_qid, csk->mtu, csk->mss_idx,
-               csk->smac_idx);
 
+       pr_info_ipaddr("csk 0x%p,%u,0x%lx,%u,%u,%u, mtu %u,%u, smac %u.\n",
+                      (&csk->saddr), (&csk->daddr), csk, csk->state,
+                      csk->flags, csk->tx_chan, csk->txq_idx, csk->rss_qid,
+                      csk->mtu, csk->mss_idx, csk->smac_idx);
+
+       /* must wait for either an act_open_rpl or act_open_establish */
+       try_module_get(THIS_MODULE);
        cxgbi_sock_set_state(csk, CTP_ACTIVE_OPEN);
-       send_act_open_req(csk, skb, csk->l2t);
+       if (csk->csk_family == AF_INET)
+               send_act_open_req(csk, skb, csk->l2t);
+       else
+               send_act_open_req6(csk, skb, csk->l2t);
        neigh_release(n);
+
        return 0;
 
 rel_resource:
@@ -1487,6 +1637,129 @@ static int cxgb4i_ddp_init(struct cxgbi_device *cdev)
        return 0;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static int cxgbi_inet6addr_handler(struct notifier_block *this,
+                                  unsigned long event, void *data)
+{
+       struct inet6_ifaddr *ifa = data;
+       struct net_device *event_dev = ifa->idev->dev;
+       struct cxgbi_device *cdev;
+       int ret = NOTIFY_DONE;
+
+       if (event_dev->priv_flags & IFF_802_1Q_VLAN)
+               event_dev = vlan_dev_real_dev(event_dev);
+
+       cdev = cxgbi_device_find_by_netdev(event_dev, NULL);
+
+       if (!cdev)
+               return ret;
+
+       switch (event) {
+       case NETDEV_UP:
+               ret = cxgb4_clip_get(event_dev,
+                                    (const struct in6_addr *)
+                                    ((ifa)->addr.s6_addr));
+               if (ret < 0)
+                       return ret;
+
+               ret = NOTIFY_OK;
+               break;
+
+       case NETDEV_DOWN:
+               cxgb4_clip_release(event_dev,
+                                  (const struct in6_addr *)
+                                  ((ifa)->addr.s6_addr));
+               ret = NOTIFY_OK;
+               break;
+
+       default:
+               break;
+       }
+
+       return ret;
+}
+
+static struct notifier_block cxgbi_inet6addr_notifier = {
+       .notifier_call = cxgbi_inet6addr_handler
+};
+
+/* Retrieve IPv6 addresses from a root device (bond, vlan) associated with
+ * a physical device.
+ * The physical device reference is needed to send the actual CLIP command.
+ */
+static int update_dev_clip(struct net_device *root_dev, struct net_device *dev)
+{
+       struct inet6_dev *idev = NULL;
+       struct inet6_ifaddr *ifa;
+       int ret = 0;
+
+       idev = __in6_dev_get(root_dev);
+       if (!idev)
+               return ret;
+
+       read_lock_bh(&idev->lock);
+       list_for_each_entry(ifa, &idev->addr_list, if_list) {
+               pr_info("updating the clip for addr %pI6\n",
+                       ifa->addr.s6_addr);
+               ret = cxgb4_clip_get(dev, (const struct in6_addr *)
+                                    ifa->addr.s6_addr);
+               if (ret < 0)
+                       break;
+       }
+
+       read_unlock_bh(&idev->lock);
+       return ret;
+}
+
+static int update_root_dev_clip(struct net_device *dev)
+{
+       struct net_device *root_dev = NULL;
+       int i, ret = 0;
+
+       /* First populate the real net device's IPv6 address */
+       ret = update_dev_clip(dev, dev);
+       if (ret)
+               return ret;
+
+       /* Parse all bond and vlan devices layered on top of the physical dev */
+       root_dev = netdev_master_upper_dev_get(dev);
+       if (root_dev) {
+               ret = update_dev_clip(root_dev, dev);
+               if (ret)
+                       return ret;
+       }
+
+       for (i = 0; i < VLAN_N_VID; i++) {
+               root_dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), i);
+               if (!root_dev)
+                       continue;
+
+               ret = update_dev_clip(root_dev, dev);
+               if (ret)
+                       break;
+       }
+       return ret;
+}
+
+static void cxgbi_update_clip(struct cxgbi_device *cdev)
+{
+       int i;
+
+       rcu_read_lock();
+
+       for (i = 0; i < cdev->nports; i++) {
+               struct net_device *dev = cdev->ports[i];
+               int ret = 0;
+
+               if (dev)
+                       ret = update_root_dev_clip(dev);
+               if (ret < 0)
+                       break;
+       }
+       rcu_read_unlock();
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
 static void *t4_uld_add(const struct cxgb4_lld_info *lldi)
 {
        struct cxgbi_device *cdev;
@@ -1605,6 +1878,9 @@ static int t4_uld_state_change(void *handle, enum cxgb4_state state)
        switch (state) {
        case CXGB4_STATE_UP:
                pr_info("cdev 0x%p, UP.\n", cdev);
+#if IS_ENABLED(CONFIG_IPV6)
+               cxgbi_update_clip(cdev);
+#endif
                /* re-initialize */
                break;
        case CXGB4_STATE_START_RECOVERY:
@@ -1635,11 +1911,18 @@ static int __init cxgb4i_init_module(void)
        if (rc < 0)
                return rc;
        cxgb4_register_uld(CXGB4_ULD_ISCSI, &cxgb4i_uld_info);
+
+#if IS_ENABLED(CONFIG_IPV6)
+       register_inet6addr_notifier(&cxgbi_inet6addr_notifier);
+#endif
        return 0;
 }
 
 static void __exit cxgb4i_exit_module(void)
 {
+#if IS_ENABLED(CONFIG_IPV6)
+       unregister_inet6addr_notifier(&cxgbi_inet6addr_notifier);
+#endif
        cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
        cxgbi_device_unregister_all(CXGBI_FLAG_DEV_T4);
        cxgbi_iscsi_cleanup(&cxgb4i_iscsi_transport, &cxgb4i_stt);
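
The two hunks above share one selection pattern: the work-request size is
chosen first by chip generation (T4 vs. T5 CPLs) and then by address
family, before dispatching to send_act_open_req() or send_act_open_req6().
A minimal userspace C sketch of that logic, with placeholder struct sizes
standing in for the real CPL wire layouts:

#include <stdio.h>
#include <sys/socket.h>

struct cpl_act_open_req     { char pad[32]; }; /* placeholder size */
struct cpl_act_open_req6    { char pad[56]; }; /* placeholder size */
struct cpl_t5_act_open_req  { char pad[40]; }; /* placeholder size */
struct cpl_t5_act_open_req6 { char pad[64]; }; /* placeholder size */

static size_t act_open_size(int is_t4, int family)
{
        size_t size, size6;

        if (is_t4) {
                size  = sizeof(struct cpl_act_open_req);
                size6 = sizeof(struct cpl_act_open_req6);
        } else {
                size  = sizeof(struct cpl_t5_act_open_req);
                size6 = sizeof(struct cpl_t5_act_open_req6);
        }
        return family == AF_INET ? size : size6;
}

int main(void)
{
        printf("T4/IPv4 wr: %zu bytes\n", act_open_size(1, AF_INET));
        printf("T5/IPv6 wr: %zu bytes\n", act_open_size(0, AF_INET6));
        return 0;
}
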
index b44c1cf..3d5322d 100644
 #include <linux/inet.h>
 #include <net/dst.h>
 #include <net/route.h>
+#include <net/ipv6.h>
+#include <net/ip6_route.h>
+#include <net/addrconf.h>
+
 #include <linux/inetdevice.h>  /* ip_dev_find */
 #include <linux/module.h>
 #include <net/tcp.h>
@@ -193,8 +197,8 @@ struct cxgbi_device *cxgbi_device_find_by_lldev(void *lldev)
 }
 EXPORT_SYMBOL_GPL(cxgbi_device_find_by_lldev);
 
-static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
-                                                       int *port)
+struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
+                                                int *port)
 {
        struct net_device *vdev = NULL;
        struct cxgbi_device *cdev, *tmp;
@@ -224,6 +228,40 @@ static struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *ndev,
                "ndev 0x%p, %s, NO match found.\n", ndev, ndev->name);
        return NULL;
 }
+EXPORT_SYMBOL_GPL(cxgbi_device_find_by_netdev);
+
+static struct cxgbi_device *cxgbi_device_find_by_mac(struct net_device *ndev,
+                                                    int *port)
+{
+       struct net_device *vdev = NULL;
+       struct cxgbi_device *cdev, *tmp;
+       int i;
+
+       if (ndev->priv_flags & IFF_802_1Q_VLAN) {
+               vdev = ndev;
+               ndev = vlan_dev_real_dev(ndev);
+               pr_info("vlan dev %s -> %s.\n", vdev->name, ndev->name);
+       }
+
+       mutex_lock(&cdev_mutex);
+       list_for_each_entry_safe(cdev, tmp, &cdev_list, list_head) {
+               for (i = 0; i < cdev->nports; i++) {
+                       if (!memcmp(ndev->dev_addr, cdev->ports[i]->dev_addr,
+                                   MAX_ADDR_LEN)) {
+                               cdev->hbas[i]->vdev = vdev;
+                               mutex_unlock(&cdev_mutex);
+                               if (port)
+                                       *port = i;
+                               return cdev;
+                       }
+               }
+       }
+       mutex_unlock(&cdev_mutex);
+       log_debug(1 << CXGBI_DBG_DEV,
+                 "ndev 0x%p, %s, NO MAC match found.\n",
+                 ndev, ndev->name);
+       return NULL;
+}
 
 void cxgbi_hbas_remove(struct cxgbi_device *cdev)
 {
@@ -320,6 +358,7 @@ static int sock_get_port(struct cxgbi_sock *csk)
        struct cxgbi_ports_map *pmap = &cdev->pmap;
        unsigned int start;
        int idx;
+       __be16 *port;
 
        if (!pmap->max_connect) {
                pr_err("cdev 0x%p, p#%u %s, NO port map.\n",
@@ -327,9 +366,14 @@ static int sock_get_port(struct cxgbi_sock *csk)
                return -EADDRNOTAVAIL;
        }
 
-       if (csk->saddr.sin_port) {
+       if (csk->csk_family == AF_INET)
+               port = &csk->saddr.sin_port;
+       else /* ipv6 */
+               port = &csk->saddr6.sin6_port;
+
+       if (*port) {
                pr_err("source port NON-ZERO %u.\n",
-                       ntohs(csk->saddr.sin_port));
+                       ntohs(*port));
                return -EADDRINUSE;
        }
 
@@ -347,8 +391,7 @@ static int sock_get_port(struct cxgbi_sock *csk)
                        idx = 0;
                if (!pmap->port_csk[idx]) {
                        pmap->used++;
-                       csk->saddr.sin_port =
-                               htons(pmap->sport_base + idx);
+                       *port = htons(pmap->sport_base + idx);
                        pmap->next = idx;
                        pmap->port_csk[idx] = csk;
                        spin_unlock_bh(&pmap->lock);
@@ -374,16 +417,22 @@ static void sock_put_port(struct cxgbi_sock *csk)
 {
        struct cxgbi_device *cdev = csk->cdev;
        struct cxgbi_ports_map *pmap = &cdev->pmap;
+       __be16 *port;
 
-       if (csk->saddr.sin_port) {
-               int idx = ntohs(csk->saddr.sin_port) - pmap->sport_base;
+       if (csk->csk_family == AF_INET)
+               port = &csk->saddr.sin_port;
+       else /* ipv6 */
+               port = &csk->saddr6.sin6_port;
 
-               csk->saddr.sin_port = 0;
+       if (*port) {
+               int idx = ntohs(*port) - pmap->sport_base;
+
+               *port = 0;
                if (idx < 0 || idx >= pmap->max_connect) {
                        pr_err("cdev 0x%p, p#%u %s, port %u OOR.\n",
                                cdev, csk->port_id,
                                cdev->ports[csk->port_id]->name,
-                               ntohs(csk->saddr.sin_port));
+                               ntohs(*port));
                        return;
                }
 
@@ -479,17 +528,11 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        int port = 0xFFFF;
        int err = 0;
 
-       if (daddr->sin_family != AF_INET) {
-               pr_info("address family 0x%x NOT supported.\n",
-                       daddr->sin_family);
-               err = -EAFNOSUPPORT;
-               goto err_out;
-       }
-
        rt = find_route_ipv4(&fl4, 0, daddr->sin_addr.s_addr, 0, daddr->sin_port, 0);
        if (!rt) {
                pr_info("no route to ipv4 0x%x, port %u.\n",
-                       daddr->sin_addr.s_addr, daddr->sin_port);
+                       be32_to_cpu(daddr->sin_addr.s_addr),
+                       be16_to_cpu(daddr->sin_port));
                err = -ENETUNREACH;
                goto err_out;
        }
@@ -537,9 +580,12 @@ static struct cxgbi_sock *cxgbi_check_route(struct sockaddr *dst_addr)
        csk->port_id = port;
        csk->mtu = mtu;
        csk->dst = dst;
+
+       csk->csk_family = AF_INET;
        csk->daddr.sin_addr.s_addr = daddr->sin_addr.s_addr;
        csk->daddr.sin_port = daddr->sin_port;
        csk->daddr.sin_family = daddr->sin_family;
+       csk->saddr.sin_family = daddr->sin_family;
        csk->saddr.sin_addr.s_addr = fl4.saddr;
        neigh_release(n);
 
@@ -556,6 +602,123 @@ err_out:
        return ERR_PTR(err);
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static struct rt6_info *find_route_ipv6(const struct in6_addr *saddr,
+                                       const struct in6_addr *daddr)
+{
+       struct flowi6 fl;
+
+       if (saddr)
+               memcpy(&fl.saddr, saddr, sizeof(struct in6_addr));
+       if (daddr)
+               memcpy(&fl.daddr, daddr, sizeof(struct in6_addr));
+       return (struct rt6_info *)ip6_route_output(&init_net, NULL, &fl);
+}
+
+static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
+{
+       struct sockaddr_in6 *daddr6 = (struct sockaddr_in6 *)dst_addr;
+       struct dst_entry *dst;
+       struct net_device *ndev;
+       struct cxgbi_device *cdev;
+       struct rt6_info *rt = NULL;
+       struct neighbour *n;
+       struct in6_addr pref_saddr;
+       struct cxgbi_sock *csk = NULL;
+       unsigned int mtu = 0;
+       int port = 0xFFFF;
+       int err = 0;
+
+       rt = find_route_ipv6(NULL, &daddr6->sin6_addr);
+
+       if (!rt) {
+               pr_info("no route to ipv6 %pI6 port %u\n",
+                       daddr6->sin6_addr.s6_addr,
+                       be16_to_cpu(daddr6->sin6_port));
+               err = -ENETUNREACH;
+               goto err_out;
+       }
+
+       dst = &rt->dst;
+
+       n = dst_neigh_lookup(dst, &daddr6->sin6_addr);
+
+       if (!n) {
+               pr_info("%pI6, port %u, dst no neighbour.\n",
+                       daddr6->sin6_addr.s6_addr,
+                       be16_to_cpu(daddr6->sin6_port));
+               err = -ENETUNREACH;
+               goto rel_rt;
+       }
+       ndev = n->dev;
+
+       if (ipv6_addr_is_multicast(&rt->rt6i_dst.addr)) {
+               pr_info("multi-cast route %pI6 port %u, dev %s.\n",
+                       daddr6->sin6_addr.s6_addr,
+                       ntohs(daddr6->sin6_port), ndev->name);
+               err = -ENETUNREACH;
+               goto rel_rt;
+       }
+
+       cdev = cxgbi_device_find_by_netdev(ndev, &port);
+       if (!cdev)
+               cdev = cxgbi_device_find_by_mac(ndev, &port);
+       if (!cdev) {
+               pr_info("dst %pI6 %s, NOT a cxgbi device.\n",
+                       daddr6->sin6_addr.s6_addr, ndev->name);
+               err = -ENETUNREACH;
+               goto rel_rt;
+       }
+       log_debug(1 << CXGBI_DBG_SOCK,
+                 "route to %pI6 :%u, ndev p#%d,%s, cdev 0x%p.\n",
+                 daddr6->sin6_addr.s6_addr, ntohs(daddr6->sin6_port), port,
+                 ndev->name, cdev);
+
+       csk = cxgbi_sock_create(cdev);
+       if (!csk) {
+               err = -ENOMEM;
+               goto rel_rt;
+       }
+       csk->cdev = cdev;
+       csk->port_id = port;
+       csk->mtu = mtu;
+       csk->dst = dst;
+
+       if (ipv6_addr_any(&rt->rt6i_prefsrc.addr)) {
+               struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
+
+               err = ipv6_dev_get_saddr(&init_net, idev ? idev->dev : NULL,
+                                        &daddr6->sin6_addr, 0, &pref_saddr);
+               if (err) {
+                       pr_info("failed to get source address to reach %pI6\n",
+                               &daddr6->sin6_addr);
+                       goto rel_rt;
+               }
+       } else {
+               pref_saddr = rt->rt6i_prefsrc.addr;
+       }
+
+       csk->csk_family = AF_INET6;
+       csk->daddr6.sin6_addr = daddr6->sin6_addr;
+       csk->daddr6.sin6_port = daddr6->sin6_port;
+       csk->daddr6.sin6_family = daddr6->sin6_family;
+       csk->saddr6.sin6_addr = pref_saddr;
+
+       neigh_release(n);
+       return csk;
+
+rel_rt:
+       if (n)
+               neigh_release(n);
+
+       ip6_rt_put(rt);
+       if (csk)
+               cxgbi_sock_closed(csk);
+err_out:
+       return ERR_PTR(err);
+}
+#endif /* IS_ENABLED(CONFIG_IPV6) */
+
 void cxgbi_sock_established(struct cxgbi_sock *csk, unsigned int snd_isn,
                        unsigned int opt)
 {
@@ -2194,6 +2357,34 @@ int cxgbi_set_conn_param(struct iscsi_cls_conn *cls_conn,
 }
 EXPORT_SYMBOL_GPL(cxgbi_set_conn_param);
 
+static inline int csk_print_port(struct cxgbi_sock *csk, char *buf)
+{
+       int len;
+
+       cxgbi_sock_get(csk);
+       len = sprintf(buf, "%hu\n", ntohs(csk->daddr.sin_port));
+       cxgbi_sock_put(csk);
+
+       return len;
+}
+
+static inline int csk_print_ip(struct cxgbi_sock *csk, char *buf)
+{
+       int len;
+
+       cxgbi_sock_get(csk);
+       if (csk->csk_family == AF_INET)
+               len = sprintf(buf, "%pI4",
+                             &csk->daddr.sin_addr.s_addr);
+       else
+               len = sprintf(buf, "%pI6",
+                             &csk->daddr6.sin6_addr);
+
+       cxgbi_sock_put(csk);
+
+       return len;
+}
+
 int cxgbi_get_ep_param(struct iscsi_endpoint *ep, enum iscsi_param param,
                       char *buf)
 {
@@ -2447,7 +2638,19 @@ struct iscsi_endpoint *cxgbi_ep_connect(struct Scsi_Host *shost,
                }
        }
 
-       csk = cxgbi_check_route(dst_addr);
+       if (dst_addr->sa_family == AF_INET) {
+               csk = cxgbi_check_route(dst_addr);
+#if IS_ENABLED(CONFIG_IPV6)
+       } else if (dst_addr->sa_family == AF_INET6) {
+               csk = cxgbi_check_route6(dst_addr);
+#endif
+       } else {
+               pr_info("address family 0x%x NOT supported.\n",
+                       dst_addr->sa_family);
+               err = -EAFNOSUPPORT;
+               return (struct iscsi_endpoint *)ERR_PTR(err);
+       }
+
        if (IS_ERR(csk))
                return (struct iscsi_endpoint *)csk;
        cxgbi_sock_get(csk);
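
With the sockaddr unions introduced by this series, sock_get_port() and
sock_put_port() above reach the source port through a single pointer so
the port-map logic stays family-agnostic. A runnable userspace sketch of
the same idea (32768 stands in for pmap->sport_base; the names are
illustrative):

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

struct conn {
        int family;
        union {
                struct sockaddr_in  saddr;
                struct sockaddr_in6 saddr6;
        };
};

/* pick the port field once, then manipulate it for either family */
static uint16_t *port_of(struct conn *c)
{
        return c->family == AF_INET ? &c->saddr.sin_port
                                    : &c->saddr6.sin6_port;
}

int main(void)
{
        struct conn c = { .family = AF_INET6 };
        uint16_t *port = port_of(&c);

        if (*port) {
                fprintf(stderr, "source port NON-ZERO %u\n", ntohs(*port));
                return 1;
        }
        *port = htons(32768 + 5);       /* sport_base + idx */
        printf("bound source port %u\n", ntohs(*port));
        return 0;
}
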
index 8135f04..8ad73d9 100644
@@ -44,6 +44,15 @@ enum cxgbi_dbg_flag {
                        pr_info(fmt, ##__VA_ARGS__); \
        } while (0)
 
+#define pr_info_ipaddr(fmt_trail,                                      \
+                       addr1, addr2, args_trail...)                    \
+do {                                                                   \
+       if (!((1 << CXGBI_DBG_SOCK) & dbg_level))                       \
+               break;                                                  \
+       pr_info("%pISpc - %pISpc, " fmt_trail,                          \
+               addr1, addr2, args_trail);                              \
+} while (0)
+
 /* max. connections per adapter */
 #define CXGBI_MAX_CONN         16384
 
@@ -202,8 +211,15 @@ struct cxgbi_sock {
        spinlock_t lock;
        struct kref refcnt;
        unsigned int state;
-       struct sockaddr_in saddr;
-       struct sockaddr_in daddr;
+       unsigned int csk_family;
+       union {
+               struct sockaddr_in saddr;
+               struct sockaddr_in6 saddr6;
+       };
+       union {
+               struct sockaddr_in daddr;
+               struct sockaddr_in6 daddr6;
+       };
        struct dst_entry *dst;
        struct sk_buff_head receive_queue;
        struct sk_buff_head write_queue;
@@ -692,6 +708,7 @@ struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
 void cxgbi_device_unregister(struct cxgbi_device *);
 void cxgbi_device_unregister_all(unsigned int flag);
 struct cxgbi_device *cxgbi_device_find_by_lldev(void *);
+struct cxgbi_device *cxgbi_device_find_by_netdev(struct net_device *, int *);
 int cxgbi_hbas_add(struct cxgbi_device *, unsigned int, unsigned int,
                        struct scsi_host_template *,
                        struct scsi_transport_template *);
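
The pr_info_ipaddr() macro added above prints the two endpoints ahead of
a caller-supplied trailer and is gated on the CXGBI_DBG_SOCK bit of
dbg_level. %pISpc is a kernel printk extension (struct sockaddr with
port); the userspace sketch below substitutes plain strings for it and
standard __VA_ARGS__ for the GNU-style named variadic arguments:

#include <stdio.h>

enum { DBG_SOCK = 1 };
static unsigned int dbg_level = 1 << DBG_SOCK;

#define pr_info_ipaddr(fmt_trail, addr1, addr2, ...)              \
do {                                                              \
        if (!((1 << DBG_SOCK) & dbg_level))                       \
                break;                                            \
        printf("%s - %s, " fmt_trail, addr1, addr2, __VA_ARGS__); \
} while (0)

int main(void)
{
        pr_info_ipaddr("tid %u, status %u.\n",
                       "10.0.0.1:3260", "10.0.0.2:44861", 5, 0);
        return 0;
}
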
index 2ebfb2b..7b23f21 100644
@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
        if (crq->valid & 0x80) {
                if (++queue->cur == queue->size)
                        queue->cur = 0;
+
+               /* Ensure the read of the valid bit occurs before reading any
+                * other bits of the CRQ entry
+                */
+               rmb();
        } else
                crq = NULL;
        spin_unlock_irqrestore(&queue->lock, flags);
@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
 {
        struct vio_dev *vdev = to_vio_dev(hostdata->dev);
 
+       /*
+        * Ensure the command buffer is flushed to memory before handing it
+        * over to the VIOS to prevent it from fetching any stale data.
+        */
+       mb();
        return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
 }
 
@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
                                       evt->hostdata->dev);
                        if (evt->cmnd_done)
                                evt->cmnd_done(evt->cmnd);
-               } else if (evt->done)
+               } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
+                          evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
                        evt->done(evt);
                free_event_struct(&evt->hostdata->pool, evt);
                spin_lock_irqsave(hostdata->host->host_lock, flags);
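
The rmb()/mb() additions above enforce two orderings: the CRQ valid bit
must be read before any other field of the entry, and the command buffer
must be globally visible before the H_SEND_CRQ hypercall hands it to the
VIOS. A C11 sketch of the consumer/producer pairing (acquire/release is
weaker than the kernel's full mb(), but shows the idea):

#include <stdatomic.h>
#include <stdio.h>

struct crq_entry {
        int payload;
        atomic_int valid;
};

static void produce(struct crq_entry *e, int data)
{
        e->payload = data;
        /* release: payload is visible before the valid flag flips */
        atomic_store_explicit(&e->valid, 1, memory_order_release);
}

static int consume(struct crq_entry *e)
{
        /* acquire pairs with the release; this is the rmb() role */
        if (!atomic_load_explicit(&e->valid, memory_order_acquire))
                return -1;
        return e->payload;      /* guaranteed to see the new data */
}

int main(void)
{
        struct crq_entry e = { 0 };

        produce(&e, 42);
        printf("consumed %d\n", consume(&e));
        return 0;
}
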
index 1e4479f..9270d15 100644
@@ -564,7 +564,7 @@ static void mvs_94xx_interrupt_enable(struct mvs_info *mvi)
        u32 tmp;
 
        tmp = mr32(MVS_GBL_CTL);
-       tmp |= (IRQ_SAS_A | IRQ_SAS_B);
+       tmp |= (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
        mw32(MVS_GBL_INT_STAT, tmp);
        writel(tmp, regs + 0x0C);
        writel(tmp, regs + 0x10);
@@ -580,7 +580,7 @@ static void mvs_94xx_interrupt_disable(struct mvs_info *mvi)
 
        tmp = mr32(MVS_GBL_CTL);
 
-       tmp &= ~(IRQ_SAS_A | IRQ_SAS_B);
+       tmp &= ~(MVS_IRQ_SAS_A | MVS_IRQ_SAS_B);
        mw32(MVS_GBL_INT_STAT, tmp);
        writel(tmp, regs + 0x0C);
        writel(tmp, regs + 0x10);
@@ -596,7 +596,7 @@ static u32 mvs_94xx_isr_status(struct mvs_info *mvi, int irq)
        if (!(mvi->flags & MVF_FLAG_SOC)) {
                stat = mr32(MVS_GBL_INT_STAT);
 
-               if (!(stat & (IRQ_SAS_A | IRQ_SAS_B)))
+               if (!(stat & (MVS_IRQ_SAS_A | MVS_IRQ_SAS_B)))
                        return 0;
        }
        return stat;
@@ -606,8 +606,8 @@ static irqreturn_t mvs_94xx_isr(struct mvs_info *mvi, int irq, u32 stat)
 {
        void __iomem *regs = mvi->regs;
 
-       if (((stat & IRQ_SAS_A) && mvi->id == 0) ||
-                       ((stat & IRQ_SAS_B) && mvi->id == 1)) {
+       if (((stat & MVS_IRQ_SAS_A) && mvi->id == 0) ||
+                       ((stat & MVS_IRQ_SAS_B) && mvi->id == 1)) {
                mw32_f(MVS_INT_STAT, CINT_DONE);
 
                spin_lock(&mvi->lock);
index 487aa6f..14e1974 100644
@@ -150,35 +150,35 @@ enum chip_register_bits {
 
 enum pci_interrupt_cause {
        /*  MAIN_IRQ_CAUSE (R10200) Bits*/
-       IRQ_COM_IN_I2O_IOP0            = (1 << 0),
-       IRQ_COM_IN_I2O_IOP1            = (1 << 1),
-       IRQ_COM_IN_I2O_IOP2            = (1 << 2),
-       IRQ_COM_IN_I2O_IOP3            = (1 << 3),
-       IRQ_COM_OUT_I2O_HOS0           = (1 << 4),
-       IRQ_COM_OUT_I2O_HOS1           = (1 << 5),
-       IRQ_COM_OUT_I2O_HOS2           = (1 << 6),
-       IRQ_COM_OUT_I2O_HOS3           = (1 << 7),
-       IRQ_PCIF_TO_CPU_DRBL0          = (1 << 8),
-       IRQ_PCIF_TO_CPU_DRBL1          = (1 << 9),
-       IRQ_PCIF_TO_CPU_DRBL2          = (1 << 10),
-       IRQ_PCIF_TO_CPU_DRBL3          = (1 << 11),
-       IRQ_PCIF_DRBL0                 = (1 << 12),
-       IRQ_PCIF_DRBL1                 = (1 << 13),
-       IRQ_PCIF_DRBL2                 = (1 << 14),
-       IRQ_PCIF_DRBL3                 = (1 << 15),
-       IRQ_XOR_A                      = (1 << 16),
-       IRQ_XOR_B                      = (1 << 17),
-       IRQ_SAS_A                      = (1 << 18),
-       IRQ_SAS_B                      = (1 << 19),
-       IRQ_CPU_CNTRL                  = (1 << 20),
-       IRQ_GPIO                       = (1 << 21),
-       IRQ_UART                       = (1 << 22),
-       IRQ_SPI                        = (1 << 23),
-       IRQ_I2C                        = (1 << 24),
-       IRQ_SGPIO                      = (1 << 25),
-       IRQ_COM_ERR                    = (1 << 29),
-       IRQ_I2O_ERR                    = (1 << 30),
-       IRQ_PCIE_ERR                   = (1 << 31),
+       MVS_IRQ_COM_IN_I2O_IOP0        = (1 << 0),
+       MVS_IRQ_COM_IN_I2O_IOP1        = (1 << 1),
+       MVS_IRQ_COM_IN_I2O_IOP2        = (1 << 2),
+       MVS_IRQ_COM_IN_I2O_IOP3        = (1 << 3),
+       MVS_IRQ_COM_OUT_I2O_HOS0       = (1 << 4),
+       MVS_IRQ_COM_OUT_I2O_HOS1       = (1 << 5),
+       MVS_IRQ_COM_OUT_I2O_HOS2       = (1 << 6),
+       MVS_IRQ_COM_OUT_I2O_HOS3       = (1 << 7),
+       MVS_IRQ_PCIF_TO_CPU_DRBL0      = (1 << 8),
+       MVS_IRQ_PCIF_TO_CPU_DRBL1      = (1 << 9),
+       MVS_IRQ_PCIF_TO_CPU_DRBL2      = (1 << 10),
+       MVS_IRQ_PCIF_TO_CPU_DRBL3      = (1 << 11),
+       MVS_IRQ_PCIF_DRBL0             = (1 << 12),
+       MVS_IRQ_PCIF_DRBL1             = (1 << 13),
+       MVS_IRQ_PCIF_DRBL2             = (1 << 14),
+       MVS_IRQ_PCIF_DRBL3             = (1 << 15),
+       MVS_IRQ_XOR_A                  = (1 << 16),
+       MVS_IRQ_XOR_B                  = (1 << 17),
+       MVS_IRQ_SAS_A                  = (1 << 18),
+       MVS_IRQ_SAS_B                  = (1 << 19),
+       MVS_IRQ_CPU_CNTRL              = (1 << 20),
+       MVS_IRQ_GPIO                   = (1 << 21),
+       MVS_IRQ_UART                   = (1 << 22),
+       MVS_IRQ_SPI                    = (1 << 23),
+       MVS_IRQ_I2C                    = (1 << 24),
+       MVS_IRQ_SGPIO                  = (1 << 25),
+       MVS_IRQ_COM_ERR                = (1 << 29),
+       MVS_IRQ_I2O_ERR                = (1 << 30),
+       MVS_IRQ_PCIE_ERR               = (1 << 31),
 };
 
 union reg_phy_cfg {
index c4f31b2..e90c89f 100644
@@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
  * pm8001_get_phy_settings_info : Read phy setting values.
  * @pm8001_ha : our hba.
  */
-void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
+static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
 {
 
 #ifdef PM8001_READ_VPD
@@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
        payload.offset = 0;
        payload.length = 4096;
        payload.func_specific = kzalloc(4096, GFP_KERNEL);
+       if (!payload.func_specific)
+               return -ENOMEM;
        /* Read phy setting values from flash */
        PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
        wait_for_completion(&completion);
        pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
+       kfree(payload.func_specific);
 #endif
+       return 0;
 }
 
 #ifdef PM8001_USE_MSIX
@@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
        pm8001_init_sas_add(pm8001_ha);
        /* phy setting support for motherboard controller */
        if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
-               pdev->subsystem_vendor != 0)
-               pm8001_get_phy_settings_info(pm8001_ha);
+               pdev->subsystem_vendor != 0) {
+               rc = pm8001_get_phy_settings_info(pm8001_ha);
+               if (rc)
+                       goto err_out_shost;
+       }
        pm8001_post_sas_ha_init(shost, chip);
        rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
        if (rc)
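
The pm8001 hunk turns a void helper into one that can fail: the 4 KB
flash buffer allocation is checked, -ENOMEM propagates out of probe, and
the buffer is freed once consumed. A small userspace sketch of that
shape; read_flash() is a stand-in for the real get_nvmd_req() round
trip:

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static void read_flash(char *buf, size_t len) { memset(buf, 0, len); }

static int get_phy_settings(void)
{
        char *func_specific = calloc(1, 4096);

        if (!func_specific)
                return -ENOMEM;
        read_flash(func_specific, 4096);
        /* ... apply the phy profile ... */
        free(func_specific);
        return 0;
}

int main(void)
{
        int rc = get_phy_settings();

        if (rc)
                fprintf(stderr, "probe would bail out: %d\n", rc);
        return rc ? 1 : 0;
}
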
index 4b188b0..e632e14 100644
@@ -1128,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
        ctio->u.status1.flags =
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_TERMINATE);
-       ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
+       ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
 
        qla2x00_start_iocbs(vha, vha->req);
 
@@ -1262,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
 {
        struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
        struct ctio7_to_24xx *ctio;
+       uint16_t temp;
 
        ql_dbg(ql_dbg_tgt, ha, 0xe008,
            "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
@@ -1292,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
        ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_SEND_STATUS);
-       ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       ctio->u.status1.ox_id = cpu_to_le16(temp);
        ctio->u.status1.scsi_status =
            __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
        ctio->u.status1.response_len = __constant_cpu_to_le16(8);
@@ -1513,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
        struct ctio7_to_24xx *pkt;
        struct qla_hw_data *ha = vha->hw;
        struct atio_from_isp *atio = &prm->cmd->atio;
+       uint16_t temp;
 
        pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
        prm->pkt = pkt;
@@ -1541,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
        pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
        pkt->exchange_addr = atio->u.isp24.exchange_addr;
        pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
-       pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       pkt->u.status0.ox_id = cpu_to_le16(temp);
        pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
 
        ql_dbg(ql_dbg_tgt, vha, 0xe00c,
            "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
-           vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
-           le16_to_cpu(pkt->u.status0.ox_id));
+           vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
        return 0;
 }
 
@@ -2619,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
        struct qla_hw_data *ha = vha->hw;
        request_t *pkt;
        int ret = 0;
+       uint16_t temp;
 
        ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
 
@@ -2655,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
        ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
            __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
                CTIO7_FLAGS_TERMINATE);
-       ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
+       temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
+       ctio24->u.status1.ox_id = cpu_to_le16(temp);
 
        /* Most likely, it isn't needed */
        ctio24->u.status1.residual = get_unaligned((uint32_t *)
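
The ox_id changes above replace swab16() with be16_to_cpu() followed by
cpu_to_le16(): the FC header carries the field big-endian, the CTIO
expects little-endian, and a blind 16-bit swap is only equivalent on a
little-endian host. The userspace analogues from <endian.h>:

#include <endian.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t wire_be = htobe16(0x1234);     /* as in fcp_hdr */
        uint16_t temp    = be16toh(wire_be);    /* CPU order: 0x1234 */
        uint16_t ctio_le = htole16(temp);       /* as the CTIO wants */

        printf("cpu=%#x le bytes=%02x %02x\n", temp,
               ((uint8_t *)&ctio_le)[0], ((uint8_t *)&ctio_le)[1]);
        return 0;
}
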
index e0a58fd..d1d24fb 100644
@@ -443,7 +443,7 @@ struct ctio7_to_24xx {
                        uint16_t reserved1;
                        __le16 flags;
                        uint32_t residual;
-                       uint16_t ox_id;
+                       __le16 ox_id;
                        uint16_t scsi_status;
                        uint32_t relative_offset;
                        uint32_t reserved2;
@@ -458,7 +458,7 @@ struct ctio7_to_24xx {
                        uint16_t sense_length;
                        uint16_t flags;
                        uint32_t residual;
-                       uint16_t ox_id;
+                       __le16 ox_id;
                        uint16_t scsi_status;
                        uint16_t response_len;
                        uint16_t reserved;
index cbe38e5..7e95791 100644
@@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work)
                                    "aborting command %p\n", scmd));
                rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
                if (rtn == SUCCESS) {
-                       scmd->result |= DID_TIME_OUT << 16;
+                       set_host_byte(scmd, DID_TIME_OUT);
                        if (scsi_host_eh_past_deadline(sdev->host)) {
                                SCSI_LOG_ERROR_RECOVERY(3,
                                        scmd_printk(KERN_INFO, scmd,
@@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
                        scmd_printk(KERN_WARNING, scmd,
                                    "scmd %p terminate "
                                    "aborted command\n", scmd));
-               scmd->result |= DID_TIME_OUT << 16;
+               set_host_byte(scmd, DID_TIME_OUT);
                scsi_finish_command(scmd);
        }
 }
@@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
        else if (host->hostt->eh_timed_out)
                rtn = host->hostt->eh_timed_out(scmd);
 
-       if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort)
-               if (scsi_abort_command(scmd) == SUCCESS)
+       if (rtn == BLK_EH_NOT_HANDLED) {
+               if (!host->hostt->no_async_abort &&
+                   scsi_abort_command(scmd) == SUCCESS)
                        return BLK_EH_NOT_HANDLED;
 
-       scmd->result |= DID_TIME_OUT << 16;
-
-       if (unlikely(rtn == BLK_EH_NOT_HANDLED &&
-                    !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD)))
-               rtn = BLK_EH_HANDLED;
+               set_host_byte(scmd, DID_TIME_OUT);
+               if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
+                       rtn = BLK_EH_HANDLED;
+       }
 
        return rtn;
 }
@@ -1777,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
                break;
        case DID_ABORT:
                if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
-                       scmd->result |= DID_TIME_OUT << 16;
+                       set_host_byte(scmd, DID_TIME_OUT);
                        return SUCCESS;
                }
        case DID_NO_CONNECT:
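
The scsi_error.c hunks switch from OR-ing DID_TIME_OUT into scmd->result
to set_host_byte(), which replaces the host byte (bits 16..23) instead
of merging it into whatever stale value is already there. A sketch of
the difference, assuming the helper's usual mask-and-or form:

#include <stdint.h>
#include <stdio.h>

#define DID_TIME_OUT 0x03

static uint32_t set_host_byte(uint32_t result, uint8_t status)
{
        return (result & 0xff00ffff) | ((uint32_t)status << 16);
}

int main(void)
{
        uint32_t result = 0x00050000;   /* stale host byte present */

        printf("or-ed: %#010x\n", result | (DID_TIME_OUT << 16));
        printf("set  : %#010x\n", set_host_byte(result, DID_TIME_OUT));
        return 0;
}
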
index f80908f..521f583 100644
@@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work)
                        fc_flush_devloss(shost);
                if (!cancel_delayed_work(&rport->dev_loss_work))
                        fc_flush_devloss(shost);
+               cancel_work_sync(&rport->scan_work);
                spin_lock_irqsave(shost->host_lock, flags);
                rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
        }
index e9689d5..6825eda 100644
@@ -2441,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
                }
 
                sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
-               if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
+               if (sdp->broken_fua) {
+                       sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
+                       sdkp->DPOFUA = 0;
+               } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
                        sd_first_printk(KERN_NOTICE, sdkp,
                                  "Uses READ/WRITE(6), disabling FUA\n");
                        sdkp->DPOFUA = 0;
index 89ee592..308256b 100644
@@ -237,6 +237,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
        virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
 };
 
+static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
+{
+       int i, num_vqs;
+
+       num_vqs = vscsi->num_queues;
+       for (i = 0; i < num_vqs; i++)
+               virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
+                                virtscsi_complete_cmd);
+}
+
 static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
 {
        struct virtio_scsi_cmd *cmd = buf;
@@ -253,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
        virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
 };
 
+static void virtscsi_handle_event(struct work_struct *work);
+
 static int virtscsi_kick_event(struct virtio_scsi *vscsi,
                               struct virtio_scsi_event_node *event_node)
 {
@@ -260,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
        struct scatterlist sg;
        unsigned long flags;
 
+       INIT_WORK(&event_node->work, virtscsi_handle_event);
        sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
 
        spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -377,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
 {
        struct virtio_scsi_event_node *event_node = buf;
 
-       INIT_WORK(&event_node->work, virtscsi_handle_event);
        schedule_work(&event_node->work);
 }
 
@@ -589,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
            cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
                ret = SUCCESS;
 
+       /*
+        * The spec guarantees that all requests related to the TMF have
+        * been completed, but the callback might not have run yet if
+        * we're using independent interrupts (e.g. MSI).  Poll the
+        * virtqueues once.
+        *
+        * In the abort case, sc->scsi_done will do nothing, because
+        * the block layer must have detected a timeout and as a result
+        * REQ_ATOM_COMPLETE has been set.
+        */
+       virtscsi_poll_requests(vscsi);
+
 out:
        mempool_free(cmd, virtscsi_cmd_pool);
        return ret;
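
Moving INIT_WORK() from the completion path into virtscsi_kick_event()
closes a race: once the event buffer is queued to the device, the
completion callback may schedule the work at any moment, so the work
item must be fully initialized before it is published. A toy sketch of
the initialize-before-publish rule, where publish() plays the device
completing the buffer immediately:

#include <stdio.h>

struct work { void (*fn)(struct work *); };
struct event_node { struct work work; int event; };

static void handle_event(struct work *w) { puts("event handled"); }

/* stand-in for queueing the buffer: after this, the "device" may
 * complete it and invoke the callback at any time */
static void publish(struct event_node *n)
{
        n->work.fn(&n->work);
}

static void kick_event(struct event_node *n)
{
        n->work.fn = handle_event;      /* INIT_WORK() first ... */
        publish(n);                     /* ... then make it visible */
}

int main(void)
{
        struct event_node n = { .event = 1 };

        kick_event(&n);
        return 0;
}
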
index f6759dc..c41ff14 100644
@@ -368,7 +368,7 @@ int pxa2xx_spi_set_dma_burst_and_threshold(struct chip_data *chip,
         * otherwise we use the default. Also we use the default FIFO
         * thresholds for now.
         */
-       *burst_code = chip_info ? chip_info->dma_burst_size : 16;
+       *burst_code = chip_info ? chip_info->dma_burst_size : 1;
        *threshold = SSCR1_RxTresh(RX_THRESH_DFLT)
                   | SSCR1_TxTresh(TX_THRESH_DFLT);
 
index a98df7e..fe79210 100644
@@ -118,6 +118,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
         */
        orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
 
+       /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
        value = orig | SPI_CS_CONTROL_SW_MODE;
        writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
        value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
@@ -126,10 +127,13 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
                goto detection_done;
        }
 
-       value &= ~SPI_CS_CONTROL_SW_MODE;
+       orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
+
+       /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
+       value = orig & ~SPI_CS_CONTROL_SW_MODE;
        writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
        value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
-       if (value != orig) {
+       if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
                offset = 0x800;
                goto detection_done;
        }
index fc1de86..c08da38 100644
@@ -424,31 +424,6 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
        return 0;
 }
 
-static void spi_qup_set_cs(struct spi_device *spi, bool enable)
-{
-       struct spi_qup *controller = spi_master_get_devdata(spi->master);
-
-       u32 iocontol, mask;
-
-       iocontol = readl_relaxed(controller->base + SPI_IO_CONTROL);
-
-       /* Disable auto CS toggle and use manual */
-       iocontol &= ~SPI_IO_C_MX_CS_MODE;
-       iocontol |= SPI_IO_C_FORCE_CS;
-
-       iocontol &= ~SPI_IO_C_CS_SELECT_MASK;
-       iocontol |= SPI_IO_C_CS_SELECT(spi->chip_select);
-
-       mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
-
-       if (enable)
-               iocontol |= mask;
-       else
-               iocontol &= ~mask;
-
-       writel_relaxed(iocontol, controller->base + SPI_IO_CONTROL);
-}
-
 static int spi_qup_transfer_one(struct spi_master *master,
                              struct spi_device *spi,
                              struct spi_transfer *xfer)
@@ -571,12 +546,16 @@ static int spi_qup_probe(struct platform_device *pdev)
                return -ENOMEM;
        }
 
+       /* use num-cs if present and in range, otherwise the default */
+       if (of_property_read_u16(dev->of_node, "num-cs",
+                       &master->num_chipselect) ||
+                       (master->num_chipselect > SPI_NUM_CHIPSELECTS))
+               master->num_chipselect = SPI_NUM_CHIPSELECTS;
+
        master->bus_num = pdev->id;
        master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
-       master->num_chipselect = SPI_NUM_CHIPSELECTS;
        master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
        master->max_speed_hz = max_freq;
-       master->set_cs = spi_qup_set_cs;
        master->transfer_one = spi_qup_transfer_one;
        master->dev.of_node = pdev->dev.of_node;
        master->auto_runtime_pm = true;
@@ -640,16 +619,19 @@ static int spi_qup_probe(struct platform_device *pdev)
        if (ret)
                goto error;
 
-       ret = devm_spi_register_master(dev, master);
-       if (ret)
-               goto error;
-
        pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
        pm_runtime_use_autosuspend(dev);
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
+
+       ret = devm_spi_register_master(dev, master);
+       if (ret)
+               goto disable_pm;
+
        return 0;
 
+disable_pm:
+       pm_runtime_disable(&pdev->dev);
 error:
        clk_disable_unprepare(cclk);
        clk_disable_unprepare(iclk);
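
The spi-qup probe hunk reorders setup so runtime PM is enabled before
the master is registered (registration can immediately trigger
transfers) and adds a disable_pm label so a late failure unwinds it. A
stub sketch of the goto-unwind ordering:

#include <stdio.h>

static int  pm_enable(void)           { puts("pm enabled");  return 0; }
static void pm_disable(void)          { puts("pm disabled"); }
static int  register_master(int fail) { return fail ? -1 : 0; }
static void clocks_off(void)          { puts("clocks off"); }

static int probe(int fail_register)
{
        int ret;

        ret = pm_enable();
        if (ret)
                goto error;

        ret = register_master(fail_register);
        if (ret)
                goto disable_pm;

        return 0;

disable_pm:
        pm_disable();
error:
        clocks_off();
        return ret;
}

int main(void)
{
        printf("probe returned %d\n", probe(1));
        return 0;
}
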
index 1f56ef6..b83dd73 100644
@@ -175,9 +175,9 @@ static int sh_sci_spi_remove(struct platform_device *dev)
 {
        struct sh_sci_spi *sp = platform_get_drvdata(dev);
 
-       iounmap(sp->membase);
-       setbits(sp, PIN_INIT, 0);
        spi_bitbang_stop(&sp->bitbang);
+       setbits(sp, PIN_INIT, 0);
+       iounmap(sp->membase);
        spi_master_put(sp->bitbang.master);
        return 0;
 }
index 2c61783..c341ac1 100644
@@ -97,7 +97,6 @@ void timed_output_dev_unregister(struct timed_output_dev *tdev)
 {
        tdev->enable(tdev, 0);
        device_destroy(timed_output_class, MKDEV(0, tdev->index));
-       dev_set_drvdata(tdev->dev, NULL);
 }
 EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
 
index 5d56428..a2f6957 100644
@@ -651,6 +651,7 @@ config COMEDI_ADDI_APCI_1516
 
 config COMEDI_ADDI_APCI_1564
        tristate "ADDI-DATA APCI_1564 support"
+       select COMEDI_ADDI_WATCHDOG
        ---help---
          Enable support for ADDI-DATA APCI_1564 cards
 
index 09f3d5c..85d776b 100644
@@ -917,7 +917,8 @@ c4_add_dev(hdw_info_t *hi, int brdno, unsigned long f0, unsigned long f1,
        struct net_device *ndev;
        ci_t       *ci;
 
-       ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, c4_setup);
+       ndev = alloc_netdev(sizeof(ci_t), SBE_IFACETMPL, NET_NAME_UNKNOWN,
+                           c4_setup);
        if (!ndev) {
                pr_warning("%s: no memory for struct net_device !\n",
                           hi->devname);
index 64c55b9..c226852 100644
@@ -885,7 +885,7 @@ int register_lte_device(struct phy_dev *phy_dev,
 
                /* Allocate netdev */
                net = alloc_netdev(sizeof(struct nic), pdn_dev_name,
-                               ether_setup);
+                                  NET_NAME_UNKNOWN, ether_setup);
                if (net == NULL) {
                        pr_err("alloc_netdev failed\n");
                        ret = -ENOMEM;
index e5e5115..a9a6fc5 100644
@@ -886,7 +886,8 @@ int register_wimax_device(struct phy_dev *phy_dev, struct device *pdev)
        struct net_device *dev;
        int ret;
 
-       dev = alloc_netdev(sizeof(*nic), "wm%d", ether_setup);
+       dev = alloc_netdev(sizeof(*nic), "wm%d", NET_NAME_UNKNOWN,
+                          ether_setup);
 
        if (dev == NULL) {
                pr_err("alloc_etherdev failed\n");
index b36feb0..fa38be0 100644
@@ -36,10 +36,11 @@ config IIO_SIMPLE_DUMMY_EVENTS
          Add some dummy events to the simple dummy driver.
 
 config IIO_SIMPLE_DUMMY_BUFFER
-       boolean "Buffered capture support"
-       select IIO_KFIFO_BUF
-       help
-         Add buffered data capture to the simple dummy driver.
+       boolean "Buffered capture support"
+       select IIO_BUFFER
+       select IIO_KFIFO_BUF
+       help
+         Add buffered data capture to the simple dummy driver.
 
 endif # IIO_SIMPLE_DUMMY
 
index 357cef2..7194bd1 100644
@@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client,
        struct ad7291_platform_data *pdata = client->dev.platform_data;
        struct ad7291_chip_info *chip;
        struct iio_dev *indio_dev;
-       int ret = 0;
+       int ret;
 
        indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
        if (!indio_dev)
@@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client,
        if (pdata && pdata->use_external_ref) {
                chip->reg = devm_regulator_get(&client->dev, "vref");
                if (IS_ERR(chip->reg))
-                       return ret;
+                       return PTR_ERR(chip->reg);
 
                ret = regulator_enable(chip->reg);
                if (ret)
index dae8d1a..52d7517 100644
@@ -846,6 +846,14 @@ static int mxs_lradc_read_single(struct iio_dev *iio_dev, int chan, int *val)
                        LRADC_CTRL1);
        mxs_lradc_reg_clear(lradc, 0xff, LRADC_CTRL0);
 
+       /* Enable / disable the divider per requirement */
+       if (test_bit(chan, &lradc->is_divided))
+               mxs_lradc_reg_set(lradc, 1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
+                       LRADC_CTRL2);
+       else
+               mxs_lradc_reg_clear(lradc,
+                       1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET, LRADC_CTRL2);
+
        /* Clean the slot's previous content, then set new one. */
        mxs_lradc_reg_clear(lradc, LRADC_CTRL4_LRADCSELECT_MASK(0),
                        LRADC_CTRL4);
@@ -961,15 +969,11 @@ static int mxs_lradc_write_raw(struct iio_dev *iio_dev,
                if (val == scale_avail[MXS_LRADC_DIV_DISABLED].integer &&
                    val2 == scale_avail[MXS_LRADC_DIV_DISABLED].nano) {
                        /* divider by two disabled */
-                       writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
-                              lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_CLR);
                        clear_bit(chan->channel, &lradc->is_divided);
                        ret = 0;
                } else if (val == scale_avail[MXS_LRADC_DIV_ENABLED].integer &&
                           val2 == scale_avail[MXS_LRADC_DIV_ENABLED].nano) {
                        /* divider by two enabled */
-                       writel(1 << LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET,
-                              lradc->base + LRADC_CTRL2 + STMP_OFFSET_REG_SET);
                        set_bit(chan->channel, &lradc->is_divided);
                        ret = 0;
                }
index 9e0f2a9..ab338e3 100644
@@ -667,9 +667,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
        chip->tsl2x7x_config[TSL2X7X_PRX_COUNT] =
                        chip->tsl2x7x_settings.prox_pulse_count;
        chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHLO] =
-       chip->tsl2x7x_settings.prox_thres_low;
+                       (chip->tsl2x7x_settings.prox_thres_low) & 0xFF;
+       chip->tsl2x7x_config[TSL2X7X_PRX_MINTHRESHHI] =
+                       (chip->tsl2x7x_settings.prox_thres_low >> 8) & 0xFF;
        chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHLO] =
-                       chip->tsl2x7x_settings.prox_thres_high;
+                       (chip->tsl2x7x_settings.prox_thres_high) & 0xFF;
+       chip->tsl2x7x_config[TSL2X7X_PRX_MAXTHRESHHI] =
+                       (chip->tsl2x7x_settings.prox_thres_high >> 8) & 0xFF;
 
        /* and make sure we're not already on */
        if (chip->tsl2x7x_chip_status == TSL2X7X_CHIP_WORKING) {
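
The tsl2x7x fix writes each 16-bit proximity threshold into separate LO
and HI byte registers instead of truncating it to a single byte. The
split is a mask and a shift:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint16_t thres = 0x0234;

        uint8_t lo = thres & 0xFF;              /* ..MINTHRESHLO */
        uint8_t hi = (thres >> 8) & 0xFF;       /* ..MINTHRESHHI */

        printf("thres=%#06x -> lo=%#04x hi=%#04x\n", thres, lo, hi);
        return 0;
}
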
index b567832..4ca61af 100644
@@ -173,6 +173,13 @@ static int imx_pd_register(struct drm_device *drm,
        if (ret)
                return ret;
 
+       /* Set the connector's DPMS state to OFF so that
+        * drm_helper_connector_dpms() won't return early:
+        * the current state would otherwise already read
+        * as ON at this point.
+        */
+       imxpd->connector.dpms = DRM_MODE_DPMS_OFF;
+
        drm_encoder_helper_add(&imxpd->encoder, &imx_pd_encoder_helper_funcs);
        drm_encoder_init(drm, &imxpd->encoder, &imx_pd_encoder_funcs,
                         DRM_MODE_ENCODER_NONE);
index 78b0fba..8afc6fe 100644
@@ -1,6 +1,6 @@
 config VIDEO_OMAP4
        bool "OMAP 4 Camera support"
-       depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API && I2C && ARCH_OMAP4
+       depends on VIDEO_V4L2=y && VIDEO_V4L2_SUBDEV_API && I2C=y && ARCH_OMAP4
        select VIDEOBUF2_DMA_CONTIG
        ---help---
          Driver for an OMAP 4 ISS controller.
index 0acacab..46f5abc 100644
@@ -298,7 +298,7 @@ int rtl8723a_FirmwareDownload(struct rtw_adapter *padapter)
        RT_TRACE(_module_hal_init_c_, _drv_info_, ("+%s\n", __func__));
 
        if (IS_8723A_A_CUT(pHalData->VersionID)) {
-               fw_name = "rtlwifi/rtl8723aufw.bin";
+               fw_name = "rtlwifi/rtl8723aufw_A.bin";
                RT_TRACE(_module_hal_init_c_, _drv_info_,
                         ("rtl8723a_FirmwareDownload: R8723FwImageArray_UMC "
                          "for RTL8723A A CUT\n"));
index 4e32003..1fb3438 100644
@@ -29,7 +29,9 @@ MODULE_AUTHOR("Realtek Semiconductor Corp.");
 MODULE_AUTHOR("Larry Finger <Larry.Finger@lwfinger.net>");
 MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@redhat.com>");
 MODULE_VERSION(DRIVERVERSION);
-MODULE_FIRMWARE("rtlwifi/rtl8821aefw.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_A.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B.bin");
+MODULE_FIRMWARE("rtlwifi/rtl8723aufw_B_NoBT.bin");
 
 /* module param defaults */
 static int rtw_chip_version = 0x00;
index 8945b4e..cb50120 100644
@@ -280,8 +280,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
 
                /* Wait until the state has moved to ON */
-               while (*pdata->dsp_prm_read(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST)&
-                                       OMAP_INTRANSITION_MASK);
+               while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
+                                             OMAP2_PM_PWSTST) &
+                                               OMAP_INTRANSITION_MASK)
+                       ;
                /* Disable Automatic transition */
                (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
                                        OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
index 8392d4d..0814bfd 100644
@@ -89,7 +89,8 @@ static int wpa_init_wpadev(PSDevice pDevice)
        struct net_device *dev = pDevice->dev;
        int ret = 0;
 
-       pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa", wpadev_setup);
+       pDevice->wpadev = alloc_netdev(sizeof(PSDevice), "vntwpa",
+                                      NET_NAME_UNKNOWN, wpadev_setup);
        if (pDevice->wpadev == NULL)
                return -ENOMEM;
 
index 00b186c..6c78f91 100644
@@ -769,7 +769,7 @@ int wlan_setup(wlandevice_t *wlandev, struct device *physdev)
 
        /* Allocate and initialize the struct device */
        netdev = alloc_netdev(sizeof(struct wireless_dev), "wlan%d",
-                               ether_setup);
+                             NET_NAME_UNKNOWN, ether_setup);
        if (netdev == NULL) {
                dev_err(physdev, "Failed to alloc netdev.\n");
                wlan_free_wiphy(wiphy);
index 5663f4d..1f4c794 100644
@@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
        if (cmd->data_direction != DMA_TO_DEVICE) {
                pr_err("Command ITT: 0x%08x received DataOUT for a"
                        " NON-WRITE command.\n", cmd->init_task_tag);
-               return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf);
+               return iscsit_dump_data_payload(conn, payload_length, 1);
        }
        se_cmd = &cmd->se_cmd;
        iscsit_mod_dataout_timer(cmd);
index 19b842c..ab4915c 100644
@@ -174,7 +174,6 @@ static int chap_server_compute_md5(
        char *nr_out_ptr,
        unsigned int *nr_out_len)
 {
-       char *endptr;
        unsigned long id;
        unsigned char id_as_uchar;
        unsigned char digest[MD5_SIGNATURE_SIZE];
@@ -320,9 +319,14 @@ static int chap_server_compute_md5(
        }
 
        if (type == HEX)
-               id = simple_strtoul(&identifier[2], &endptr, 0);
+               ret = kstrtoul(&identifier[2], 0, &id);
        else
-               id = simple_strtoul(identifier, &endptr, 0);
+               ret = kstrtoul(identifier, 0, &id);
+
+       if (ret < 0) {
+               pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
+               goto out;
+       }
        if (id > 255) {
                pr_err("chap identifier: %lu greater than 255\n", id);
                goto out;
@@ -351,6 +355,10 @@ static int chap_server_compute_md5(
                pr_err("Unable to convert incoming challenge\n");
                goto out;
        }
+       if (challenge_len > 1024) {
+               pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
+               goto out;
+       }
        /*
         * During mutual authentication, the CHAP_C generated by the
         * initiator must not match the original CHAP_C generated by
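
Note: unlike simple_strtoul(), kstrtoul() rejects trailing garbage and
overflow and reports the failure as a negative errno, which is what makes the
error check above possible. A minimal sketch of the pattern; parse_chap_id()
is illustrative, not from the patch:

    #include <linux/kernel.h>

    /* parse a CHAP identifier; base 0 auto-detects a "0x" hex prefix */
    static int parse_chap_id(const char *s, unsigned long *id)
    {
            int ret = kstrtoul(s, 0, id);

            if (ret < 0)
                    return ret;             /* -EINVAL or -ERANGE */
            return *id > 255 ? -EINVAL : 0; /* the identifier is one octet */
    }
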
index fecb695..5e71ac6 100644 (file)
@@ -1216,7 +1216,7 @@ old_sess_out:
 static int __iscsi_target_login_thread(struct iscsi_np *np)
 {
        u8 *buffer, zero_tsih = 0;
-       int ret = 0, rc, stop;
+       int ret = 0, rc;
        struct iscsi_conn *conn = NULL;
        struct iscsi_login *login;
        struct iscsi_portal_group *tpg = NULL;
@@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
        if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
                np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
                complete(&np->np_restart_comp);
+       } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
+               spin_unlock_bh(&np->np_thread_lock);
+               goto exit;
        } else {
                np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
        }
@@ -1422,10 +1425,8 @@ old_sess_out:
        }
 
 out:
-       stop = kthread_should_stop();
-       /* Wait for another socket.. */
-       if (!stop)
-               return 1;
+       return 1;
+
 exit:
        iscsi_stop_login_thread_timer(np);
        spin_lock_bh(&np->np_thread_lock);
@@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
 
        allow_signal(SIGINT);
 
-       while (!kthread_should_stop()) {
+       while (1) {
                ret = __iscsi_target_login_thread(np);
                /*
                 * We break and exit here unless another sock_accept() call
index 53e157c..fd90b28 100644 (file)
@@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
        login->login_failed = 1;
        iscsit_collect_login_stats(conn, status_class, status_detail);
 
+       memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
+
        hdr     = (struct iscsi_login_rsp *)&login->rsp[0];
        hdr->opcode             = ISCSI_OP_LOGIN_RSP;
        hdr->status_class       = status_class;
index 6d2f375..8c64b87 100644 (file)
@@ -239,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
        return;
 
 out_done:
+       kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
        sc->scsi_done(sc);
        return;
 }
index 11d26fe..98da901 100644 (file)
@@ -616,6 +616,7 @@ void core_dev_unexport(
        dev->export_count--;
        spin_unlock(&hba->device_lock);
 
+       lun->lun_sep = NULL;
        lun->lun_se_dev = NULL;
 }
 
index a8aaf6a..9465623 100644 (file)
@@ -129,7 +129,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
 
                tc_device_get_irq(tdev);
 
-               device_register(&tdev->dev);
+               if (device_register(&tdev->dev)) {
+                       put_device(&tdev->dev);
+                       goto out_err;
+               }
                list_add_tail(&tdev->node, &tbus->devices);
 
 out_err:
@@ -148,7 +151,10 @@ static int __init tc_init(void)
 
        INIT_LIST_HEAD(&tc_bus.devices);
        dev_set_name(&tc_bus.dev, "tc");
-       device_register(&tc_bus.dev);
+       if (device_register(&tc_bus.dev)) {
+               put_device(&tc_bus.dev);
+               return 0;
+       }
 
        if (tc_bus.info.slot_size) {
                unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
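
Note: both hunks above apply the driver-core rule that a failed
device_register() must be answered with put_device(), never kfree(), so that
the embedded kobject's release path runs exactly once. A sketch of the idiom;
add_child() is illustrative:

    #include <linux/device.h>

    static int add_child(struct device *child)
    {
            int err = device_register(child);

            if (err) {
                    /* drop the reference; ->release() does the freeing */
                    put_device(child);
                    return err;
            }
            return 0;
    }
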
index a99c631..2c516f2 100644 (file)
@@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
 {
        struct imx_thermal_data *data = platform_get_drvdata(pdev);
        struct regmap *map;
-       int t1, t2, n1, n2;
+       int t1, n1;
        int ret;
        u32 val;
        u64 temp64;
@@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        /*
         * Sensor data layout:
         *   [31:20] - sensor value @ 25C
-        *    [19:8] - sensor value of hot
-        *     [7:0] - hot temperature value
         * Use universal formula now and only need sensor value @ 25C
         * slope = 0.4297157 - (0.0015976 * 25C fuse)
         */
        n1 = val >> 20;
-       n2 = (val & 0xfff00) >> 8;
-       t2 = val & 0xff;
        t1 = 25; /* t1 always 25C */
 
        /*
@@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev)
        data->c2 = n1 * data->c1 + 1000 * t1;
 
        /*
-        * Set the default passive cooling trip point to 20 °C below the
-        * maximum die temperature. Can be changed from userspace.
+        * Set the default passive cooling trip point,
+        * can be changed from userspace.
         */
-       data->temp_passive = 1000 * (t2 - 20);
+       data->temp_passive = IMX_TEMP_PASSIVE;
 
        /*
-        * The maximum die temperature is t2, let's give 5 °C cushion
-        * for noise and possible temperature rise between measurements.
+        * The maximum die temperature is set to 20 °C higher than
+        * IMX_TEMP_PASSIVE.
         */
-       data->temp_critical = 1000 * (t2 - 5);
+       data->temp_critical = 1000 * 20 + data->temp_passive;
 
        return 0;
 }
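
Note: thermal trip points are kept in millicelsius, so the "1000 * 20" above
is a 20 °C offset. Assuming a passive trip of 85 °C (illustrative; the real
value comes from the driver's IMX_TEMP_PASSIVE constant), the arithmetic is:

    #define EXAMPLE_TEMP_PASSIVE 85000              /* 85 °C, illustrative */

    static void example_trips(int *passive, int *critical)
    {
            *passive  = EXAMPLE_TEMP_PASSIVE;
            *critical = 1000 * 20 + *passive;       /* 105000, i.e. 105 °C */
    }
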
index 04b1be7..4b2b999 100644 (file)
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
 
                        ret = thermal_zone_bind_cooling_device(thermal,
                                                tbp->trip_id, cdev,
-                                               tbp->min,
-                                               tbp->max);
+                                               tbp->max,
+                                               tbp->min);
                        if (ret)
                                return ret;
                }
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
        }
 
        i = 0;
-       for_each_child_of_node(child, gchild)
+       for_each_child_of_node(child, gchild) {
                ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
                                                      tz->trips, tz->ntrips);
                if (ret)
                        goto free_tbps;
+       }
 
 finish:
        of_node_put(child);
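
Note: the added braces are the whole fix. for_each_child_of_node() expands to
a plain for loop, so without them only the assignment was the loop body and
the error check ran once, after the loop, seeing only the last child's status.
Schematically, with populate() standing in for the real call:

    /* before: */
    for_each_child_of_node(child, gchild)
            ret = populate(gchild);         /* loop body: this line only  */
    if (ret)                                /* runs once, after the loop  */
            goto free_tbps;

    /* after: every iteration's return value is checked */
    for_each_child_of_node(child, gchild) {
            ret = populate(gchild);
            if (ret)
                    goto free_tbps;
    }
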
index fdb0719..1967bee 100644 (file)
@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
        return NULL;
 }
 
+static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
+{
+       unsigned long temp;
+       return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
+}
+
 int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
 {
        struct thermal_hwmon_device *hwmon;
@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
        if (result)
                goto free_temp_mem;
 
-       if (tz->ops->get_crit_temp) {
-               unsigned long temperature;
-               if (!tz->ops->get_crit_temp(tz, &temperature)) {
-                       snprintf(temp->temp_crit.name,
-                                sizeof(temp->temp_crit.name),
+       if (thermal_zone_crit_temp_valid(tz)) {
+               snprintf(temp->temp_crit.name,
+                               sizeof(temp->temp_crit.name),
                                "temp%d_crit", hwmon->count);
-                       temp->temp_crit.attr.attr.name = temp->temp_crit.name;
-                       temp->temp_crit.attr.attr.mode = 0444;
-                       temp->temp_crit.attr.show = temp_crit_show;
-                       sysfs_attr_init(&temp->temp_crit.attr.attr);
-                       result = device_create_file(hwmon->device,
-                                                   &temp->temp_crit.attr);
-                       if (result)
-                               goto unregister_input;
-               }
+               temp->temp_crit.attr.attr.name = temp->temp_crit.name;
+               temp->temp_crit.attr.attr.mode = 0444;
+               temp->temp_crit.attr.show = temp_crit_show;
+               sysfs_attr_init(&temp->temp_crit.attr.attr);
+               result = device_create_file(hwmon->device,
+                                           &temp->temp_crit.attr);
+               if (result)
+                       goto unregister_input;
        }
 
        mutex_lock(&thermal_hwmon_list_lock);
@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
        }
 
        device_remove_file(hwmon->device, &temp->temp_input.attr);
-       if (tz->ops->get_crit_temp)
+       if (thermal_zone_crit_temp_valid(tz))
                device_remove_file(hwmon->device, &temp->temp_crit.attr);
 
        mutex_lock(&thermal_hwmon_list_lock);
index a1271b5..634b6ce 100644 (file)
@@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
        /* register shadow for context save and restore */
        bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
                                   bgp->conf->sensor_count, GFP_KERNEL);
-       if (!bgp) {
+       if (!bgp->regval) {
                dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
                return ERR_PTR(-ENOMEM);
        }
index 2ebe47b..cde3ab9 100644 (file)
@@ -2789,9 +2789,8 @@ static int gsm_create_network(struct gsm_dlci *dlci, struct gsm_netconfig *nc)
        netname = "gsm%d";
        if (nc->if_name[0] != '\0')
                netname = nc->if_name;
-       net = alloc_netdev(sizeof(struct gsm_mux_net),
-                       netname,
-                       gsm_mux_net_init);
+       net = alloc_netdev(sizeof(struct gsm_mux_net), netname,
+                          NET_NAME_UNKNOWN, gsm_mux_net_init);
        if (!net) {
                pr_err("alloc_netdev failed");
                return -ENOMEM;
index f95569d..f44f1ba 100644 (file)
@@ -1214,15 +1214,16 @@ static void n_tty_receive_parity_error(struct tty_struct *tty, unsigned char c)
 {
        struct n_tty_data *ldata = tty->disc_data;
 
-       if (I_IGNPAR(tty))
-               return;
-       if (I_PARMRK(tty)) {
-               put_tty_queue('\377', ldata);
-               put_tty_queue('\0', ldata);
-               put_tty_queue(c, ldata);
-       } else  if (I_INPCK(tty))
-               put_tty_queue('\0', ldata);
-       else
+       if (I_INPCK(tty)) {
+               if (I_IGNPAR(tty))
+                       return;
+               if (I_PARMRK(tty)) {
+                       put_tty_queue('\377', ldata);
+                       put_tty_queue('\0', ldata);
+                       put_tty_queue(c, ldata);
+               } else
+                       put_tty_queue('\0', ldata);
+       } else
                put_tty_queue(c, ldata);
        if (waitqueue_active(&tty->read_wait))
                wake_up_interruptible(&tty->read_wait);
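
Note: the restructuring matches the termios model, in which INPCK is the
master switch for input parity checking while IGNPAR and PARMRK only choose
what to do with a byte that failed the check; with INPCK clear, the byte is
delivered unmodified. A userspace sketch of the flags this path now honors:

    #include <termios.h>

    /* check input parity and mark bad bytes instead of dropping them */
    static void enable_parity_marking(struct termios *t)
    {
            t->c_iflag |= INPCK | PARMRK;
            t->c_iflag &= ~IGNPAR;
            /* a bad byte now reads back as the sequence 0377 0000 <byte> */
    }
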
index 27f7ad6..7a91c6d 100644 (file)
@@ -2357,7 +2357,7 @@ serial8250_do_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_LSR_BI;
 
        /*
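
Note: this two-line change recurs in nearly every set_termios hunk below.
read_status_mask selects which receive-status bits the driver keeps, and
ignore_status_mask then discards some of them; when IGNBRK is set without
BRKINT or PARMRK, the break bit must still be in read_status_mask, otherwise
the break is never matched by ignore_status_mask and leaks through as a
spurious NUL. The shared shape, with placeholder status bits (OVERRUN_BIT and
friends are not real macros):

    port->read_status_mask = OVERRUN_BIT;
    if (termios->c_iflag & INPCK)
            port->read_status_mask |= FRAME_BIT | PARITY_BIT;
    if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
            port->read_status_mask |= BREAK_BIT;    /* keep breaks visible */

    port->ignore_status_mask = 0;
    if (termios->c_iflag & IGNBRK)
            port->ignore_status_mask |= BREAK_BIT;  /* ...so this can drop them */
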
index cfef801..4858b8a 100644 (file)
@@ -144,8 +144,11 @@ static int __init early_serial8250_setup(struct earlycon_device *device,
        if (!(device->port.membase || device->port.iobase))
                return 0;
 
-       if (!device->baud)
+       if (!device->baud) {
                device->baud = probe_baud(&device->port);
+               snprintf(device->options, sizeof(device->options), "%u",
+                        device->baud);
+       }
 
        init_port(device);
 
index 501667e..3233766 100644 (file)
@@ -185,6 +185,12 @@ static void altera_uart_set_termios(struct uart_port *port,
        uart_update_timeout(port, termios->c_cflag, baud);
        altera_uart_writel(port, baudclk, ALTERA_UART_DIVISOR_REG);
        spin_unlock_irqrestore(&port->lock, flags);
+
+       /*
+        * FIXME: port->read_status_mask and port->ignore_status_mask
+        * need to be initialized based on termios settings for
+        * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
+        */
 }
 
 static void altera_uart_rx_chars(struct altera_uart *pp)
index 01c9e72..971af1e 100644 (file)
@@ -420,7 +420,7 @@ pl010_set_termios(struct uart_port *port, struct ktermios *termios,
        uap->port.read_status_mask = UART01x_RSR_OE;
        if (termios->c_iflag & INPCK)
                uap->port.read_status_mask |= UART01x_RSR_FE | UART01x_RSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                uap->port.read_status_mask |= UART01x_RSR_BE;
 
        /*
index 908a6e3..0e26dcb 100644 (file)
@@ -1744,7 +1744,7 @@ pl011_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = UART011_DR_OE | 255;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART011_DR_FE | UART011_DR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART011_DR_BE;
 
        /*
index c9f5c9d..008c223 100644 (file)
@@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart)
                uart->port.icount.tx++;
                uart->port.x_char = 0;
                sent = 1;
-       } else if (xmit->tail != xmit->head) {  /* TODO: uart_circ_empty */
+       } else if (!uart_circ_empty(xmit)) {
                ch = xmit->buf[xmit->tail];
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
                uart->port.icount.tx++;
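
Note: this resolves the TODO with the stock helper from
include/linux/serial_core.h, which is exactly the open-coded test inverted:

    #define uart_circ_empty(circ)   ((circ)->head == (circ)->tail)

    /* hence: (xmit->tail != xmit->head)  <=>  !uart_circ_empty(xmit) */
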
index 3fceae0..c4f7503 100644 (file)
@@ -1932,7 +1932,7 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = ATMEL_US_OVRE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= (ATMEL_US_FRAME | ATMEL_US_PARE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= ATMEL_US_RXBRK;
 
        if (atmel_use_pdc_rx(port))
index a47421e..2315190 100644 (file)
@@ -567,7 +567,7 @@ static void bcm_uart_set_termios(struct uart_port *port,
                port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
                port->read_status_mask |= UART_FIFO_PARERR_MASK;
        }
-       if (new->c_iflag & (BRKINT))
+       if (new->c_iflag & (IGNBRK | BRKINT))
                port->read_status_mask |= UART_FIFO_BRKDET_MASK;
 
        port->ignore_status_mask = 0;
index 869ceba..ac86a20 100644 (file)
@@ -833,7 +833,7 @@ bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = OE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= (FE | PE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= BI;
 
        /*
index 2f2b2e5..cdbbc78 100644 (file)
@@ -625,7 +625,7 @@ static void dz_set_termios(struct uart_port *uport, struct ktermios *termios,
        dport->port.read_status_mask = DZ_OERR;
        if (termios->c_iflag & INPCK)
                dport->port.read_status_mask |= DZ_FERR | DZ_PERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                dport->port.read_status_mask |= DZ_BREAK;
 
        /* characters to ignore */
index 5131b5e..a514ee6 100644 (file)
@@ -25,7 +25,7 @@
 #include <asm/serial.h>
 
 static struct console early_con = {
-       .name =         "earlycon",
+       .name =         "uart", /* 8250 console switch requires this name */
        .flags =        CON_PRINTBUFFER | CON_BOOT,
        .index =        -1,
 };
index b373f64..3b0ee9a 100644 (file)
@@ -407,7 +407,7 @@ static void efm32_uart_set_termios(struct uart_port *port,
        if (new->c_iflag & INPCK)
                port->read_status_mask |=
                        UARTn_RXDATAX_FERR | UARTn_RXDATAX_PERR;
-       if (new->c_iflag & (BRKINT | PARMRK))
+       if (new->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= SW_UARTn_RXDATAX_BERR;
 
        port->ignore_status_mask = 0;
index c5eb897..49385c8 100644 (file)
@@ -902,7 +902,7 @@ lpuart_set_termios(struct uart_port *port, struct ktermios *termios,
        sport->port.read_status_mask = 0;
        if (termios->c_iflag & INPCK)
                sport->port.read_status_mask |= (UARTSR1_FE | UARTSR1_PE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                sport->port.read_status_mask |= UARTSR1_FE;
 
        /* characters to ignore */
index e2f9387..044e86d 100644 (file)
@@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port)
        struct imx_port *sport = (struct imx_port *)port;
        unsigned long temp;
 
+       if (uart_circ_empty(&port->state->xmit))
+               return;
+
        if (USE_IRDA(sport)) {
                /* half duplex in IrDA mode; have to disable receive mode */
                temp = readl(sport->port.membase + UCR4);
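
Note: the same guard is added to the ip22zilog, m32r_sio, pmz, sunsab and
sunzilog start_tx paths below. start_tx() can be invoked with an empty
transmit ring (the x_char path, for one), and these drivers pushed
xmit->buf[xmit->tail] to the hardware unconditionally, transmitting a stale
byte. The common shape of the fix; example_tx_byte() stands in for the
device-specific register write:

    #include <linux/serial_core.h>

    static void example_tx_byte(struct uart_port *port, unsigned char c);

    static void example_start_tx(struct uart_port *port)
    {
            struct circ_buf *xmit = &port->state->xmit;

            if (uart_circ_empty(xmit))
                    return;         /* nothing queued: don't emit stale data */

            example_tx_byte(port, xmit->buf[xmit->tail]);
            xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
            port->icount.tx++;
    }
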
index 1d94205..99b7b86 100644 (file)
@@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port)
        } else {
                struct circ_buf *xmit = &port->state->xmit;
 
+               if (uart_circ_empty(xmit))
+                       return;
                writeb(xmit->buf[xmit->tail], &channel->data);
                ZSDELAY();
                ZS_WSYNC(channel);
@@ -850,7 +852,7 @@ ip22zilog_convert_to_zs(struct uart_ip22zilog_port *up, unsigned int cflag,
        up->port.read_status_mask = Rx_OVR;
        if (iflag & INPCK)
                up->port.read_status_mask |= CRC_ERR | PAR_ERR;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= BRK_ABRT;
 
        up->port.ignore_status_mask = 0;
index 9cd9b4e..5702828 100644 (file)
@@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port)
        if (!(up->ier & UART_IER_THRI)) {
                up->ier |= UART_IER_THRI;
                serial_out(up, UART_IER, up->ier);
-               serial_out(up, UART_TX, xmit->buf[xmit->tail]);
-               xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
-               up->port.icount.tx++;
+               if (!uart_circ_empty(xmit)) {
+                       serial_out(up, UART_TX, xmit->buf[xmit->tail]);
+                       xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
+                       up->port.icount.tx++;
+               }
        }
        while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
 #else
@@ -737,7 +739,7 @@ static void m32r_sio_set_termios(struct uart_port *port,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /*
index 2a99d0c..ba285cd 100644 (file)
@@ -835,7 +835,7 @@ static void max310x_set_termios(struct uart_port *port,
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= MAX310X_LSR_RXPAR_BIT |
                                          MAX310X_LSR_FRERR_BIT;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= MAX310X_LSR_RXBRK_BIT;
 
        /* Set status ignore mask */
index 0edfaf8..a6f0857 100644 (file)
@@ -248,6 +248,12 @@ static void mcf_set_termios(struct uart_port *port, struct ktermios *termios,
                mr1 |= MCFUART_MR1_PARITYNONE;
        }
 
+       /*
+        * FIXME: port->read_status_mask and port->ignore_status_mask
+        * need to be initialized based on termios settings for
+        * INPCK, IGNBRK, IGNPAR, PARMRK, BRKINT
+        */
+
        if (termios->c_cflag & CSTOPB)
                mr2 |= MCFUART_MR2_STOP2;
        else
index 52c930f..445799d 100644 (file)
@@ -977,7 +977,7 @@ serial_hsu_set_termios(struct uart_port *port, struct ktermios *termios,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /* Characters to ignore */
index e30a3ca..759c6a6 100644 (file)
@@ -1458,7 +1458,7 @@ static void mpsc_set_termios(struct uart_port *port, struct ktermios *termios,
                pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_PE
                        | SDMA_DESC_CMDSTAT_FR;
 
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                pi->port.read_status_mask |= SDMA_DESC_CMDSTAT_BR;
 
        /* Characters/events to ignore */
index 778e376..72000a6 100644 (file)
@@ -582,7 +582,7 @@ static void msm_set_termios(struct uart_port *port, struct ktermios *termios,
        port->read_status_mask = 0;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= UART_SR_PAR_FRAME_ERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_SR_RX_BREAK;
 
        uart_update_timeout(port, termios->c_cflag, baud);
@@ -991,7 +991,7 @@ static const struct of_device_id msm_uartdm_table[] = {
        { }
 };
 
-static int __init msm_serial_probe(struct platform_device *pdev)
+static int msm_serial_probe(struct platform_device *pdev)
 {
        struct msm_port *msm_port;
        struct resource *resource;
index 4b5b3c2..86de447 100644 (file)
@@ -604,7 +604,7 @@ static void mxs_auart_settermios(struct uart_port *u,
 
        if (termios->c_iflag & INPCK)
                u->read_status_mask |= AUART_STAT_PERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                u->read_status_mask |= AUART_STAT_BERR;
 
        /*
index 0a4dd70..7a67456 100644 (file)
@@ -419,7 +419,7 @@ netx_set_termios(struct uart_port *port, struct ktermios *termios,
        }
 
        port->read_status_mask = 0;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= SR_BE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= SR_PE | SR_FE;
index e9d420f..f7ad5b9 100644 (file)
@@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port)
        } else {
                struct circ_buf *xmit = &port->state->xmit;
 
+               if (uart_circ_empty(xmit))
+                       goto out;
                write_zsdata(uap, xmit->buf[xmit->tail]);
                zssync(uap);
                xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port)
                if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
                        uart_write_wakeup(&uap->port);
        }
+ out:
        pmz_debug("pmz: start_tx() done.\n");
 }
 
@@ -1092,7 +1095,7 @@ static void pmz_convert_to_zs(struct uart_pmac_port *uap, unsigned int cflag,
        uap->port.read_status_mask = Rx_OVR;
        if (iflag & INPCK)
                uap->port.read_status_mask |= CRC_ERR | PAR_ERR;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                uap->port.read_status_mask |= BRK_ABRT;
 
        uap->port.ignore_status_mask = 0;
index de6c05c..2ba24a4 100644 (file)
@@ -477,7 +477,7 @@ pnx8xxx_set_termios(struct uart_port *port, struct ktermios *termios,
                sport->port.read_status_mask |=
                        FIFO_TO_SM(PNX8XXX_UART_FIFO_RXFE) |
                        FIFO_TO_SM(PNX8XXX_UART_FIFO_RXPAR);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                sport->port.read_status_mask |=
                        ISTAT_TO_SM(PNX8XXX_UART_INT_BREAK);
 
index 9e7ee39..c638c53 100644 (file)
@@ -492,7 +492,7 @@ serial_pxa_set_termios(struct uart_port *port, struct ktermios *termios,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /*
index 3293377..c1d3ebd 100644 (file)
@@ -66,7 +66,7 @@ static void dbg(const char *fmt, ...)
        char buff[256];
 
        va_start(va, fmt);
-       vscnprintf(buff, sizeof(buf), fmt, va);
+       vscnprintf(buff, sizeof(buff), fmt, va);
        va_end(va);
 
        printascii(buff);
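
Note: a one-character bug class worth flagging: sizeof(buf) compiled only
because some other symbol named buf was in scope, so the bound handed to
vscnprintf() was that object's size rather than the 256-byte array's.
Schematically, assuming the stray symbol was a pointer:

    char *buf;                  /* unrelated symbol that keeps it compiling */
    char buff[256];

    vscnprintf(buff, sizeof(buf), fmt, va);     /* bound = sizeof(char *)   */
    vscnprintf(buff, sizeof(buff), fmt, va);    /* bound = 256, as intended */
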
index a7cdec2..771f361 100644 (file)
@@ -596,7 +596,7 @@ static void sbd_set_termios(struct uart_port *uport, struct ktermios *termios,
        if (termios->c_iflag & INPCK)
                uport->read_status_mask |= M_DUART_FRM_ERR |
                                           M_DUART_PARITY_ERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                uport->read_status_mask |= M_DUART_RCVD_BRK;
 
        uport->ignore_status_mask = 0;
index 5443b46..e84b6a3 100644 (file)
@@ -665,7 +665,7 @@ static void sccnxp_set_termios(struct uart_port *port,
        port->read_status_mask = SR_OVR;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= SR_PE | SR_FE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= SR_BRK;
 
        /* Set status ignore mask */
index e1caa99..5c79bda 100644 (file)
@@ -437,7 +437,7 @@ static void ks8695uart_set_termios(struct uart_port *port, struct ktermios *term
        port->read_status_mask = URLS_URROE;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= (URLS_URFE | URLS_URPE);
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= URLS_URBI;
 
        /*
index 60f49b9..ea85460 100644 (file)
@@ -697,7 +697,7 @@ serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
                TXX9_SIDISR_TDIS | TXX9_SIDISR_RDIS;
        if (termios->c_iflag & INPCK)
                up->port.read_status_mask |= TXX9_SIDISR_UFER | TXX9_SIDISR_UPER;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= TXX9_SIDISR_UBRK;
 
        /*
index 1f2be48..9b4d71c 100644 (file)
@@ -896,7 +896,7 @@ static void sirfsoc_uart_set_termios(struct uart_port *port,
                if (termios->c_iflag & INPCK)
                        port->read_status_mask |= uint_en->sirfsoc_frm_err_en;
        }
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                        port->read_status_mask |= uint_en->sirfsoc_rxd_brk_en;
        if (sirfport->uart_reg->uart_type == SIRF_REAL_UART) {
                if (termios->c_iflag & IGNPAR)
index c7f61ac..f48b1cc 100644 (file)
@@ -547,7 +547,7 @@ static void asc_set_termios(struct uart_port *port, struct ktermios *termios,
        ascport->port.read_status_mask = ASC_RXBUF_DUMMY_OE;
        if (termios->c_iflag & INPCK)
                ascport->port.read_status_mask |= ASC_RXBUF_FE | ASC_RXBUF_PE;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                ascport->port.read_status_mask |= ASC_RXBUF_DUMMY_BE;
 
        /*
index 5faa8e9..2f57df9 100644 (file)
@@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port)
        struct circ_buf *xmit = &up->port.state->xmit;
        int i;
 
+       if (uart_circ_empty(xmit))
+               return;
+
        up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
        writeb(up->interrupt_mask1, &up->regs->w.imr1);
        
@@ -719,7 +722,7 @@ static void sunsab_convert_to_sab(struct uart_sunsab_port *up, unsigned int cfla
        if (iflag & INPCK)
                up->port.read_status_mask |= (SAB82532_ISR0_PERR |
                                              SAB82532_ISR0_FERR);
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= (SAB82532_ISR1_BRK << 8);
 
        /*
index 9a0f24f..5326ae1 100644 (file)
@@ -834,7 +834,7 @@ sunsu_change_speed(struct uart_port *port, unsigned int cflag,
        up->port.read_status_mask = UART_LSR_OE | UART_LSR_THRE | UART_LSR_DR;
        if (iflag & INPCK)
                up->port.read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= UART_LSR_BI;
 
        /*
index a2c40ed..02df394 100644 (file)
@@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port)
        } else {
                struct circ_buf *xmit = &port->state->xmit;
 
+               if (uart_circ_empty(xmit))
+                       return;
                writeb(xmit->buf[xmit->tail], &channel->data);
                ZSDELAY();
                ZS_WSYNC(channel);
@@ -915,7 +917,7 @@ sunzilog_convert_to_zs(struct uart_sunzilog_port *up, unsigned int cflag,
        up->port.read_status_mask = Rx_OVR;
        if (iflag & INPCK)
                up->port.read_status_mask |= CRC_ERR | PAR_ERR;
-       if (iflag & (BRKINT | PARMRK))
+       if (iflag & (IGNBRK | BRKINT | PARMRK))
                up->port.read_status_mask |= BRK_ABRT;
 
        up->port.ignore_status_mask = 0;
index d569ca5..1c52074 100644 (file)
@@ -936,7 +936,7 @@ static void qe_uart_set_termios(struct uart_port *port,
        port->read_status_mask = BD_SC_EMPTY | BD_SC_OV;
        if (termios->c_iflag & INPCK)
                port->read_status_mask |= BD_SC_FR | BD_SC_PR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= BD_SC_BR;
 
        /*
index a63c14b..db0c8a4 100644 (file)
@@ -559,7 +559,7 @@ static void siu_set_termios(struct uart_port *port, struct ktermios *new,
        port->read_status_mask = UART_LSR_THRE | UART_LSR_OE | UART_LSR_DR;
        if (c_iflag & INPCK)
                port->read_status_mask |= UART_LSR_FE | UART_LSR_PE;
-       if (c_iflag & (BRKINT | PARMRK))
+       if (c_iflag & (IGNBRK | BRKINT | PARMRK))
                port->read_status_mask |= UART_LSR_BI;
 
        port->ignore_status_mask = 0;
index 6a16987..2b65bb7 100644 (file)
@@ -923,7 +923,7 @@ static void zs_set_termios(struct uart_port *uport, struct ktermios *termios,
        uport->read_status_mask = Rx_OVR;
        if (termios->c_iflag & INPCK)
                uport->read_status_mask |= FRM_ERR | PAR_ERR;
-       if (termios->c_iflag & (BRKINT | PARMRK))
+       if (termios->c_iflag & (IGNBRK | BRKINT | PARMRK))
                uport->read_status_mask |= Rx_BRK;
 
        uport->ignore_status_mask = 0;
index 5e0f6ff..b33b00b 100644 (file)
@@ -3226,8 +3226,7 @@ int do_unbind_con_driver(const struct consw *csw, int first, int last, int deflt
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
                con_back = &registered_con_driver[i];
 
-               if (con_back->con &&
-                   !(con_back->flag & CON_DRIVER_FLAG_MODULE)) {
+               if (con_back->con && con_back->con != csw) {
                        defcsw = con_back->con;
                        retval = 0;
                        break;
@@ -3332,6 +3331,7 @@ static int vt_unbind(struct con_driver *con)
 {
        const struct consw *csw = NULL;
        int i, more = 1, first = -1, last = -1, deflt = 0;
+       int ret;
 
        if (!con->con || !(con->flag & CON_DRIVER_FLAG_MODULE) ||
            con_is_graphics(con->con, con->first, con->last))
@@ -3357,8 +3357,10 @@ static int vt_unbind(struct con_driver *con)
 
                if (first != -1) {
                        console_lock();
-                       do_unbind_con_driver(csw, first, last, deflt);
+                       ret = do_unbind_con_driver(csw, first, last, deflt);
                        console_unlock();
+                       if (ret != 0)
+                               return ret;
                }
 
                first = -1;
@@ -3645,17 +3647,20 @@ err:
  */
 int do_unregister_con_driver(const struct consw *csw)
 {
-       int i, retval = -ENODEV;
+       int i;
 
        /* cannot unregister a bound driver */
        if (con_is_bound(csw))
-               goto err;
+               return -EBUSY;
+
+       if (csw == conswitchp)
+               return -EINVAL;
 
        for (i = 0; i < MAX_NR_CON_DRIVER; i++) {
                struct con_driver *con_driver = &registered_con_driver[i];
 
                if (con_driver->con == csw &&
-                   con_driver->flag & CON_DRIVER_FLAG_MODULE) {
+                   con_driver->flag & CON_DRIVER_FLAG_INIT) {
                        vtconsole_deinit_device(con_driver);
                        device_destroy(vtconsole_class,
                                       MKDEV(0, con_driver->node));
@@ -3666,12 +3671,11 @@ int do_unregister_con_driver(const struct consw *csw)
                        con_driver->flag = 0;
                        con_driver->first = 0;
                        con_driver->last = 0;
-                       retval = 0;
-                       break;
+                       return 0;
                }
        }
-err:
-       return retval;
+
+       return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(do_unregister_con_driver);
 
index e371f5a..a673e5b 100644 (file)
@@ -655,7 +655,7 @@ static int uio_mmap_physical(struct vm_area_struct *vma)
 
        if (mem->addr & ~PAGE_MASK)
                return -ENODEV;
-       if (vma->vm_end - vma->vm_start > PAGE_ALIGN(mem->size))
+       if (vma->vm_end - vma->vm_start > mem->size)
                return -EINVAL;
 
        vma->vm_ops = &uio_physical_vm_ops;
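
Note: PAGE_ALIGN() rounded the limit up, so with 4 KiB pages a region smaller
than a page accepted a vma covering the whole page and let userspace map past
the device memory; the new check bounds the mapping by the region size itself:

    /* mem->size         = 0x400
     * PAGE_ALIGN(0x400) = 0x1000 (4 KiB pages)
     * old: a 0x1000-byte vma passed the check, exposing 0xc00 extra bytes
     * new: vm_end - vm_start may not exceed 0x400
     */
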
index 69425b3..b8125aa 100644 (file)
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
 
        if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
                cap |= QH_IOS;
-       if (hwep->num)
-               cap |= QH_ZLT;
+
+       cap |= QH_ZLT;
        cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
        /*
         * For ISO-TX, we set mult at QH as the largest value, and use
@@ -1321,6 +1321,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
        struct ci_hw_ep  *hwep  = container_of(ep,  struct ci_hw_ep, ep);
        struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
        unsigned long flags;
+       struct td_node *node, *tmpnode;
 
        if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
                hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
@@ -1331,6 +1332,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
 
        hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
 
+       list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
+               dma_pool_free(hwep->td_pool, node->ptr, node->dma);
+               list_del(&node->td);
+               kfree(node);
+       }
+
        /* pop request */
        list_del_init(&hwreq->queue);
 
index 879b66e..0e950ad 100644 (file)
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
        if (!hub_is_superspeed(hub->hdev))
                return -EINVAL;
 
+       ret = hub_port_status(hub, port1, &portstatus, &portchange);
+       if (ret < 0)
+               return ret;
+
+       /*
+        * The AMD FCH USB XHCI Controller [1022:7814] reports spurious
+        * status when a USB 3.0 port link state is set to Disabled: a
+        * hot-plugged USB 3.0 device is then routed to the 2.0 root hub
+        * and recognized as a high-speed device. Since the port is
+        * already in the USB_SS_PORT_LS_RX_DETECT state, check for that
+        * state here and skip the disable to avoid the bug.
+        */
+       if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
+                               USB_SS_PORT_LS_RX_DETECT) {
+               dev_dbg(&hub->ports[port1 - 1]->dev,
+                        "Not disabling port; link state is RxDetect\n");
+               return ret;
+       }
+
        ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
        if (ret)
                return ret;
@@ -1526,18 +1545,6 @@ static int hub_configure(struct usb_hub *hub,
                dev_dbg(hub_dev, "%umA bus power budget for each child\n",
                                hub->mA_per_port);
 
-       /* Update the HCD's internal representation of this hub before khubd
-        * starts getting port status changes for devices under the hub.
-        */
-       if (hcd->driver->update_hub_device) {
-               ret = hcd->driver->update_hub_device(hcd, hdev,
-                               &hub->tt, GFP_KERNEL);
-               if (ret < 0) {
-                       message = "can't update HCD hub info";
-                       goto fail;
-               }
-       }
-
        ret = hub_hub_status(hub, &hubstatus, &hubchange);
        if (ret < 0) {
                message = "can't get hub status";
@@ -1589,10 +1596,28 @@ static int hub_configure(struct usb_hub *hub,
                }
        }
        hdev->maxchild = i;
+       for (i = 0; i < hdev->maxchild; i++) {
+               struct usb_port *port_dev = hub->ports[i];
+
+               pm_runtime_put(&port_dev->dev);
+       }
+
        mutex_unlock(&usb_port_peer_mutex);
        if (ret < 0)
                goto fail;
 
+       /* Update the HCD's internal representation of this hub before khubd
+        * starts getting port status changes for devices under the hub.
+        */
+       if (hcd->driver->update_hub_device) {
+               ret = hcd->driver->update_hub_device(hcd, hdev,
+                               &hub->tt, GFP_KERNEL);
+               if (ret < 0) {
+                       message = "can't update HCD hub info";
+                       goto fail;
+               }
+       }
+
        usb_hub_adjust_deviceremovable(hdev, hub->descriptor);
 
        hub_activate(hub, HUB_INIT);
@@ -3458,7 +3483,8 @@ static int hub_suspend(struct usb_interface *intf, pm_message_t msg)
                struct usb_device *udev = port_dev->child;
 
                if (udev && udev->can_submit) {
-                       dev_warn(&port_dev->dev, "not suspended yet\n");
+                       dev_warn(&port_dev->dev, "device %s not suspended yet\n",
+                                       dev_name(&udev->dev));
                        if (PMSG_IS_AUTO(msg))
                                return -EBUSY;
                }
index 0a7cdc0..326308e 100644 (file)
@@ -84,6 +84,7 @@ struct usb_hub {
  * @dev: generic device interface
  * @port_owner: port's owner
  * @peer: related usb2 and usb3 ports (share the same connector)
+ * @req: default pm qos request for hubs without port power control
  * @connect_type: port's connect type
  * @location: opaque representation of platform connector location
  * @status_lock: synchronize port_event() vs usb_port_{suspend|resume}
@@ -95,6 +96,7 @@ struct usb_port {
        struct device dev;
        struct usb_dev_state *port_owner;
        struct usb_port *peer;
+       struct dev_pm_qos_request *req;
        enum usb_port_connect_type connect_type;
        usb_port_location_t location;
        struct mutex status_lock;
index 62036fa..fe1b6d0 100644 (file)
@@ -21,6 +21,8 @@
 
 #include "hub.h"
 
+static int usb_port_block_power_off;
+
 static const struct attribute_group *port_dev_group[];
 
 static ssize_t connect_type_show(struct device *dev,
@@ -66,6 +68,7 @@ static void usb_port_device_release(struct device *dev)
 {
        struct usb_port *port_dev = to_usb_port(dev);
 
+       kfree(port_dev->req);
        kfree(port_dev);
 }
 
@@ -142,6 +145,9 @@ static int usb_port_runtime_suspend(struct device *dev)
                        == PM_QOS_FLAGS_ALL)
                return -EAGAIN;
 
+       if (usb_port_block_power_off)
+               return -EBUSY;
+
        usb_autopm_get_interface(intf);
        retval = usb_hub_set_port_power(hdev, hub, port1, false);
        usb_clear_port_feature(hdev, port1, USB_PORT_FEAT_C_CONNECTION);
@@ -190,11 +196,19 @@ static int link_peers(struct usb_port *left, struct usb_port *right)
        if (left->peer || right->peer) {
                struct usb_port *lpeer = left->peer;
                struct usb_port *rpeer = right->peer;
-
-               WARN(1, "failed to peer %s and %s (%s -> %p) (%s -> %p)\n",
-                       dev_name(&left->dev), dev_name(&right->dev),
-                       dev_name(&left->dev), lpeer,
-                       dev_name(&right->dev), rpeer);
+               char *method;
+
+               if (left->location && left->location == right->location)
+                       method = "location";
+               else
+                       method = "default";
+
+               pr_warn("usb: failed to peer %s and %s by %s (%s:%s) (%s:%s)\n",
+                       dev_name(&left->dev), dev_name(&right->dev), method,
+                       dev_name(&left->dev),
+                       lpeer ? dev_name(&lpeer->dev) : "none",
+                       dev_name(&right->dev),
+                       rpeer ? dev_name(&rpeer->dev) : "none");
                return -EBUSY;
        }
 
@@ -251,6 +265,7 @@ static void link_peers_report(struct usb_port *left, struct usb_port *right)
                dev_warn(&left->dev, "failed to peer to %s (%d)\n",
                                dev_name(&right->dev), rc);
                pr_warn_once("usb: port power management may be unreliable\n");
+               usb_port_block_power_off = 1;
        }
 }
 
@@ -386,9 +401,13 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
        int retval;
 
        port_dev = kzalloc(sizeof(*port_dev), GFP_KERNEL);
-       if (!port_dev) {
-               retval = -ENOMEM;
-               goto exit;
+       if (!port_dev)
+               return -ENOMEM;
+
+       port_dev->req = kzalloc(sizeof(*(port_dev->req)), GFP_KERNEL);
+       if (!port_dev->req) {
+               kfree(port_dev);
+               return -ENOMEM;
        }
 
        hub->ports[port1 - 1] = port_dev;
@@ -404,31 +423,53 @@ int usb_hub_create_port_device(struct usb_hub *hub, int port1)
                        port1);
        mutex_init(&port_dev->status_lock);
        retval = device_register(&port_dev->dev);
-       if (retval)
-               goto error_register;
+       if (retval) {
+               put_device(&port_dev->dev);
+               return retval;
+       }
+
+       /* Set default policy of port-poweroff disabled. */
+       retval = dev_pm_qos_add_request(&port_dev->dev, port_dev->req,
+                       DEV_PM_QOS_FLAGS, PM_QOS_FLAG_NO_POWER_OFF);
+       if (retval < 0) {
+               device_unregister(&port_dev->dev);
+               return retval;
+       }
 
        find_and_link_peer(hub, port1);
 
+       /*
+        * Enable runtime pm and hold a reference that hub_configure()
+        * will drop once the PM_QOS_NO_POWER_OFF flag state has been set
+        * and the hub has been fully registered (hdev->maxchild set).
+        */
        pm_runtime_set_active(&port_dev->dev);
+       pm_runtime_get_noresume(&port_dev->dev);
+       pm_runtime_enable(&port_dev->dev);
+       device_enable_async_suspend(&port_dev->dev);
 
        /*
-        * Do not enable port runtime pm if the hub does not support
-        * power switching.  Also, userspace must have final say of
-        * whether a port is permitted to power-off.  Do not enable
-        * runtime pm if we fail to expose pm_qos_no_power_off.
+        * Keep hidden the ability to enable port-poweroff if the hub
+        * does not support power switching.
         */
-       if (hub_is_port_power_switchable(hub)
-                       && dev_pm_qos_expose_flags(&port_dev->dev,
-                       PM_QOS_FLAG_NO_POWER_OFF) == 0)
-               pm_runtime_enable(&port_dev->dev);
+       if (!hub_is_port_power_switchable(hub))
+               return 0;
 
-       device_enable_async_suspend(&port_dev->dev);
-       return 0;
+       /* Attempt to let userspace take over the policy. */
+       retval = dev_pm_qos_expose_flags(&port_dev->dev,
+                       PM_QOS_FLAG_NO_POWER_OFF);
+       if (retval < 0) {
+               dev_warn(&port_dev->dev, "failed to expose pm_qos_no_poweroff\n");
+               return 0;
+       }
 
-error_register:
-       put_device(&port_dev->dev);
-exit:
-       return retval;
+       /* Userspace owns the policy, drop the kernel 'no_poweroff' request. */
+       retval = dev_pm_qos_remove_request(port_dev->req);
+       if (retval >= 0) {
+               kfree(port_dev->req);
+               port_dev->req = NULL;
+       }
+       return 0;
 }
 
 void usb_hub_remove_port_device(struct usb_hub *hub, int port1)
index 8eb996e..261c3b4 100644 (file)
@@ -45,6 +45,7 @@ comment "Platform Glue Driver Support"
 config USB_DWC3_OMAP
        tristate "Texas Instruments OMAP5 and similar Platforms"
        depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST)
+       depends on OF
        default USB_DWC3
        help
          Some platforms from Texas Instruments like OMAP5, DRA7xxx and
index 4af4c35..07a736a 100644 (file)
@@ -322,7 +322,7 @@ static int dwc3_omap_remove_core(struct device *dev, void *c)
 {
        struct platform_device *pdev = to_platform_device(dev);
 
-       platform_device_unregister(pdev);
+       of_device_unregister(pdev);
 
        return 0;
 }
@@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
 
-       dwc3_omap_disable_irqs(omap);
+       dwc3_omap_write_irqmisc_set(omap, 0x00);
 
        return 0;
 }
@@ -607,8 +607,19 @@ static int dwc3_omap_prepare(struct device *dev)
 static void dwc3_omap_complete(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
+       u32                     reg;
 
-       dwc3_omap_enable_irqs(omap);
+       reg = (USBOTGSS_IRQMISC_OEVT |
+                       USBOTGSS_IRQMISC_DRVVBUS_RISE |
+                       USBOTGSS_IRQMISC_CHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
+                       USBOTGSS_IRQMISC_IDPULLUP_RISE |
+                       USBOTGSS_IRQMISC_DRVVBUS_FALL |
+                       USBOTGSS_IRQMISC_CHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
+                       USBOTGSS_IRQMISC_IDPULLUP_FALL);
+
+       dwc3_omap_write_irqmisc_set(omap, reg);
 }
 
 static int dwc3_omap_suspend(struct device *dev)
index 9d64dd0..dab7927 100644 (file)
@@ -828,10 +828,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
                        length, last ? " last" : "",
                        chain ? " chain" : "");
 
-       /* Skip the LINK-TRB on ISOC */
-       if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
-                       usb_endpoint_xfer_isoc(dep->endpoint.desc))
-               dep->free_slot++;
 
        trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
 
@@ -843,6 +839,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
        }
 
        dep->free_slot++;
+       /* Skip the LINK-TRB on ISOC */
+       if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
+                       usb_endpoint_xfer_isoc(dep->endpoint.desc))
+               dep->free_slot++;
 
        trb->size = DWC3_TRB_SIZE_LENGTH(length);
        trb->bpl = lower_32_bits(dma);
index 2ddcd63..9714214 100644 (file)
@@ -1145,15 +1145,15 @@ static struct configfs_item_operations interf_item_ops = {
        .store_attribute        = usb_os_desc_attr_store,
 };
 
-static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc,
-                                           char *page)
+static ssize_t interf_grp_compatible_id_show(struct usb_os_desc *desc,
+                                            char *page)
 {
        memcpy(page, desc->ext_compat_id, 8);
        return 8;
 }
 
-static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
-                                            const char *page, size_t len)
+static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
+                                             const char *page, size_t len)
 {
        int l;
 
@@ -1171,20 +1171,20 @@ static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
        return len;
 }
 
-static struct usb_os_desc_attribute rndis_grp_attr_compatible_id =
+static struct usb_os_desc_attribute interf_grp_attr_compatible_id =
        __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR,
-                       rndis_grp_compatible_id_show,
-                       rndis_grp_compatible_id_store);
+                       interf_grp_compatible_id_show,
+                       interf_grp_compatible_id_store);
 
-static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc,
-                                               char *page)
+static ssize_t interf_grp_sub_compatible_id_show(struct usb_os_desc *desc,
+                                                char *page)
 {
        memcpy(page, desc->ext_compat_id + 8, 8);
        return 8;
 }
 
-static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
-                                                const char *page, size_t len)
+static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
+                                                 const char *page, size_t len)
 {
        int l;
 
@@ -1202,20 +1202,21 @@ static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
        return len;
 }
 
-static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id =
+static struct usb_os_desc_attribute interf_grp_attr_sub_compatible_id =
        __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR,
-                       rndis_grp_sub_compatible_id_show,
-                       rndis_grp_sub_compatible_id_store);
+                       interf_grp_sub_compatible_id_show,
+                       interf_grp_sub_compatible_id_store);
 
 static struct configfs_attribute *interf_grp_attrs[] = {
-       &rndis_grp_attr_compatible_id.attr,
-       &rndis_grp_attr_sub_compatible_id.attr,
+       &interf_grp_attr_compatible_id.attr,
+       &interf_grp_attr_sub_compatible_id.attr,
        NULL
 };
 
 int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                                   int n_interf,
                                   struct usb_os_desc **desc,
+                                  char **names,
                                   struct module *owner)
 {
        struct config_group **f_default_groups, *os_desc_group,
@@ -1257,8 +1258,8 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                d = desc[n_interf];
                d->owner = owner;
                config_group_init_type_name(&d->group, "", interface_type);
-               config_item_set_name(&d->group.cg_item, "interface.%d",
-                                    n_interf);
+               config_item_set_name(&d->group.cg_item, "interface.%s",
+                                    names[n_interf]);
                interface_groups[n_interf] = &d->group;
        }
 
index a14ac79..36c468c 100644 (file)
@@ -8,6 +8,7 @@ void unregister_gadget_item(struct config_item *item);
 int usb_os_desc_prepare_interf_dir(struct config_group *parent,
                                   int n_interf,
                                   struct usb_os_desc **desc,
+                                  char **names,
                                   struct module *owner);
 
 static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
index 74202d6..8598c27 100644 (file)
@@ -1483,11 +1483,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
        ffs->ep0req->context = ffs;
 
        lang = ffs->stringtabs;
-       for (lang = ffs->stringtabs; *lang; ++lang) {
-               struct usb_string *str = (*lang)->strings;
-               int id = first_id;
-               for (; str->s; ++id, ++str)
-                       str->id = id;
+       if (lang) {
+               for (; *lang; ++lang) {
+                       struct usb_string *str = (*lang)->strings;
+                       int id = first_id;
+                       for (; str->s; ++id, ++str)
+                               str->id = id;
+               }
        }
 
        ffs->gadget = cdev->gadget;
index f2b7817..b9cfc15 100644 (file)
@@ -721,7 +721,8 @@ struct net_device *gphonet_setup_default(void)
        struct phonet_port *port;
 
        /* Create net device */
-       dev = alloc_netdev(sizeof(*port), "upnlink%d", pn_net_setup);
+       dev = alloc_netdev(sizeof(*port), "upnlink%d", NET_NAME_UNKNOWN,
+                          pn_net_setup);
        if (!dev)
                return ERR_PTR(-ENOMEM);
 
index eed3ad8..9c41e95 100644 (file)
@@ -687,7 +687,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
                f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
                                           GFP_KERNEL);
                if (!f->os_desc_table)
-                       return PTR_ERR(f->os_desc_table);
+                       return -ENOMEM;
                f->os_desc_n = 1;
                f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
        }
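
Note: the one-line change above closes a classic error-path bug: kzalloc()
signals failure with NULL, not with an ERR_PTR(), and PTR_ERR(NULL) evaluates
to 0, so the old code reported success on allocation failure. The correct
shape:

    #include <linux/err.h>
    #include <linux/slab.h>

    static int example_bind(void)
    {
            void *table = kzalloc(64, GFP_KERNEL);

            if (!table)
                    return -ENOMEM; /* not PTR_ERR(table): that would be 0 */
            /* ... */
            return 0;
    }
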
@@ -905,6 +905,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
 {
        struct f_rndis_opts *opts;
        struct usb_os_desc *descs[1];
+       char *names[1];
 
        opts = kzalloc(sizeof(*opts), GFP_KERNEL);
        if (!opts)
@@ -922,8 +923,9 @@ static struct usb_function_instance *rndis_alloc_inst(void)
        INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop);
 
        descs[0] = &opts->rndis_os_desc;
+       names[0] = "rndis";
        usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
-                                      THIS_MODULE);
+                                      names, THIS_MODULE);
        config_group_init_type_name(&opts->func_inst.group, "",
                                    &rndis_func_type);
 
index 99a37ed..c7004ee 100644 (file)
@@ -1532,8 +1532,9 @@ static int gr_ep_enable(struct usb_ep *_ep,
                        "%s mode: multiple trans./microframe not valid\n",
                        (mode == 2 ? "Bulk" : "Control"));
                return -EINVAL;
-       } else if (nt == 0x11) {
-               dev_err(dev->dev, "Invalid value for trans./microframe\n");
+       } else if (nt == 0x3) {
+               dev_err(dev->dev,
+                       "Invalid value 0x3 for additional trans./microframe\n");
                return -EINVAL;
        } else if ((nt + 1) * max > buffer_size) {
                dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
index ee6c164..2e4ce77 100644 (file)
@@ -1264,8 +1264,13 @@ dev_release (struct inode *inode, struct file *fd)
 
        kfree (dev->buf);
        dev->buf = NULL;
-       put_dev (dev);
 
+       /* other endpoints were all decoupled from this device */
+       spin_lock_irq(&dev->lock);
+       dev->state = STATE_DEV_DISABLED;
+       spin_unlock_irq(&dev->lock);
+
+       put_dev (dev);
        return 0;
 }
 
index 3d78a88..97b0277 100644 (file)
@@ -1120,7 +1120,10 @@ void gether_disconnect(struct gether *link)
 
        DBG(dev, "%s\n", __func__);
 
+       netif_tx_lock(dev->net);
        netif_stop_queue(dev->net);
+       netif_tx_unlock(dev->net);
+
        netif_carrier_off(dev->net);
 
        /* disable endpoints, forcing (synchronous) completion
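Taking netif_tx_lock() around netif_stop_queue() ensures no CPU is still inside the driver's ndo_start_xmit() when the queue is marked stopped, closing the race with in-flight transmits that this hunk fixes. A minimal sketch of the teardown ordering; the helper name is hypothetical:

    #include <linux/netdevice.h>

    static void my_link_down(struct net_device *net)
    {
            netif_tx_lock(net);     /* excludes concurrent ndo_start_xmit() */
            netif_stop_queue(net);
            netif_tx_unlock(net);

            netif_carrier_off(net); /* safe: no transmit is in flight now */
    }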
index 61b7817..03314f8 100644 (file)
@@ -176,7 +176,7 @@ config USB_EHCI_HCD_AT91
 
 config USB_EHCI_MSM
        tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
-       depends on ARCH_MSM
+       depends on ARCH_MSM || ARCH_QCOM
        select USB_EHCI_ROOT_HUB_TT
        ---help---
          Enables support for the USB Host controller present on the
index 4a6d3dd..2f3aceb 100644 (file)
@@ -656,6 +656,14 @@ static const struct dmi_system_id ehci_dmi_nohandoff_table[] = {
                        DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"),
                },
        },
+       {
+               /* HASEE E200 */
+               .matches = {
+                       DMI_MATCH(DMI_BOARD_VENDOR, "HASEE"),
+                       DMI_MATCH(DMI_BOARD_NAME, "E210"),
+                       DMI_MATCH(DMI_BIOS_VERSION, "6.00"),
+               },
+       },
        { }
 };
 
@@ -665,9 +673,14 @@ static void ehci_bios_handoff(struct pci_dev *pdev,
 {
        int try_handoff = 1, tried_handoff = 0;
 
-       /* The Pegatron Lucid tablet sporadically waits for 98 seconds trying
-        * the handoff on its unused controller.  Skip it. */
-       if (pdev->vendor == 0x8086 && pdev->device == 0x283a) {
+       /*
+        * The Pegatron Lucid tablet sporadically waits for 98 seconds trying
+        * the handoff on its unused controller.  Skip it.
+        *
+        * The HASEE E200 hangs when the semaphore is set (bugzilla #77021).
+        */
+       if (pdev->vendor == 0x8086 && (pdev->device == 0x283a ||
+                       pdev->device == 0x27cc)) {
                if (dmi_check_system(ehci_dmi_nohandoff_table))
                        try_handoff = 0;
        }
index 6231ce6..aa79e87 100644 (file)
@@ -22,6 +22,7 @@
 
 
 #include <linux/slab.h>
+#include <linux/device.h>
 #include <asm/unaligned.h>
 
 #include "xhci.h"
@@ -287,7 +288,7 @@ static int xhci_stop_device(struct xhci_hcd *xhci, int slot_id, int suspend)
                if (virt_dev->eps[i].ring && virt_dev->eps[i].ring->dequeue) {
                        struct xhci_command *command;
                        command = xhci_alloc_command(xhci, false, false,
-                                                    GFP_NOIO);
+                                                    GFP_NOWAIT);
                        if (!command) {
                                spin_unlock_irqrestore(&xhci->lock, flags);
                                xhci_free_command(xhci, cmd);
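Context for the GFP change above: xhci->lock is held (with interrupts off) around this allocation, so the allocator must not sleep. GFP_NOIO can still block waiting for reclaim, while GFP_NOWAIT returns NULL immediately instead. A minimal sketch of the rule, with hypothetical surroundings:

    #include <linux/slab.h>
    #include <linux/spinlock.h>

    spin_lock_irqsave(&xhci->lock, flags);
    cmd = kzalloc(sizeof(*cmd), GFP_NOWAIT); /* atomic context: never sleep */
    if (!cmd) {
            spin_unlock_irqrestore(&xhci->lock, flags);
            return -ENOMEM;                  /* fail fast, no reclaim */
    }
    /* ... queue the command, then unlock ... */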
@@ -1139,7 +1140,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
                 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
                 * is enabled, so also enable remote wake here.
                 */
-               if (hcd->self.root_hub->do_remote_wakeup) {
+               if (hcd->self.root_hub->do_remote_wakeup
+                               && device_may_wakeup(hcd->self.controller)) {
+
                        if (t1 & PORT_CONNECT) {
                                t2 |= PORT_WKOC_E | PORT_WKDISC_E;
                                t2 &= ~PORT_WKCONN_E;
index d67ff71..749fc68 100644 (file)
@@ -1433,8 +1433,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
                xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
                break;
        case TRB_RESET_DEV:
-               WARN_ON(slot_id != TRB_TO_SLOT_ID(
-                               le32_to_cpu(cmd_trb->generic.field[3])));
+               /* SLOT_ID field in reset device cmd completion event TRB is 0.
+                * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
+                */
+               slot_id = TRB_TO_SLOT_ID(
+                               le32_to_cpu(cmd_trb->generic.field[3]));
                xhci_handle_cmd_reset_dev(xhci, slot_id, event);
                break;
        case TRB_NEC_GET_FW:
@@ -3534,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
                return 0;
 
        max_burst = urb->ep->ss_ep_comp.bMaxBurst;
-       return roundup(total_packet_count, max_burst + 1) - 1;
+       return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
 }
 
 /*
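The one-liner above is easy to misread: roundup(x, y) rounds x up to the next multiple of y, while DIV_ROUND_UP(x, y) is the quotient rounded up, and only the latter counts bursts. A worked sketch of the difference:

    #include <linux/kernel.h>       /* roundup(), DIV_ROUND_UP() */

    /* 5 packets, up to 3 packets per burst (max_burst = 2):
     *   roundup(5, 3)      = 6  ->  6 - 1 = 5   (wrong: not a burst count)
     *   DIV_ROUND_UP(5, 3) = 2  ->  2 - 1 = 1   (right: two bursts, the
     *                                            field holds "bursts - 1")
     */
    static unsigned int bursts_minus_one(unsigned int packets,
                                         unsigned int max_burst)
    {
            return DIV_ROUND_UP(packets, max_burst + 1) - 1;
    }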
index 2b8d9a2..7436d5f 100644 (file)
@@ -936,7 +936,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
  */
 int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 {
-       u32                     command, temp = 0;
+       u32                     command, temp = 0, status;
        struct usb_hcd          *hcd = xhci_to_hcd(xhci);
        struct usb_hcd          *secondary_hcd;
        int                     retval = 0;
@@ -1054,8 +1054,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
 
  done:
        if (retval == 0) {
-               usb_hcd_resume_root_hub(hcd);
-               usb_hcd_resume_root_hub(xhci->shared_hcd);
+               /* Resume root hubs only when there are pending events. */
+               status = readl(&xhci->op_regs->status);
+               if (status & STS_EINT) {
+                       usb_hcd_resume_root_hub(hcd);
+                       usb_hcd_resume_root_hub(xhci->shared_hcd);
+               }
        }
 
        /*
index 51a6da2..829f446 100644 (file)
@@ -7,7 +7,7 @@
 #include <linux/moduleparam.h>
 #include <linux/scatterlist.h>
 #include <linux/mutex.h>
-
+#include <linux/timer.h>
 #include <linux/usb.h>
 
 #define SIMPLE_IO_TIMEOUT      10000   /* in milliseconds */
@@ -484,6 +484,14 @@ alloc_sglist(int nents, int max, int vary)
        return sg;
 }
 
+static void sg_timeout(unsigned long _req)
+{
+       struct usb_sg_request   *req = (struct usb_sg_request *) _req;
+
+       req->status = -ETIMEDOUT;
+       usb_sg_cancel(req);
+}
+
 static int perform_sglist(
        struct usbtest_dev      *tdev,
        unsigned                iterations,
@@ -495,6 +503,9 @@ static int perform_sglist(
 {
        struct usb_device       *udev = testdev_to_usbdev(tdev);
        int                     retval = 0;
+       struct timer_list       sg_timer;
+
+       setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long) req);
 
        while (retval == 0 && iterations-- > 0) {
                retval = usb_sg_init(req, udev, pipe,
@@ -505,7 +516,10 @@ static int perform_sglist(
 
                if (retval)
                        break;
+               mod_timer(&sg_timer, jiffies +
+                               msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
                usb_sg_wait(req);
+               del_timer_sync(&sg_timer);
                retval = req->status;
 
                /* FIXME check resulting data pattern */
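The pattern above is the stock way, in this era of the timer API, to bolt a timeout onto a blocking wait: an on-stack timer whose handler cancels the request, armed before usb_sg_wait() and disarmed afterwards. Condensed from the hunks as a sketch; note that del_timer_sync() also waits for a concurrently running handler to finish:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    static void sg_timeout(unsigned long _req)
    {
            struct usb_sg_request *req = (struct usb_sg_request *)_req;

            req->status = -ETIMEDOUT;
            usb_sg_cancel(req);             /* unblocks usb_sg_wait() */
    }

    /* arm, wait, disarm */
    setup_timer_on_stack(&sg_timer, sg_timeout, (unsigned long)req);
    mod_timer(&sg_timer, jiffies + msecs_to_jiffies(SIMPLE_IO_TIMEOUT));
    usb_sg_wait(req);
    del_timer_sync(&sg_timer);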
index d235378..1e58ed2 100644 (file)
@@ -19,21 +19,6 @@ err:
        return ret;
 }
 
-static int of_remove_populated_child(struct device *dev, void *d)
-{
-       struct platform_device *pdev = to_platform_device(dev);
-
-       of_device_unregister(pdev);
-       return 0;
-}
-
-static int am335x_child_remove(struct platform_device *pdev)
-{
-       device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
-       pm_runtime_disable(&pdev->dev);
-       return 0;
-}
-
 static const struct of_device_id am335x_child_of_match[] = {
        { .compatible = "ti,am33xx-usb" },
        {  },
@@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
 
 static struct platform_driver am335x_child_driver = {
        .probe          = am335x_child_probe,
-       .remove         = am335x_child_remove,
        .driver         = {
                .name   = "am335x-usb-childs",
                .of_match_table = am335x_child_of_match,
        },
 };
 
-module_platform_driver(am335x_child_driver);
+static int __init am335x_child_init(void)
+{
+       return platform_driver_register(&am335x_child_driver);
+}
+module_init(am335x_child_init);
+
 MODULE_DESCRIPTION("AM33xx child devices");
 MODULE_LICENSE("GPL v2");
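The conversion above deliberately drops the remove path: with no .remove callback and no module_exit(), the driver can be registered but never meaningfully torn down, which sidesteps the broken child-device unregistration. A minimal sketch of a register-only platform driver; the names are hypothetical:

    #include <linux/module.h>
    #include <linux/platform_device.h>

    static int my_probe(struct platform_device *pdev)
    {
            return 0;
    }

    static struct platform_driver my_driver = {
            .probe  = my_probe,              /* deliberately no .remove */
            .driver = { .name = "my-dev" },
    };

    static int __init my_init(void)
    {
            return platform_driver_register(&my_driver);
    }
    module_init(my_init);                    /* no module_exit(): stays loaded */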
index 61da471..eff3c5c 100644 (file)
@@ -849,7 +849,7 @@ b_host:
        }
 
        /* handle babble condition */
-       if (int_usb & MUSB_INTR_BABBLE)
+       if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb))
                schedule_work(&musb->recover_work);
 
 #if 0
index 7b8bbf5..5341bb2 100644 (file)
@@ -318,7 +318,7 @@ static void cppi41_dma_callback(void *private_data)
                }
                list_add_tail(&cppi41_channel->tx_check,
                                &controller->early_tx_list);
-               if (!hrtimer_active(&controller->early_tx)) {
+               if (!hrtimer_is_queued(&controller->early_tx)) {
                        hrtimer_start_range_ns(&controller->early_tx,
                                ktime_set(0, 140 * NSEC_PER_USEC),
                                40 * NSEC_PER_USEC,
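The distinction being fixed above: hrtimer_active() is true both while the timer is queued and while its callback is running, so a re-arm that races with the callback would be skipped and the queued work lost; hrtimer_is_queued() is true only while an expiry is actually pending. A minimal sketch, with a hypothetical controller struct:

    #include <linux/hrtimer.h>

    /* Racing with the callback:
     *   hrtimer_active(t)    == true  -> re-arm skipped, work is lost
     *   hrtimer_is_queued(t) == false -> re-arm proceeds as intended
     */
    if (!hrtimer_is_queued(&ctrl->early_tx))
            hrtimer_start_range_ns(&ctrl->early_tx,
                                   ktime_set(0, 140 * NSEC_PER_USEC),
                                   40 * NSEC_PER_USEC, HRTIMER_MODE_REL);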
index 51beb13..09529f9 100644 (file)
@@ -494,10 +494,9 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
        struct dsps_glue *glue = dev_get_drvdata(dev->parent);
        const struct dsps_musb_wrapper *wrp = glue->wrp;
        void __iomem *ctrl_base = musb->ctrl_base;
-       void __iomem *base = musb->mregs;
        u32 reg;
 
-       reg = dsps_readl(base, wrp->mode);
+       reg = dsps_readl(ctrl_base, wrp->mode);
 
        switch (mode) {
        case MUSB_HOST:
@@ -510,7 +509,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
                 */
                reg |= (1 << wrp->iddig_mux);
 
-               dsps_writel(base, wrp->mode, reg);
+               dsps_writel(ctrl_base, wrp->mode, reg);
                dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
                break;
        case MUSB_PERIPHERAL:
@@ -523,10 +522,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
                 */
                reg |= (1 << wrp->iddig_mux);
 
-               dsps_writel(base, wrp->mode, reg);
+               dsps_writel(ctrl_base, wrp->mode, reg);
                break;
        case MUSB_OTG:
-               dsps_writel(base, wrp->phy_utmi, 0x02);
+               dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
                break;
        default:
                dev_err(glue->dev, "unsupported mode %d\n", mode);
index c2e45e6..f202e50 100644 (file)
@@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev)
        musb->dev.parent                = &pdev->dev;
        musb->dev.dma_mask              = &pdev->dev.coherent_dma_mask;
        musb->dev.coherent_dma_mask     = pdev->dev.coherent_dma_mask;
-       musb->dev.of_node               = pdev->dev.of_node;
 
        glue->dev                       = &pdev->dev;
        glue->musb                      = musb;
index ced34f3..c929370 100644 (file)
@@ -1229,7 +1229,9 @@ static void msm_otg_sm_work(struct work_struct *w)
                        motg->chg_state = USB_CHG_STATE_UNDEFINED;
                        motg->chg_type = USB_INVALID_CHARGER;
                }
-               pm_runtime_put_sync(otg->phy->dev);
+
+               if (otg->phy->state == OTG_STATE_B_IDLE)
+                       pm_runtime_put_sync(otg->phy->dev);
                break;
        case OTG_STATE_B_PERIPHERAL:
                dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
index d49f9c3..4fd3653 100644 (file)
@@ -681,6 +681,14 @@ usbhs_fifo_read_end:
                usbhs_pipe_number(pipe),
                pkt->length, pkt->actual, *is_done, pkt->zero);
 
+       /*
+        * Transmission end
+        */
+       if (*is_done) {
+               if (usbhs_pipe_is_dcp(pipe))
+                       usbhs_dcp_control_transfer_done(pipe);
+       }
+
 usbhs_fifo_read_busy:
        usbhsf_fifo_unselect(pipe, fifo);
 
index 762e4a5..330df5c 100644 (file)
@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
        { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
        { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
+       { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
        { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
        { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
        { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
index edf3b12..8a3813b 100644 (file)
@@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
-       { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) },
+       { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
+       { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
@@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
        { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
+       /* Infineon Devices */
+       { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
        { }                                     /* Terminating entry */
 };
 
@@ -1566,14 +1569,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
        struct usb_device *udev = serial->dev;
 
        struct usb_interface *interface = serial->interface;
-       struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc;
+       struct usb_endpoint_descriptor *ep_desc;
 
        unsigned num_endpoints;
-       int i;
+       unsigned i;
 
        num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
        dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
 
+       if (!num_endpoints)
+               return;
+
        /* NOTE: some customers have programmed FT232R/FT245R devices
         * with an endpoint size of 0 - not good.  In this case, we
         * want to override the endpoint descriptor setting and use a
index 500474c..c4777bc 100644 (file)
 #define RATOC_VENDOR_ID                0x0584
 #define RATOC_PRODUCT_ID_USB60F        0xb020
 
+/*
+ * Infineon Technologies
+ */
+#define INFINEON_VID           0x058b
+#define INFINEON_TRIBOARD_PID  0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
+
 /*
  * Acton Research Corp.
  */
  * Submitted by Colin Leroy
  */
 #define TESTO_VID                      0x128D
-#define TESTO_USB_INTERFACE_PID                0x0001
+#define TESTO_1_PID                    0x0001
+#define TESTO_3_PID                    0x0003
 
 /*
  * Mobility Electronics products.
index 59c3108..a968894 100644 (file)
@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
 /* Zoom */
 #define ZOOM_PRODUCT_4597                      0x9607
 
+/* SpeedUp SU9800 usb 3g modem */
+#define SPEEDUP_PRODUCT_SU9800                 0x9800
+
 /* Haier products */
 #define HAIER_VENDOR_ID                                0x201e
 #define HAIER_PRODUCT_CE100                    0x2009
@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
 /* Olivetti products */
 #define OLIVETTI_VENDOR_ID                     0x0b3c
 #define OLIVETTI_PRODUCT_OLICARD100            0xc000
+#define OLIVETTI_PRODUCT_OLICARD120            0xc001
+#define OLIVETTI_PRODUCT_OLICARD140            0xc002
 #define OLIVETTI_PRODUCT_OLICARD145            0xc003
+#define OLIVETTI_PRODUCT_OLICARD155            0xc004
 #define OLIVETTI_PRODUCT_OLICARD200            0xc005
+#define OLIVETTI_PRODUCT_OLICARD160            0xc00a
 #define OLIVETTI_PRODUCT_OLICARD500            0xc00b
 
 /* Celot products */
@@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
                .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff),  /* ZTE MF91 */
                .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
+       { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff),  /* Telewell TW-LTE 4G v2 */
+               .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
          .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
        },
+       { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
        { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
        { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = {
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
        { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
-
-       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
-               .driver_info = (kernel_ulong_t)&net_intf6_blacklist
-       },
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
+       { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
+               .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
        { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
-               .driver_info = (kernel_ulong_t)&net_intf4_blacklist
-       },
+               .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
        { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
        { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
        { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
index 9d38ddc..866b5df 100644 (file)
@@ -256,6 +256,10 @@ static int slave_configure(struct scsi_device *sdev)
                if (us->fflags & US_FL_WRITE_CACHE)
                        sdev->wce_default_on = 1;
 
+               /* A few buggy USB-ATA bridges don't understand FUA */
+               if (us->fflags & US_FL_BROKEN_FUA)
+                       sdev->broken_fua = 1;
+
        } else {
 
                /* Non-disk-type devices don't need to blacklist any pages
index 174a447..80a5b36 100644 (file)
@@ -1936,6 +1936,13 @@ UNUSUAL_DEV(  0x14cd, 0x6600, 0x0201, 0x0201,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_IGNORE_RESIDUE ),
 
+/* Reported by Michael Büsch <m@bues.ch> */
+UNUSUAL_DEV(  0x152d, 0x0567, 0x0114, 0x0114,
+               "JMicron",
+               "USB to ATA/ATAPI Bridge",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_BROKEN_FUA ),
+
 /* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
  * JMicron responds to USN and several other SCSI ioctls with a
  * residue that causes subsequent I/O requests to fail.  */
index 971a760..8dae2f7 100644 (file)
@@ -700,14 +700,6 @@ static void handle_rx_net(struct vhost_work *work)
        handle_rx(net);
 }
 
-static void vhost_net_free(void *addr)
-{
-       if (is_vmalloc_addr(addr))
-               vfree(addr);
-       else
-               kfree(addr);
-}
-
 static int vhost_net_open(struct inode *inode, struct file *f)
 {
        struct vhost_net *n;
@@ -723,7 +715,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
        }
        vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
        if (!vqs) {
-               vhost_net_free(n);
+               kvfree(n);
                return -ENOMEM;
        }
 
@@ -840,7 +832,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
         * since jobs can re-queue themselves. */
        vhost_net_flush(n);
        kfree(n->dev.vqs);
-       vhost_net_free(n);
+       kvfree(n);
        return 0;
 }
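kvfree() folds the open-coded "vfree or kfree" helper into one call: it checks is_vmalloc_addr() internally, so callers that allocate with kmalloc and fall back to vmalloc can free with a single function. A minimal sketch of the pairing:

    #include <linux/mm.h>           /* kvfree() */
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void demo(size_t size)
    {
            void *p = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);

            if (!p)
                    p = vmalloc(size);      /* fallback for large sizes */
            if (!p)
                    return;
            /* ... use p ... */
            kvfree(p);                      /* correct for either allocator */
    }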
 
index 4f4ffa4..69906ca 100644 (file)
@@ -1503,14 +1503,6 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
        return 0;
 }
 
-static void vhost_scsi_free(struct vhost_scsi *vs)
-{
-       if (is_vmalloc_addr(vs))
-               vfree(vs);
-       else
-               kfree(vs);
-}
-
 static int vhost_scsi_open(struct inode *inode, struct file *f)
 {
        struct vhost_scsi *vs;
@@ -1550,7 +1542,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
        return 0;
 
 err_vqs:
-       vhost_scsi_free(vs);
+       kvfree(vs);
 err_vs:
        return r;
 }
@@ -1569,7 +1561,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
        /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
        vhost_scsi_flush(vs);
        kfree(vs->dev.vqs);
-       vhost_scsi_free(vs);
+       kvfree(vs);
        return 0;
 }
 
index b63860f..40bec8d 100644 (file)
@@ -77,3 +77,4 @@ const struct consw dummy_con = {
     .con_set_palette = DUMMY,
     .con_scrolldelta = DUMMY,
 };
+EXPORT_SYMBOL_GPL(dummy_con);
index f267284..6e6aa70 100644 (file)
@@ -1441,5 +1441,6 @@ const struct consw vga_con = {
        .con_build_attr = vgacon_build_attr,
        .con_invert_region = vgacon_invert_region,
 };
+EXPORT_SYMBOL(vga_con);
 
 MODULE_LICENSE("GPL");
index e683b6e..d36e830 100644 (file)
@@ -1057,6 +1057,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
                goto put_display_node;
        }
 
+       INIT_LIST_HEAD(&pdata->pwr_gpios);
        ret = -ENOMEM;
        for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
                gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
@@ -1082,6 +1083,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
                        dev_err(dev, "set direction output gpio %d failed\n", gpio);
                        goto put_display_node;
                }
+               list_add(&og->list, &pdata->pwr_gpios);
        }
 
        if (is_gpio_power)
index a54f7f7..8fe41ca 100644 (file)
@@ -408,7 +408,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
        /* Workaround "PPI Does Not Start Properly In Specific Mode" */
        if (ANOMALY_05000400) {
                ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW,
-                                       "PPI0_FS3")
+                                       "PPI0_FS3");
                if (ret) {
                        dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
                        ret = -EBUSY;
index 7d44d66..43a0a52 100644 (file)
@@ -91,15 +91,6 @@ extern boot_infos_t *boot_infos;
 #define AVIVO_DC_LUTB_WHITE_OFFSET_GREEN        0x6cd4
 #define AVIVO_DC_LUTB_WHITE_OFFSET_RED          0x6cd8
 
-#define FB_RIGHT_POS(p, bpp)         (fb_be_math(p) ? 0 : (32 - (bpp)))
-
-static inline u32 offb_cmap_byteswap(struct fb_info *info, u32 value)
-{
-       u32 bpp = info->var.bits_per_pixel;
-
-       return cpu_to_be32(value) >> FB_RIGHT_POS(info, bpp);
-}
-
     /*
      *  Set a single color register. The values supplied are already
      *  rounded down to the hardware's capabilities (according to the
@@ -129,7 +120,7 @@ static int offb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
                        mask <<= info->var.transp.offset;
                        value |= mask;
                }
-               pal[regno] = offb_cmap_byteswap(info, value);
+               pal[regno] = value;
                return 0;
        }
 
index 99af9e8..2f0822e 100644 (file)
@@ -121,9 +121,11 @@ static void __init omapdss_add_to_list(struct device_node *node, bool root)
 {
        struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node),
                GFP_KERNEL);
-       n->node = node;
-       n->root = root;
-       list_add(&n->list, &dss_conv_list);
+       if (n) {
+               n->node = node;
+               n->root = root;
+               list_add(&n->list, &dss_conv_list);
+       }
 }
 
 static bool __init omapdss_list_contains(const struct device_node *node)
index a8f2b28..a1134c3 100644 (file)
@@ -474,8 +474,6 @@ static int vt8500lcd_remove(struct platform_device *pdev)
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        release_mem_region(res->start, resource_size(res));
 
-       kfree(fbi);
-
        return 0;
 }
 
index 67b067a..a5df5e8 100644 (file)
@@ -66,7 +66,7 @@ static u8 mxc_w1_ds2_reset_bus(void *data)
 
                udelay(100);
        }
-       return !!(reg_val & MXC_W1_CONTROL_PST);
+       return !(reg_val & MXC_W1_CONTROL_PST);
 }
 
 /*
index c845527..76dd541 100644 (file)
@@ -1280,14 +1280,17 @@ config WATCHDOG_RTAS
 
 # S390 Architecture
 
-config ZVM_WATCHDOG
-       tristate "z/VM Watchdog Timer"
+config DIAG288_WATCHDOG
+       tristate "System z diag288 Watchdog"
        depends on S390
+       select WATCHDOG_CORE
        help
          IBM s/390 and zSeries machines running under z/VM 5.1 or later
          provide a virtual watchdog timer to their guest that causes a
          user-defined Control Program command to be executed after a
          timeout.
+         LPAR provides a very similar interface. This driver handles
+         both.
 
          To compile this driver as a module, choose M here. The module
          will be called diag288_wdt.
index 7b8a91e..468c320 100644 (file)
@@ -153,6 +153,7 @@ obj-$(CONFIG_MEN_A21_WDT) += mena21_wdt.o
 obj-$(CONFIG_WATCHDOG_RTAS) += wdrtas.o
 
 # S390 Architecture
+obj-$(CONFIG_DIAG288_WATCHDOG) += diag288_wdt.o
 
 # SUPERH (sh + sh64) Architecture
 obj-$(CONFIG_SH_WDT) += shwdt.o
diff --git a/drivers/watchdog/diag288_wdt.c b/drivers/watchdog/diag288_wdt.c
new file mode 100644 (file)
index 0000000..429494b
--- /dev/null
@@ -0,0 +1,316 @@
+/*
+ * Watchdog driver for z/VM and LPAR using the diag 288 interface.
+ *
+ * Under z/VM, expiration of the watchdog will send a "system restart" command
+ * to CP.
+ *
+ * The command can be altered using the module parameter "cmd". This is
+ * not recommended because it is only supported on z/VM, not on LPAR.
+ *
+ * On LPAR, the watchdog will always trigger a system restart. The module
+ * parameter cmd is meaningless here.
+ *
+ *
+ * Copyright IBM Corp. 2004, 2013
+ * Author(s): Arnd Bergmann (arndb@de.ibm.com)
+ *           Philipp Hachtmann (phacht@de.ibm.com)
+ *
+ */
+
+#define KMSG_COMPONENT "diag288_wdt"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/watchdog.h>
+#include <linux/suspend.h>
+#include <asm/ebcdic.h>
+#include <linux/io.h>
+#include <linux/uaccess.h>
+
+#define MAX_CMDLEN 240
+#define DEFAULT_CMD "SYSTEM RESTART"
+
+#define MIN_INTERVAL 15     /* Minimum interval supported by diag288 */
+#define MAX_INTERVAL 3600   /* One hour should be enough - pure estimation */
+
+#define WDT_DEFAULT_TIMEOUT 30
+
+/* Function codes - init, change, cancel */
+#define WDT_FUNC_INIT 0
+#define WDT_FUNC_CHANGE 1
+#define WDT_FUNC_CANCEL 2
+#define WDT_FUNC_CONCEAL 0x80000000
+
+/* Action codes for LPAR watchdog */
+#define LPARWDT_RESTART 0
+
+static char wdt_cmd[MAX_CMDLEN] = DEFAULT_CMD;
+static bool conceal_on;
+static bool nowayout_info = WATCHDOG_NOWAYOUT;
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
+MODULE_AUTHOR("Philipp Hachtmann <phacht@de.ibm.com>");
+
+MODULE_DESCRIPTION("System z diag288 Watchdog Timer");
+
+module_param_string(cmd, wdt_cmd, MAX_CMDLEN, 0644);
+MODULE_PARM_DESC(cmd, "CP command that is run when the watchdog triggers (z/VM only)");
+
+module_param_named(conceal, conceal_on, bool, 0644);
+MODULE_PARM_DESC(conceal, "Enable the CONCEAL CP option while the watchdog is active (z/VM only)");
+
+module_param_named(nowayout, nowayout_info, bool, 0444);
+MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default = CONFIG_WATCHDOG_NOWAYOUT)");
+
+MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
+MODULE_ALIAS("vmwatchdog");
+
+static int __diag288(unsigned int func, unsigned int timeout,
+                    unsigned long action, unsigned int len)
+{
+       register unsigned long __func asm("2") = func;
+       register unsigned long __timeout asm("3") = timeout;
+       register unsigned long __action asm("4") = action;
+       register unsigned long __len asm("5") = len;
+       int err;
+
+       err = -EINVAL;
+       asm volatile(
+               "       diag    %1, %3, 0x288\n"
+               "0:     la      %0, 0\n"
+               "1:\n"
+               EX_TABLE(0b, 1b)
+               : "+d" (err) : "d"(__func), "d"(__timeout),
+                 "d"(__action), "d"(__len) : "1", "cc");
+       return err;
+}
+
+static int __diag288_vm(unsigned int  func, unsigned int timeout,
+                       char *cmd, size_t len)
+{
+       return __diag288(func, timeout, virt_to_phys(cmd), len);
+}
+
+static int __diag288_lpar(unsigned int func, unsigned int timeout,
+                         unsigned long action)
+{
+       return __diag288(func, timeout, action, 0);
+}
+
+static int wdt_start(struct watchdog_device *dev)
+{
+       char *ebc_cmd;
+       size_t len;
+       int ret;
+       unsigned int func;
+
+       ret = -ENODEV;
+
+       if (MACHINE_IS_VM) {
+               ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+               if (!ebc_cmd)
+                       return -ENOMEM;
+               len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
+               ASCEBC(ebc_cmd, MAX_CMDLEN);
+               EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+               func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
+                       : WDT_FUNC_INIT;
+               ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+               WARN_ON(ret != 0);
+               kfree(ebc_cmd);
+       }
+
+       if (MACHINE_IS_LPAR) {
+               ret = __diag288_lpar(WDT_FUNC_INIT,
+                                    dev->timeout, LPARWDT_RESTART);
+       }
+
+       if (ret) {
+               pr_err("The watchdog cannot be activated\n");
+               return ret;
+       }
+       pr_info("The watchdog was activated\n");
+       return 0;
+}
+
+static int wdt_stop(struct watchdog_device *dev)
+{
+       int ret;
+
+       ret = __diag288(WDT_FUNC_CANCEL, 0, 0, 0);
+       pr_info("The watchdog was deactivated\n");
+       return ret;
+}
+
+static int wdt_ping(struct watchdog_device *dev)
+{
+       char *ebc_cmd;
+       size_t len;
+       int ret;
+       unsigned int func;
+
+       ret = -ENODEV;
+
+       if (MACHINE_IS_VM) {
+               ebc_cmd = kmalloc(MAX_CMDLEN, GFP_KERNEL);
+               if (!ebc_cmd)
+                       return -ENOMEM;
+               len = strlcpy(ebc_cmd, wdt_cmd, MAX_CMDLEN);
+               ASCEBC(ebc_cmd, MAX_CMDLEN);
+               EBC_TOUPPER(ebc_cmd, MAX_CMDLEN);
+
+               /*
+                * It seems to be ok for z/VM to use the init function to
+                * retrigger the watchdog. On LPAR, WDT_FUNC_CHANGE must
+                * be used when the watchdog is running.
+                */
+               func = conceal_on ? (WDT_FUNC_INIT | WDT_FUNC_CONCEAL)
+                       : WDT_FUNC_INIT;
+
+               ret = __diag288_vm(func, dev->timeout, ebc_cmd, len);
+               WARN_ON(ret != 0);
+               kfree(ebc_cmd);
+       }
+
+       if (MACHINE_IS_LPAR)
+               ret = __diag288_lpar(WDT_FUNC_CHANGE, dev->timeout, 0);
+
+       if (ret)
+               pr_err("The watchdog timer cannot be started or reset\n");
+       return ret;
+}
+
+static int wdt_set_timeout(struct watchdog_device *dev, unsigned int new_to)
+{
+       dev->timeout = new_to;
+       return wdt_ping(dev);
+}
+
+static struct watchdog_ops wdt_ops = {
+       .owner = THIS_MODULE,
+       .start = wdt_start,
+       .stop = wdt_stop,
+       .ping = wdt_ping,
+       .set_timeout = wdt_set_timeout,
+};
+
+static struct watchdog_info wdt_info = {
+       .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
+       .firmware_version = 0,
+       .identity = "z Watchdog",
+};
+
+static struct watchdog_device wdt_dev = {
+       .parent = NULL,
+       .info = &wdt_info,
+       .ops = &wdt_ops,
+       .bootstatus = 0,
+       .timeout = WDT_DEFAULT_TIMEOUT,
+       .min_timeout = MIN_INTERVAL,
+       .max_timeout = MAX_INTERVAL,
+};
+
+/*
+ * It makes no sense to go into suspend while the watchdog is running.
+ * Depending on the memory size, the watchdog might trigger while we
+ * are still saving the memory.
+ * We reuse the open flag to ensure that suspend and watchdog open are
+ * exclusive operations.
+ */
+static int wdt_suspend(void)
+{
+       if (test_and_set_bit(WDOG_DEV_OPEN, &wdt_dev.status)) {
+               pr_err("Linux cannot be suspended while the watchdog is in use\n");
+               return notifier_from_errno(-EBUSY);
+       }
+       if (test_bit(WDOG_ACTIVE, &wdt_dev.status)) {
+               clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
+               pr_err("Linux cannot be suspended while the watchdog is in use\n");
+               return notifier_from_errno(-EBUSY);
+       }
+       return NOTIFY_DONE;
+}
+
+static int wdt_resume(void)
+{
+       clear_bit(WDOG_DEV_OPEN, &wdt_dev.status);
+       return NOTIFY_DONE;
+}
+
+static int wdt_power_event(struct notifier_block *this, unsigned long event,
+                          void *ptr)
+{
+       switch (event) {
+       case PM_POST_HIBERNATION:
+       case PM_POST_SUSPEND:
+               return wdt_resume();
+       case PM_HIBERNATION_PREPARE:
+       case PM_SUSPEND_PREPARE:
+               return wdt_suspend();
+       default:
+               return NOTIFY_DONE;
+       }
+}
+
+static struct notifier_block wdt_power_notifier = {
+       .notifier_call = wdt_power_event,
+};
+
+static int __init diag288_init(void)
+{
+       int ret;
+       char ebc_begin[] = {
+               194, 197, 199, 201, 213
+       };
+
+       watchdog_set_nowayout(&wdt_dev, nowayout_info);
+
+       if (MACHINE_IS_VM) {
+               pr_info("The watchdog device driver detected a z/VM environment\n");
+               if (__diag288_vm(WDT_FUNC_INIT, 15,
+                                ebc_begin, sizeof(ebc_begin)) != 0) {
+                       pr_err("The watchdog cannot be initialized\n");
+                       return -EINVAL;
+               }
+       } else if (MACHINE_IS_LPAR) {
+               pr_info("The watchdog device driver detected an LPAR environment\n");
+               if (__diag288_lpar(WDT_FUNC_INIT, 30, LPARWDT_RESTART)) {
+                       pr_err("The watchdog cannot be initialized\n");
+                       return -EINVAL;
+               }
+       } else {
+               pr_err("Linux runs in an environment that does not support the diag288 watchdog\n");
+               return -ENODEV;
+       }
+
+       if (__diag288_lpar(WDT_FUNC_CANCEL, 0, 0)) {
+               pr_err("The watchdog cannot be deactivated\n");
+               return -EINVAL;
+       }
+
+       ret = register_pm_notifier(&wdt_power_notifier);
+       if (ret)
+               return ret;
+
+       ret = watchdog_register_device(&wdt_dev);
+       if (ret)
+               unregister_pm_notifier(&wdt_power_notifier);
+
+       return ret;
+}
+
+static void __exit diag288_exit(void)
+{
+       watchdog_unregister_device(&wdt_dev);
+       unregister_pm_notifier(&wdt_power_notifier);
+}
+
+module_init(diag288_init);
+module_exit(diag288_exit);
index b7a506f..5c660c7 100644 (file)
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
                 * p2m are consistent.
                 */
                if (!xen_feature(XENFEAT_auto_translated_physmap)) {
-                       unsigned long p;
-                       struct page   *scratch_page = get_balloon_scratch_page();
-
                        if (!PageHighMem(page)) {
+                               struct page *scratch_page = get_balloon_scratch_page();
+
                                ret = HYPERVISOR_update_va_mapping(
                                                (unsigned long)__va(pfn << PAGE_SHIFT),
                                                pfn_pte(page_to_pfn(scratch_page),
                                                        PAGE_KERNEL_RO), 0);
                                BUG_ON(ret);
-                       }
-                       p = page_to_pfn(scratch_page);
-                       __set_phys_to_machine(pfn, pfn_to_mfn(p));
 
-                       put_balloon_scratch_page();
+                               put_balloon_scratch_page();
+                       }
+                       __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
                }
 #endif
 
index 6d325bd..eeba754 100644 (file)
@@ -1168,7 +1168,8 @@ int gnttab_resume(void)
 
 int gnttab_suspend(void)
 {
-       gnttab_interface->unmap_frames();
+       if (!xen_feature(XENFEAT_auto_translated_physmap))
+               gnttab_interface->unmap_frames();
        return 0;
 }
 
@@ -1194,18 +1195,20 @@ static int gnttab_expand(unsigned int req_entries)
 int gnttab_init(void)
 {
        int i;
+       unsigned long max_nr_grant_frames;
        unsigned int max_nr_glist_frames, nr_glist_frames;
        unsigned int nr_init_grefs;
        int ret;
 
        gnttab_request_version();
+       max_nr_grant_frames = gnttab_max_grant_frames();
        nr_grant_frames = 1;
 
        /* Determine the maximum number of frames required for the
         * grant reference free list on the current hypervisor.
         */
        BUG_ON(grefs_per_grant_frame == 0);
-       max_nr_glist_frames = (gnttab_max_grant_frames() *
+       max_nr_glist_frames = (max_nr_grant_frames *
                               grefs_per_grant_frame / RPP);
 
        gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
@@ -1222,6 +1225,11 @@ int gnttab_init(void)
                }
        }
 
+       ret = arch_gnttab_init(max_nr_grant_frames,
+                              nr_status_frames(max_nr_grant_frames));
+       if (ret < 0)
+               goto ini_nomem;
+
        if (gnttab_setup() < 0) {
                ret = -ENODEV;
                goto ini_nomem;
index c3667b2..5f1e1f3 100644 (file)
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
 
        if (!si->cancelled) {
                xen_irq_resume();
-               xen_console_resume();
                xen_timer_resume();
        }
 
@@ -135,6 +134,10 @@ static void do_suspend(void)
 
        err = stop_machine(xen_suspend, &si, cpumask_of(0));
 
+       /* Resume console as early as possible. */
+       if (!si.cancelled)
+               xen_console_resume();
+
        raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
 
        dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
index 5747417..0862d34 100644 (file)
@@ -219,6 +219,12 @@ $(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep)
 obj-y                           += $(patsubst %,%.gen.o, $(fw-external-y))
 obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y))
 
+ifeq ($(KBUILD_SRC),)
+# Makefile.build only creates subdirectories for O= builds, but external
+# firmware might live outside the kernel source tree
+_dummy := $(foreach d,$(addprefix $(obj)/,$(dir $(fw-external-y))), $(shell [ -d $(d) ] || mkdir -p $(d)))
+endif
+
 # Remove .S files and binaries created from ihex
 # (during 'make clean' .config isn't included so they're all in $(fw-shipped-))
 targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \
index 42dd2e4..35de0c0 100644 (file)
@@ -55,13 +55,13 @@ static int __init afs_get_client_UUID(void)
        afs_uuid.time_low = uuidtime;
        afs_uuid.time_mid = uuidtime >> 32;
        afs_uuid.time_hi_and_version = (uuidtime >> 48) & AFS_UUID_TIMEHI_MASK;
-       afs_uuid.time_hi_and_version = AFS_UUID_VERSION_TIME;
+       afs_uuid.time_hi_and_version |= AFS_UUID_VERSION_TIME;
 
        get_random_bytes(&clockseq, 2);
        afs_uuid.clock_seq_low = clockseq;
        afs_uuid.clock_seq_hi_and_reserved =
                (clockseq >> 8) & AFS_UUID_CLOCKHI_MASK;
-       afs_uuid.clock_seq_hi_and_reserved = AFS_UUID_VARIANT_STD;
+       afs_uuid.clock_seq_hi_and_reserved |= AFS_UUID_VARIANT_STD;
 
        _debug("AFS UUID: %08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x",
               afs_uuid.time_low,
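The two-character fix above matters: plain assignment wiped out the timestamp and clock-sequence bits stored on the immediately preceding lines, while OR-ing the version/variant constants in merges them. A worked sketch; the 0x1000 version constant is for illustration only (RFC 4122 puts the UUID version in bits 15:12 of time_hi_and_version):

    u16 field = 0x0abc;     /* top 12 timestamp bits already stored */

    /* buggy: field  = 0x1000;  ->  0x1000, timestamp destroyed */
    field |= 0x1000;        /* fixed:        0x1abc, timestamp kept */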
index 4f078c0..1c9c5f0 100644 (file)
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
        struct kioctx_cpu *kcpu;
+       unsigned long flags;
 
        preempt_disable();
        kcpu = this_cpu_ptr(ctx->cpu);
 
+       local_irq_save(flags);
        kcpu->reqs_available += nr;
+
        while (kcpu->reqs_available >= ctx->req_batch * 2) {
                kcpu->reqs_available -= ctx->req_batch;
                atomic_add(ctx->req_batch, &ctx->reqs_available);
        }
 
+       local_irq_restore(flags);
        preempt_enable();
 }
 
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
        struct kioctx_cpu *kcpu;
        bool ret = false;
+       unsigned long flags;
 
        preempt_disable();
        kcpu = this_cpu_ptr(ctx->cpu);
 
+       local_irq_save(flags);
        if (!kcpu->reqs_available) {
                int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
        ret = true;
        kcpu->reqs_available--;
 out:
+       local_irq_restore(flags);
        preempt_enable();
        return ret;
 }
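The hunks above close a race: aio_complete() can call put_reqs_available() from interrupt context, and preempt_disable() keeps the task on one CPU but does not stop a local interrupt from re-entering the counter update. A minimal sketch of guarding a per-cpu counter that is also touched from irq context; the counter itself is hypothetical:

    #include <linux/percpu.h>
    #include <linux/irqflags.h>

    static DEFINE_PER_CPU(unsigned int, reqs_avail);

    static void put_one(void)
    {
            unsigned long flags;

            preempt_disable();      /* stay on this CPU... */
            local_irq_save(flags);  /* ...and fence off local interrupts */
            __this_cpu_inc(reqs_avail);
            local_irq_restore(flags);
            preempt_enable();
    }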
@@ -1021,6 +1028,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
        /* everything turned out well, dispose of the aiocb. */
        kiocb_free(iocb);
+       put_reqs_available(ctx, 1);
 
        /*
         * We have to order our ring_info tail store above and test
@@ -1062,6 +1070,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
        if (head == tail)
                goto out;
 
+       head %= ctx->nr_events;
+       tail %= ctx->nr_events;
+
        while (ret < nr) {
                long avail;
                struct io_event *ev;
@@ -1100,8 +1111,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
        flush_dcache_page(ctx->ring_pages[0]);
 
        pr_debug("%li  h%u t%u\n", ret, head, tail);
-
-       put_reqs_available(ctx, ret);
 out:
        mutex_unlock(&ctx->ring_lock);
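In this version of the ring code the head and tail values sampled here can exceed ctx->nr_events, so they must be folded into the ring before being used as indices; the hunk above adds the missing reductions. A worked sketch of the wraparound arithmetic:

    /* With nr_events = 128:  head = 130 refers to slot 130 % 128 = 2. */
    unsigned head = 130, tail = 135, nr = 128;

    head %= nr;             /* 2 */
    tail %= nr;             /* 7 */
    /* entries available to read: (tail - head + nr) % nr == 5 */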
 
index d7bd395..1c55388 100644 (file)
@@ -210,7 +210,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
        int pipefd;
        struct autofs_sb_info *sbi;
        struct autofs_info *ino;
-       int pgrp;
+       int pgrp = 0;
        bool pgrp_set = false;
        int ret = -EINVAL;
 
index 92371c4..1daea0b 100644 (file)
@@ -821,7 +821,7 @@ static void free_workspace(int type, struct list_head *workspace)
 
        spin_lock(workspace_lock);
        if (*num_workspace < num_online_cpus()) {
-               list_add_tail(workspace, idle_workspace);
+               list_add(workspace, idle_workspace);
                (*num_workspace)++;
                spin_unlock(workspace_lock);
                goto wake;
index b7e2c1c..be91397 100644 (file)
@@ -1259,11 +1259,19 @@ struct btrfs_block_group_cache {
        spinlock_t lock;
        u64 pinned;
        u64 reserved;
+       u64 delalloc_bytes;
        u64 bytes_super;
        u64 flags;
        u64 sectorsize;
        u64 cache_generation;
 
+       /*
+        * It is only used for the delayed (delalloc) data space allocation,
+        * because only the data space allocation and the related metadata
+        * update can be done across transactions.
+        */
+       struct rw_semaphore data_rwsem;
+
        /* for raid56, this is a full stripe, without parity */
        unsigned long full_stripe_len;
 
@@ -3316,7 +3324,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                                   struct btrfs_key *ins);
 int btrfs_reserve_extent(struct btrfs_root *root, u64 num_bytes,
                         u64 min_alloc_size, u64 empty_size, u64 hint_byte,
-                        struct btrfs_key *ins, int is_data);
+                        struct btrfs_key *ins, int is_data, int delalloc);
 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
                  struct extent_buffer *buf, int full_backref, int no_quota);
 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
@@ -3330,7 +3338,8 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans,
                      u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
                      u64 owner, u64 offset, int no_quota);
 
-int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len);
+int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len,
+                              int delalloc);
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                                       u64 start, u64 len);
 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
index 2af6e66..eea26e1 100644 (file)
@@ -36,6 +36,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
                                       int scrub_ret);
@@ -562,6 +563,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
                fs_info->fs_devices->latest_bdev = tgt_device->bdev;
        list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 
+       /* replace the sysfs entry */
+       btrfs_kobj_rm_device(fs_info, src_device);
+       btrfs_kobj_add_device(fs_info, tgt_device);
+
        btrfs_rm_dev_replace_blocked(fs_info);
 
        btrfs_rm_dev_replace_srcdev(fs_info, src_device);
index 8bb4aa1..08e65e9 100644 (file)
@@ -369,7 +369,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 out:
        unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
                             &cached_state, GFP_NOFS);
-       btrfs_tree_read_unlock_blocking(eb);
+       if (need_lock)
+               btrfs_tree_read_unlock_blocking(eb);
        return ret;
 }
 
@@ -2904,7 +2905,9 @@ retry_root_backup:
                if (ret)
                        goto fail_qgroup;
 
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(tree_root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret < 0) {
                        printk(KERN_WARNING
                               "BTRFS: failed to recover relocation\n");
index fafb3e5..813537f 100644 (file)
@@ -105,7 +105,8 @@ static int find_next_key(struct btrfs_path *path, int level,
 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
                            int dump_block_groups);
 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve);
+                                      u64 num_bytes, int reserve,
+                                      int delalloc);
 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
                               u64 num_bytes);
 int btrfs_pin_extent(struct btrfs_root *root,
@@ -3260,7 +3261,8 @@ again:
 
        spin_lock(&block_group->lock);
        if (block_group->cached != BTRFS_CACHE_FINISHED ||
-           !btrfs_test_opt(root, SPACE_CACHE)) {
+           !btrfs_test_opt(root, SPACE_CACHE) ||
+           block_group->delalloc_bytes) {
                /*
                 * don't bother trying to write stuff out _if_
                 * a) we're not cached,
@@ -5613,6 +5615,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
  * @cache:     The cache we are manipulating
  * @num_bytes: The number of bytes in question
  * @reserve:   One of the reservation enums
+ * @delalloc:  Whether the blocks are allocated for a delalloc write
  *
  * This is called by the allocator when it reserves space, or by somebody who is
  * freeing space that was never actually used on disk.  For example if you
@@ -5631,7 +5634,7 @@ int btrfs_exclude_logged_extents(struct btrfs_root *log,
  * succeeds.
  */
 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
-                                      u64 num_bytes, int reserve)
+                                      u64 num_bytes, int reserve, int delalloc)
 {
        struct btrfs_space_info *space_info = cache->space_info;
        int ret = 0;
@@ -5650,12 +5653,18 @@ static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
                                                num_bytes, 0);
                                space_info->bytes_may_use -= num_bytes;
                        }
+
+                       if (delalloc)
+                               cache->delalloc_bytes += num_bytes;
                }
        } else {
                if (cache->ro)
                        space_info->bytes_readonly += num_bytes;
                cache->reserved -= num_bytes;
                space_info->bytes_reserved -= num_bytes;
+
+               if (delalloc)
+                       cache->delalloc_bytes -= num_bytes;
        }
        spin_unlock(&cache->lock);
        spin_unlock(&space_info->lock);
@@ -5669,7 +5678,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
        struct btrfs_caching_control *next;
        struct btrfs_caching_control *caching_ctl;
        struct btrfs_block_group_cache *cache;
-       struct btrfs_space_info *space_info;
 
        down_write(&fs_info->commit_root_sem);
 
@@ -5692,9 +5700,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 
        up_write(&fs_info->commit_root_sem);
 
-       list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
-               percpu_counter_set(&space_info->total_bytes_pinned, 0);
-
        update_global_block_rsv(fs_info);
 }
 
@@ -5732,6 +5737,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
                spin_lock(&cache->lock);
                cache->pinned -= len;
                space_info->bytes_pinned -= len;
+               percpu_counter_add(&space_info->total_bytes_pinned, -len);
                if (cache->ro) {
                        space_info->bytes_readonly += len;
                        readonly = true;
@@ -6206,7 +6212,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
                WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
 
                btrfs_add_free_space(cache, buf->start, buf->len);
-               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE);
+               btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
                trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
                pin = 0;
        }
@@ -6365,6 +6371,70 @@ enum btrfs_loop_type {
        LOOP_NO_EMPTY_SIZE = 3,
 };
 
+static inline void
+btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
+                      int delalloc)
+{
+       if (delalloc)
+               down_read(&cache->data_rwsem);
+}
+
+static inline void
+btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
+                      int delalloc)
+{
+       btrfs_get_block_group(cache);
+       if (delalloc)
+               down_read(&cache->data_rwsem);
+}
+
+static struct btrfs_block_group_cache *
+btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
+                  struct btrfs_free_cluster *cluster,
+                  int delalloc)
+{
+       struct btrfs_block_group_cache *used_bg;
+       bool locked = false;
+again:
+       spin_lock(&cluster->refill_lock);
+       if (locked) {
+               if (used_bg == cluster->block_group)
+                       return used_bg;
+
+               up_read(&used_bg->data_rwsem);
+               btrfs_put_block_group(used_bg);
+       }
+
+       used_bg = cluster->block_group;
+       if (!used_bg)
+               return NULL;
+
+       if (used_bg == block_group)
+               return used_bg;
+
+       btrfs_get_block_group(used_bg);
+
+       if (!delalloc)
+               return used_bg;
+
+       if (down_read_trylock(&used_bg->data_rwsem))
+               return used_bg;
+
+       spin_unlock(&cluster->refill_lock);
+       down_read(&used_bg->data_rwsem);
+       locked = true;
+       goto again;
+}
+
+static inline void
+btrfs_release_block_group(struct btrfs_block_group_cache *cache,
+                        int delalloc)
+{
+       if (delalloc)
+               up_read(&cache->data_rwsem);
+       btrfs_put_block_group(cache);
+}
+
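btrfs_lock_cluster() above is a textbook instance of taking a sleeping lock that ranks below an already-held spinlock: try the rwsem with down_read_trylock() first, and on contention drop the spinlock, block on the rwsem, then retake the spinlock and revalidate. A condensed sketch of the pattern with hypothetical types and helpers; like the real code, it returns with the spinlock still held:

    static struct obj *lock_current(struct parent *a)
    {
            struct obj *o;
            bool locked = false;
    again:
            spin_lock(&a->lock);
            if (locked) {
                    if (o == a->current_obj)
                            return o;       /* both held, still valid */
                    up_read(&o->rwsem);     /* world changed: retry */
                    put_ref(o);
                    locked = false;
            }
            o = a->current_obj;
            if (!o || down_read_trylock(&o->rwsem))
                    return o;               /* fast path */
            get_ref(o);                     /* keep o alive across the sleep */
            spin_unlock(&a->lock);          /* drop the spinlock first */
            down_read(&o->rwsem);           /* may sleep */
            locked = true;
            goto again;
    }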
 /*
  * walks the btree of allocated extents and find a hole of a given size.
  * The key ins is changed to record the hole:
@@ -6379,7 +6449,7 @@ enum btrfs_loop_type {
 static noinline int find_free_extent(struct btrfs_root *orig_root,
                                     u64 num_bytes, u64 empty_size,
                                     u64 hint_byte, struct btrfs_key *ins,
-                                    u64 flags)
+                                    u64 flags, int delalloc)
 {
        int ret = 0;
        struct btrfs_root *root = orig_root->fs_info->extent_root;
@@ -6467,6 +6537,7 @@ static noinline int find_free_extent(struct btrfs_root *orig_root,
                                up_read(&space_info->groups_sem);
                        } else {
                                index = get_block_group_index(block_group);
+                               btrfs_lock_block_group(block_group, delalloc);
                                goto have_block_group;
                        }
                } else if (block_group) {
@@ -6481,7 +6552,7 @@ search:
                u64 offset;
                int cached;
 
-               btrfs_get_block_group(block_group);
+               btrfs_grab_block_group(block_group, delalloc);
                search_start = block_group->key.objectid;
 
                /*
@@ -6529,16 +6600,16 @@ have_block_group:
                         * the refill lock keeps out other
                         * people trying to start a new cluster
                         */
-                       spin_lock(&last_ptr->refill_lock);
-                       used_block_group = last_ptr->block_group;
-                       if (used_block_group != block_group &&
-                           (!used_block_group ||
-                            used_block_group->ro ||
-                            !block_group_bits(used_block_group, flags)))
+                       used_block_group = btrfs_lock_cluster(block_group,
+                                                             last_ptr,
+                                                             delalloc);
+                       if (!used_block_group)
                                goto refill_cluster;
 
-                       if (used_block_group != block_group)
-                               btrfs_get_block_group(used_block_group);
+                       if (used_block_group != block_group &&
+                           (used_block_group->ro ||
+                            !block_group_bits(used_block_group, flags)))
+                               goto release_cluster;
 
                        offset = btrfs_alloc_from_cluster(used_block_group,
                                                last_ptr,
@@ -6552,16 +6623,15 @@ have_block_group:
                                                used_block_group,
                                                search_start, num_bytes);
                                if (used_block_group != block_group) {
-                                       btrfs_put_block_group(block_group);
+                                       btrfs_release_block_group(block_group,
+                                                                 delalloc);
                                        block_group = used_block_group;
                                }
                                goto checks;
                        }
 
                        WARN_ON(last_ptr->block_group != used_block_group);
-                       if (used_block_group != block_group)
-                               btrfs_put_block_group(used_block_group);
-refill_cluster:
+release_cluster:
                        /* If we are on LOOP_NO_EMPTY_SIZE, we can't
                         * set up a new cluster, so let's just skip it
                         * and let the allocator find whatever block
@@ -6578,8 +6648,10 @@ refill_cluster:
                         * succeeding in the unclustered
                         * allocation.  */
                        if (loop >= LOOP_NO_EMPTY_SIZE &&
-                           last_ptr->block_group != block_group) {
+                           used_block_group != block_group) {
                                spin_unlock(&last_ptr->refill_lock);
+                               btrfs_release_block_group(used_block_group,
+                                                         delalloc);
                                goto unclustered_alloc;
                        }
 
@@ -6589,6 +6661,10 @@ refill_cluster:
                         */
                        btrfs_return_cluster_to_free_space(NULL, last_ptr);
 
+                       if (used_block_group != block_group)
+                               btrfs_release_block_group(used_block_group,
+                                                         delalloc);
+refill_cluster:
                        if (loop >= LOOP_NO_EMPTY_SIZE) {
                                spin_unlock(&last_ptr->refill_lock);
                                goto unclustered_alloc;
@@ -6696,7 +6772,7 @@ checks:
                BUG_ON(offset > search_start);
 
                ret = btrfs_update_reserved_bytes(block_group, num_bytes,
-                                                 alloc_type);
+                                                 alloc_type, delalloc);
                if (ret == -EAGAIN) {
                        btrfs_add_free_space(block_group, offset, num_bytes);
                        goto loop;
@@ -6708,13 +6784,13 @@ checks:
 
                trace_btrfs_reserve_extent(orig_root, block_group,
                                           search_start, num_bytes);
-               btrfs_put_block_group(block_group);
+               btrfs_release_block_group(block_group, delalloc);
                break;
 loop:
                failed_cluster_refill = false;
                failed_alloc = false;
                BUG_ON(index != get_block_group_index(block_group));
-               btrfs_put_block_group(block_group);
+               btrfs_release_block_group(block_group, delalloc);
        }
        up_read(&space_info->groups_sem);
 
@@ -6827,7 +6903,7 @@ again:
 int btrfs_reserve_extent(struct btrfs_root *root,
                         u64 num_bytes, u64 min_alloc_size,
                         u64 empty_size, u64 hint_byte,
-                        struct btrfs_key *ins, int is_data)
+                        struct btrfs_key *ins, int is_data, int delalloc)
 {
        bool final_tried = false;
        u64 flags;
@@ -6837,7 +6913,7 @@ int btrfs_reserve_extent(struct btrfs_root *root,
 again:
        WARN_ON(num_bytes < root->sectorsize);
        ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
-                              flags);
+                              flags, delalloc);
 
        if (ret == -ENOSPC) {
                if (!final_tried && ins->offset) {
@@ -6862,7 +6938,8 @@ again:
 }
 
 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
-                                       u64 start, u64 len, int pin)
+                                       u64 start, u64 len,
+                                       int pin, int delalloc)
 {
        struct btrfs_block_group_cache *cache;
        int ret = 0;
@@ -6881,7 +6958,7 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
                pin_down_extent(root, cache, start, len, 1);
        else {
                btrfs_add_free_space(cache, start, len);
-               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE);
+               btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
        }
        btrfs_put_block_group(cache);
 
@@ -6891,15 +6968,15 @@ static int __btrfs_free_reserved_extent(struct btrfs_root *root,
 }
 
 int btrfs_free_reserved_extent(struct btrfs_root *root,
-                                       u64 start, u64 len)
+                              u64 start, u64 len, int delalloc)
 {
-       return __btrfs_free_reserved_extent(root, start, len, 0);
+       return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
 }
 
 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
                                       u64 start, u64 len)
 {
-       return __btrfs_free_reserved_extent(root, start, len, 1);
+       return __btrfs_free_reserved_extent(root, start, len, 1, 0);
 }
 
 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
@@ -7114,7 +7191,7 @@ int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
                return -EINVAL;
 
        ret = btrfs_update_reserved_bytes(block_group, ins->offset,
-                                         RESERVE_ALLOC_NO_ACCOUNT);
+                                         RESERVE_ALLOC_NO_ACCOUNT, 0);
        BUG_ON(ret); /* logic error */
        ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
                                         0, owner, offset, ins, 1);
@@ -7256,7 +7333,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
                return ERR_CAST(block_rsv);
 
        ret = btrfs_reserve_extent(root, blocksize, blocksize,
-                                  empty_size, hint, &ins, 0);
+                                  empty_size, hint, &ins, 0, 0);
        if (ret) {
                unuse_block_rsv(root->fs_info, block_rsv, blocksize);
                return ERR_PTR(ret);
@@ -8659,6 +8736,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
                                               start);
        atomic_set(&cache->count, 1);
        spin_lock_init(&cache->lock);
+       init_rwsem(&cache->data_rwsem);
        INIT_LIST_HEAD(&cache->list);
        INIT_LIST_HEAD(&cache->cluster_list);
        INIT_LIST_HEAD(&cache->new_bg_list);
index 15ce5f2..ccc264e 100644 (file)
@@ -158,7 +158,6 @@ struct extent_buffer {
         * to unlock
         */
        wait_queue_head_t read_lock_wq;
-       wait_queue_head_t lock_wq;
        struct page *pages[INLINE_EXTENT_BUFFER_PAGES];
 #ifdef CONFIG_BTRFS_DEBUG
        struct list_head leak_list;
index 1874aee..225302b 100644 (file)
@@ -75,6 +75,8 @@ void free_extent_map(struct extent_map *em)
        if (atomic_dec_and_test(&em->refs)) {
                WARN_ON(extent_map_in_tree(em));
                WARN_ON(!list_empty(&em->list));
+               if (test_bit(EXTENT_FLAG_FS_MAPPING, &em->flags))
+                       kfree(em->bdev);
                kmem_cache_free(extent_map_cache, em);
        }
 }
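
A note on this hunk: em->bdev is overloaded for chunk mappings to carry a
struct map_lookup rather than a real block device, and the new
EXTENT_FLAG_FS_MAPPING bit moves ownership of that pointer into
free_extent_map(), replacing the scattered kfree(map) calls removed later
in this series. A sketch of the pattern, with hypothetical names:

	#include <stdlib.h>

	#define FLAG_OWNS_PRIV (1u << 0)

	struct mapping {
		unsigned int flags;
		void *priv;	/* chunk map when FLAG_OWNS_PRIV is set */
	};

	static void mapping_free(struct mapping *m)
	{
		if (m->flags & FLAG_OWNS_PRIV)
			free(m->priv);	/* one free point for all callers */
		free(m);
	}
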
index e7fd8a5..b2991fd 100644 (file)
@@ -15,6 +15,7 @@
 #define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
 #define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
 #define EXTENT_FLAG_FILLING 5 /* Filling in a preallocated extent */
+#define EXTENT_FLAG_FS_MAPPING 6 /* filesystem extent mapping type */
 
 struct extent_map {
        struct rb_node rb_node;
index 372b05f..2b0a627 100644 (file)
@@ -274,18 +274,32 @@ struct io_ctl {
 };
 
 static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
-                      struct btrfs_root *root)
+                      struct btrfs_root *root, int write)
 {
+       int num_pages;
+       int check_crcs = 0;
+
+       num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
+                   PAGE_CACHE_SHIFT;
+
+       if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
+               check_crcs = 1;
+
+       /* Make sure we can fit our crcs into the first page */
+       if (write && check_crcs &&
+           (num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
+               return -ENOSPC;
+
        memset(io_ctl, 0, sizeof(struct io_ctl));
-       io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
-               PAGE_CACHE_SHIFT;
-       io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
-                               GFP_NOFS);
+
+       io_ctl->pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS);
        if (!io_ctl->pages)
                return -ENOMEM;
+
+       io_ctl->num_pages = num_pages;
        io_ctl->root = root;
-       if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
-               io_ctl->check_crcs = 1;
+       io_ctl->check_crcs = check_crcs;
+
        return 0;
 }
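
The new up-front guard works because the writer stores one u32 checksum
per page in the first page of the cache file, so a writable cache may span
at most PAGE_CACHE_SIZE / sizeof(u32) pages. A quick worked check,
assuming 4 KiB pages:

	#include <stdio.h>

	int main(void)
	{
		const unsigned long page_size = 4096;	/* PAGE_CACHE_SIZE on x86 */
		unsigned long max_pages = page_size / sizeof(unsigned int);

		/* prints: 1024 pages (4096 KiB) */
		printf("%lu pages (%lu KiB)\n",
		       max_pages, max_pages * page_size / 1024);
		return 0;
	}

i.e. about a 4 MiB cache file; larger files now fail with -ENOSPC before
any pages are allocated or locked.
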
 
@@ -666,6 +680,13 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        generation = btrfs_free_space_generation(leaf, header);
        btrfs_release_path(path);
 
+       if (!BTRFS_I(inode)->generation) {
+               btrfs_info(root->fs_info,
+                          "The free space cache file (%llu) is invalid. skip it\n",
+                          offset);
+               return 0;
+       }
+
        if (BTRFS_I(inode)->generation != generation) {
                btrfs_err(root->fs_info,
                        "free space inode generation (%llu) "
@@ -677,7 +698,7 @@ static int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
        if (!num_entries)
                return 0;
 
-       ret = io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root, 0);
        if (ret)
                return ret;
 
@@ -957,19 +978,18 @@ fail:
 }
 
 static noinline_for_stack int
-add_ioctl_entries(struct btrfs_root *root,
-                 struct inode *inode,
-                 struct btrfs_block_group_cache *block_group,
-                 struct io_ctl *io_ctl,
-                 struct extent_state **cached_state,
-                 struct list_head *bitmap_list,
-                 int *entries)
+write_pinned_extent_entries(struct btrfs_root *root,
+                           struct btrfs_block_group_cache *block_group,
+                           struct io_ctl *io_ctl,
+                           int *entries)
 {
        u64 start, extent_start, extent_end, len;
-       struct list_head *pos, *n;
        struct extent_io_tree *unpin = NULL;
        int ret;
 
+       if (!block_group)
+               return 0;
+
        /*
         * We want to add any pinned extents to our free space cache
         * so we don't leak the space
@@ -979,23 +999,19 @@ add_ioctl_entries(struct btrfs_root *root,
         */
        unpin = root->fs_info->pinned_extents;
 
-       if (block_group)
-               start = block_group->key.objectid;
+       start = block_group->key.objectid;
 
-       while (block_group && (start < block_group->key.objectid +
-                              block_group->key.offset)) {
+       while (start < block_group->key.objectid + block_group->key.offset) {
                ret = find_first_extent_bit(unpin, start,
                                            &extent_start, &extent_end,
                                            EXTENT_DIRTY, NULL);
-               if (ret) {
-                       ret = 0;
-                       break;
-               }
+               if (ret)
+                       return 0;
 
                /* This pinned extent is out of our range */
                if (extent_start >= block_group->key.objectid +
                    block_group->key.offset)
-                       break;
+                       return 0;
 
                extent_start = max(extent_start, start);
                extent_end = min(block_group->key.objectid +
@@ -1005,11 +1021,20 @@ add_ioctl_entries(struct btrfs_root *root,
                *entries += 1;
                ret = io_ctl_add_entry(io_ctl, extent_start, len, NULL);
                if (ret)
-                       goto out_nospc;
+                       return -ENOSPC;
 
                start = extent_end;
        }
 
+       return 0;
+}
+
+static noinline_for_stack int
+write_bitmap_entries(struct io_ctl *io_ctl, struct list_head *bitmap_list)
+{
+       struct list_head *pos, *n;
+       int ret;
+
        /* Write out the bitmaps */
        list_for_each_safe(pos, n, bitmap_list) {
                struct btrfs_free_space *entry =
@@ -1017,36 +1042,24 @@ add_ioctl_entries(struct btrfs_root *root,
 
                ret = io_ctl_add_bitmap(io_ctl, entry->bitmap);
                if (ret)
-                       goto out_nospc;
+                       return -ENOSPC;
                list_del_init(&entry->list);
        }
 
-       /* Zero out the rest of the pages just to make sure */
-       io_ctl_zero_remaining_pages(io_ctl);
-
-       ret = btrfs_dirty_pages(root, inode, io_ctl->pages, io_ctl->num_pages,
-                               0, i_size_read(inode), cached_state);
-       io_ctl_drop_pages(io_ctl);
-       unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
-                            i_size_read(inode) - 1, cached_state, GFP_NOFS);
+       return 0;
+}
 
-       if (ret)
-               goto fail;
+static int flush_dirty_cache(struct inode *inode)
+{
+       int ret;
 
        ret = btrfs_wait_ordered_range(inode, 0, (u64)-1);
-       if (ret) {
+       if (ret)
                clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
                                 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
                                 GFP_NOFS);
-               goto fail;
-       }
-       return 0;
 
-fail:
-       return -1;
-
-out_nospc:
-       return -ENOSPC;
+       return ret;
 }
 
 static void noinline_for_stack
@@ -1056,6 +1069,7 @@ cleanup_write_cache_enospc(struct inode *inode,
                           struct list_head *bitmap_list)
 {
        struct list_head *pos, *n;
+
        list_for_each_safe(pos, n, bitmap_list) {
                struct btrfs_free_space *entry =
                        list_entry(pos, struct btrfs_free_space, list);
@@ -1088,64 +1102,104 @@ static int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
 {
        struct extent_state *cached_state = NULL;
        struct io_ctl io_ctl;
-       struct list_head bitmap_list;
+       LIST_HEAD(bitmap_list);
        int entries = 0;
        int bitmaps = 0;
        int ret;
-       int err = -1;
-
-       INIT_LIST_HEAD(&bitmap_list);
 
        if (!i_size_read(inode))
                return -1;
 
-       ret = io_ctl_init(&io_ctl, inode, root);
+       ret = io_ctl_init(&io_ctl, inode, root, 1);
        if (ret)
                return -1;
 
+       if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA)) {
+               down_write(&block_group->data_rwsem);
+               spin_lock(&block_group->lock);
+               if (block_group->delalloc_bytes) {
+                       block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+                       spin_unlock(&block_group->lock);
+                       up_write(&block_group->data_rwsem);
+                       BTRFS_I(inode)->generation = 0;
+                       ret = 0;
+                       goto out;
+               }
+               spin_unlock(&block_group->lock);
+       }
+
        /* Lock all pages first so we can lock the extent safely. */
        io_ctl_prepare_pages(&io_ctl, inode, 0);
 
        lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
                         0, &cached_state);
 
-
-       /* Make sure we can fit our crcs into the first page */
-       if (io_ctl.check_crcs &&
-           (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE)
-               goto out_nospc;
-
        io_ctl_set_generation(&io_ctl, trans->transid);
 
+       /* Write out the extent entries in the free space cache */
        ret = write_cache_extent_entries(&io_ctl, ctl,
                                         block_group, &entries, &bitmaps,
                                         &bitmap_list);
        if (ret)
                goto out_nospc;
 
-       ret = add_ioctl_entries(root, inode, block_group, &io_ctl,
-                               &cached_state, &bitmap_list, &entries);
+       /*
+        * Some space that is freed in the current transaction is pinned;
+        * it will be added to the free space cache after the transaction is
+        * committed, so we must not lose it.
+        */
+       ret = write_pinned_extent_entries(root, block_group, &io_ctl, &entries);
+       if (ret)
+               goto out_nospc;
 
-       if (ret == -ENOSPC)
+       /* Finally, write out all the bitmaps. */
+       ret = write_bitmap_entries(&io_ctl, &bitmap_list);
+       if (ret)
                goto out_nospc;
-       else if (ret)
+
+       /* Zero out the rest of the pages just to make sure */
+       io_ctl_zero_remaining_pages(&io_ctl);
+
+       /* Everything is written out; now dirty the pages in the file. */
+       ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
+                               0, i_size_read(inode), &cached_state);
+       if (ret)
+               goto out_nospc;
+
+       if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+               up_write(&block_group->data_rwsem);
+       /*
+        * Release the pages and unlock the extent; we will flush
+        * them out later.
+        */
+       io_ctl_drop_pages(&io_ctl);
+
+       unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
+                            i_size_read(inode) - 1, &cached_state, GFP_NOFS);
+
+       /* Flush the dirty pages in the cache file. */
+       ret = flush_dirty_cache(inode);
+       if (ret)
                goto out;
 
-       err = update_cache_item(trans, root, inode, path, offset,
+       /* Update the cache item to tell everyone this cache file is valid. */
+       ret = update_cache_item(trans, root, inode, path, offset,
                                entries, bitmaps);
-
 out:
        io_ctl_free(&io_ctl);
-       if (err) {
+       if (ret) {
                invalidate_inode_pages2(inode->i_mapping);
                BTRFS_I(inode)->generation = 0;
        }
        btrfs_update_inode(trans, root, inode);
-       return err;
+       return ret;
 
 out_nospc:
-
        cleanup_write_cache_enospc(inode, &io_ctl, &cached_state, &bitmap_list);
+
+       if (block_group && (block_group->flags & BTRFS_BLOCK_GROUP_DATA))
+               up_write(&block_group->data_rwsem);
+
        goto out;
 }
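
The restructured function above is the writer side of the data_rwsem
protocol: take the semaphore for write, give up if delalloc_bytes shows
allocations still in flight, and hold it until the cache pages have been
dirtied. A standalone analogue of the gate (hypothetical names, pthreads
in place of the kernel rwsem):

	#include <pthread.h>

	struct group {
		pthread_rwlock_t data_rwsem;
		unsigned long delalloc_bytes;	/* under ->lock in btrfs */
	};

	static int try_write_cache(struct group *g)
	{
		pthread_rwlock_wrlock(&g->data_rwsem);
		if (g->delalloc_bytes) {	/* allocations outstanding */
			pthread_rwlock_unlock(&g->data_rwsem);
			return 0;		/* skip this cycle */
		}
		/* ... write and dirty the cache pages here ... */
		pthread_rwlock_unlock(&g->data_rwsem);
		return 1;
	}
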
 
@@ -1165,6 +1219,12 @@ int btrfs_write_out_cache(struct btrfs_root *root,
                spin_unlock(&block_group->lock);
                return 0;
        }
+
+       if (block_group->delalloc_bytes) {
+               block_group->disk_cache_state = BTRFS_DC_WRITTEN;
+               spin_unlock(&block_group->lock);
+               return 0;
+       }
        spin_unlock(&block_group->lock);
 
        inode = lookup_free_space_inode(root, block_group, path);
index 8925f66..3668048 100644 (file)
@@ -693,7 +693,7 @@ retry:
                ret = btrfs_reserve_extent(root,
                                           async_extent->compressed_size,
                                           async_extent->compressed_size,
-                                          0, alloc_hint, &ins, 1);
+                                          0, alloc_hint, &ins, 1, 1);
                if (ret) {
                        int i;
 
@@ -794,7 +794,7 @@ retry:
 out:
        return ret;
 out_free_reserve:
-       btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+       btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_free:
        extent_clear_unlock_delalloc(inode, async_extent->start,
                                     async_extent->start +
@@ -917,7 +917,7 @@ static noinline int cow_file_range(struct inode *inode,
                cur_alloc_size = disk_num_bytes;
                ret = btrfs_reserve_extent(root, cur_alloc_size,
                                           root->sectorsize, 0, alloc_hint,
-                                          &ins, 1);
+                                          &ins, 1, 1);
                if (ret < 0)
                        goto out_unlock;
 
@@ -995,7 +995,7 @@ out:
        return ret;
 
 out_reserve:
-       btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+       btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
 out_unlock:
        extent_clear_unlock_delalloc(inode, start, end, locked_page,
                                     EXTENT_LOCKED | EXTENT_DO_ACCOUNTING |
@@ -2599,6 +2599,21 @@ out_kfree:
        return NULL;
 }
 
+static void btrfs_release_delalloc_bytes(struct btrfs_root *root,
+                                        u64 start, u64 len)
+{
+       struct btrfs_block_group_cache *cache;
+
+       cache = btrfs_lookup_block_group(root->fs_info, start);
+       ASSERT(cache);
+
+       spin_lock(&cache->lock);
+       cache->delalloc_bytes -= len;
+       spin_unlock(&cache->lock);
+
+       btrfs_put_block_group(cache);
+}
+
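The decrement above presumably pairs with an increment made when the
extent was reserved with the new delalloc argument (the increment side is
not visible in these hunks). A sketch of the intended invariant, names
hypothetical:

	struct group {
		unsigned long delalloc_bytes;	/* under the group lock */
	};

	/* at reservation time, with delalloc set */
	static void delalloc_reserve(struct group *g, unsigned long len)
	{
		g->delalloc_bytes += len;
	}

	/* when the ordered extent completes, exactly once per reservation */
	static void delalloc_finish(struct group *g, unsigned long len)
	{
		g->delalloc_bytes -= len;
	}
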
 /* as ordered data IO finishes, this gets called so we can finish
  * an ordered extent if the range of bytes in the file it covers are
  * fully written.
@@ -2698,6 +2713,10 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
                                                logical_len, logical_len,
                                                compress_type, 0, 0,
                                                BTRFS_FILE_EXTENT_REG);
+               if (!ret)
+                       btrfs_release_delalloc_bytes(root,
+                                                    ordered_extent->start,
+                                                    ordered_extent->disk_len);
        }
        unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
                           ordered_extent->file_offset, ordered_extent->len,
@@ -2750,7 +2769,7 @@ out:
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags) &&
                    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered_extent->flags))
                        btrfs_free_reserved_extent(root, ordered_extent->start,
-                                                  ordered_extent->disk_len);
+                                                  ordered_extent->disk_len, 1);
        }
 
 
@@ -6535,21 +6554,21 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode,
 
        alloc_hint = get_extent_allocation_hint(inode, start, len);
        ret = btrfs_reserve_extent(root, len, root->sectorsize, 0,
-                                  alloc_hint, &ins, 1);
+                                  alloc_hint, &ins, 1, 1);
        if (ret)
                return ERR_PTR(ret);
 
        em = create_pinned_em(inode, start, ins.offset, start, ins.objectid,
                              ins.offset, ins.offset, ins.offset, 0);
        if (IS_ERR(em)) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
                return em;
        }
 
        ret = btrfs_add_ordered_extent_dio(inode, start, ins.objectid,
                                           ins.offset, ins.offset, 0);
        if (ret) {
-               btrfs_free_reserved_extent(root, ins.objectid, ins.offset);
+               btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 1);
                free_extent_map(em);
                return ERR_PTR(ret);
        }
@@ -7437,7 +7456,7 @@ free_ordered:
                if (!test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags) &&
                    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags))
                        btrfs_free_reserved_extent(root, ordered->start,
-                                                  ordered->disk_len);
+                                                  ordered->disk_len, 1);
                btrfs_put_ordered_extent(ordered);
                btrfs_put_ordered_extent(ordered);
        }
@@ -8808,7 +8827,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                cur_bytes = min(num_bytes, 256ULL * 1024 * 1024);
                cur_bytes = max(cur_bytes, min_size);
                ret = btrfs_reserve_extent(root, cur_bytes, min_size, 0,
-                                          *alloc_hint, &ins, 1);
+                                          *alloc_hint, &ins, 1, 0);
                if (ret) {
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
@@ -8822,7 +8841,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
                                                  BTRFS_FILE_EXTENT_PREALLOC);
                if (ret) {
                        btrfs_free_reserved_extent(root, ins.objectid,
-                                                  ins.offset);
+                                                  ins.offset, 0);
                        btrfs_abort_transaction(trans, root, ret);
                        if (own_trans)
                                btrfs_end_transaction(trans, root);
index 0d321c2..47aceb4 100644 (file)
@@ -136,19 +136,22 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
 void btrfs_update_iflags(struct inode *inode)
 {
        struct btrfs_inode *ip = BTRFS_I(inode);
-
-       inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+       unsigned int new_fl = 0;
 
        if (ip->flags & BTRFS_INODE_SYNC)
-               inode->i_flags |= S_SYNC;
+               new_fl |= S_SYNC;
        if (ip->flags & BTRFS_INODE_IMMUTABLE)
-               inode->i_flags |= S_IMMUTABLE;
+               new_fl |= S_IMMUTABLE;
        if (ip->flags & BTRFS_INODE_APPEND)
-               inode->i_flags |= S_APPEND;
+               new_fl |= S_APPEND;
        if (ip->flags & BTRFS_INODE_NOATIME)
-               inode->i_flags |= S_NOATIME;
+               new_fl |= S_NOATIME;
        if (ip->flags & BTRFS_INODE_DIRSYNC)
-               inode->i_flags |= S_DIRSYNC;
+               new_fl |= S_DIRSYNC;
+
+       set_mask_bits(&inode->i_flags,
+                     S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
+                     new_fl);
 }
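
Building new_fl first and applying it with set_mask_bits() turns what used
to be a non-atomic clear-then-set of inode->i_flags into a single atomic
update, so a concurrent reader can never observe the flags momentarily
cleared. A userspace analogue of the helper's semantics, assuming the
usual compare-and-swap loop:

	#include <stdatomic.h>

	static unsigned long set_mask_bits_analogue(_Atomic unsigned long *p,
						    unsigned long mask,
						    unsigned long bits)
	{
		unsigned long old = atomic_load(p);
		unsigned long new;

		do {
			new = (old & ~mask) | bits;	/* replace mask only */
		} while (!atomic_compare_exchange_weak(p, &old, new));

		return old;
	}
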
 
 /*
@@ -3139,7 +3142,6 @@ out:
 static void clone_update_extent_map(struct inode *inode,
                                    const struct btrfs_trans_handle *trans,
                                    const struct btrfs_path *path,
-                                   struct btrfs_file_extent_item *fi,
                                    const u64 hole_offset,
                                    const u64 hole_len)
 {
@@ -3154,7 +3156,11 @@ static void clone_update_extent_map(struct inode *inode,
                return;
        }
 
-       if (fi) {
+       if (path) {
+               struct btrfs_file_extent_item *fi;
+
+               fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+                                   struct btrfs_file_extent_item);
                btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
                em->generation = -1;
                if (btrfs_file_extent_type(path->nodes[0], fi) ==
@@ -3508,18 +3514,15 @@ process_slot:
                                            btrfs_item_ptr_offset(leaf, slot),
                                            size);
                                inode_add_bytes(inode, datal);
-                               extent = btrfs_item_ptr(leaf, slot,
-                                               struct btrfs_file_extent_item);
                        }
 
                        /* If we have an implicit hole (NO_HOLES feature). */
                        if (drop_start < new_key.offset)
                                clone_update_extent_map(inode, trans,
-                                               path, NULL, drop_start,
+                                               NULL, drop_start,
                                                new_key.offset - drop_start);
 
-                       clone_update_extent_map(inode, trans, path,
-                                               extent, 0, 0);
+                       clone_update_extent_map(inode, trans, path, 0, 0);
 
                        btrfs_mark_buffer_dirty(leaf);
                        btrfs_release_path(path);
@@ -3562,12 +3565,10 @@ process_slot:
                        btrfs_end_transaction(trans, root);
                        goto out;
                }
+               clone_update_extent_map(inode, trans, NULL, last_dest_end,
+                                       destoff + len - last_dest_end);
                ret = clone_finish_inode_update(trans, inode, destoff + len,
                                                destoff, olen);
-               if (ret)
-                       goto out;
-               clone_update_extent_map(inode, trans, path, NULL, last_dest_end,
-                                       destoff + len - last_dest_end);
        }
 
 out:
index 01277b8..5665d21 100644 (file)
@@ -33,14 +33,14 @@ static void btrfs_assert_tree_read_locked(struct extent_buffer *eb);
  */
 void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
-       }
+       /*
+        * no lock is required.  The lock owner may change if
+        * we have a read lock, but it won't change to or away
+        * from us.  If we have the write lock, we are the owner
+        * and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
        if (rw == BTRFS_WRITE_LOCK) {
                if (atomic_read(&eb->blocking_writers) == 0) {
                        WARN_ON(atomic_read(&eb->spinning_writers) != 1);
@@ -65,14 +65,15 @@ void btrfs_set_lock_blocking_rw(struct extent_buffer *eb, int rw)
  */
 void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
-       }
+       /*
+        * no lock is required.  The lock owner may change if
+        * we have a read lock, but it won't change to or away
+        * from us.  If we have the write lock, we are the owner
+        * and it'll never change.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner)
+               return;
+
        if (rw == BTRFS_WRITE_LOCK_BLOCKING) {
                BUG_ON(atomic_read(&eb->blocking_writers) != 1);
                write_lock(&eb->lock);
@@ -99,6 +100,9 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw)
 void btrfs_tree_read_lock(struct extent_buffer *eb)
 {
 again:
+       BUG_ON(!atomic_read(&eb->blocking_writers) &&
+              current->pid == eb->lock_owner);
+
        read_lock(&eb->lock);
        if (atomic_read(&eb->blocking_writers) &&
            current->pid == eb->lock_owner) {
@@ -132,7 +136,9 @@ int btrfs_try_tree_read_lock(struct extent_buffer *eb)
        if (atomic_read(&eb->blocking_writers))
                return 0;
 
-       read_lock(&eb->lock);
+       if (!read_trylock(&eb->lock))
+               return 0;
+
        if (atomic_read(&eb->blocking_writers)) {
                read_unlock(&eb->lock);
                return 0;
@@ -151,7 +157,10 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers))
                return 0;
-       write_lock(&eb->lock);
+
+       if (!write_trylock(&eb->lock))
+               return 0;
+
        if (atomic_read(&eb->blocking_writers) ||
            atomic_read(&eb->blocking_readers)) {
                write_unlock(&eb->lock);
@@ -168,14 +177,15 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock(struct extent_buffer *eb)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       eb->lock_nested = 0;
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
+       /*
+        * if we're nested, we have the write lock.  No new locking
+        * is needed as long as we are the lock owner.
+        * The write unlock will do a barrier for us, and the lock_nested
+        * field only matters to the lock owner.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner) {
+               eb->lock_nested = 0;
+               return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->spinning_readers) == 0);
@@ -189,14 +199,15 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb)
  */
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb)
 {
-       if (eb->lock_nested) {
-               read_lock(&eb->lock);
-               if (eb->lock_nested && current->pid == eb->lock_owner) {
-                       eb->lock_nested = 0;
-                       read_unlock(&eb->lock);
-                       return;
-               }
-               read_unlock(&eb->lock);
+       /*
+        * if we're nested, we have the write lock.  No new locking
+        * is needed as long as we are the lock owner.
+        * The write unlock will do a barrier for us, and the lock_nested
+        * field only matters to the lock owner.
+        */
+       if (eb->lock_nested && current->pid == eb->lock_owner) {
+               eb->lock_nested = 0;
+               return;
        }
        btrfs_assert_tree_read_locked(eb);
        WARN_ON(atomic_read(&eb->blocking_readers) == 0);
@@ -244,6 +255,7 @@ void btrfs_tree_unlock(struct extent_buffer *eb)
        BUG_ON(blockers > 1);
 
        btrfs_assert_tree_locked(eb);
+       eb->lock_owner = 0;
        atomic_dec(&eb->write_locks);
 
        if (blockers) {
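
Clearing eb->lock_owner before the write lock is released matters because
the rewritten fast paths above compare the owner against current->pid
without holding any lock; a stale pid left behind could be recycled by a
new task and wrongly satisfy the nested-lock test. A sketch of the
ordering, with hypothetical types:

	struct tree_lock {
		unsigned long owner;	/* pid of write holder, 0 when free */
	};

	static void write_unlock_tree(struct tree_lock *l)
	{
		l->owner = 0;		/* must happen before the unlock */
		/* write_unlock(&l->lock) would follow here */
	}
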
index e12441c..7187b14 100644 (file)
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
                                           log_list);
                list_del_init(&ordered->log_list);
                spin_unlock_irq(&log->log_extents_lock[index]);
+
+               if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+                   !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+                       struct inode *inode = ordered->inode;
+                       u64 start = ordered->file_offset;
+                       u64 end = ordered->file_offset + ordered->len - 1;
+
+                       WARN_ON(!inode);
+                       filemap_fdatawrite_range(inode->i_mapping, start, end);
+               }
                wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
                                                   &ordered->flags));
+
                btrfs_put_ordered_extent(ordered);
                spin_lock_irq(&log->log_extents_lock[index]);
        }
index 6efd70d..9626b4a 100644 (file)
@@ -54,7 +54,7 @@ static void print_extent_data_ref(struct extent_buffer *eb,
               btrfs_extent_data_ref_count(eb, ref));
 }
 
-static void print_extent_item(struct extent_buffer *eb, int slot)
+static void print_extent_item(struct extent_buffer *eb, int slot, int type)
 {
        struct btrfs_extent_item *ei;
        struct btrfs_extent_inline_ref *iref;
@@ -63,7 +63,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
        struct btrfs_disk_key key;
        unsigned long end;
        unsigned long ptr;
-       int type;
        u32 item_size = btrfs_item_size_nr(eb, slot);
        u64 flags;
        u64 offset;
@@ -88,7 +87,8 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
               btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
               flags);
 
-       if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+       if ((type == BTRFS_EXTENT_ITEM_KEY) &&
+           flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
                struct btrfs_tree_block_info *info;
                info = (struct btrfs_tree_block_info *)(ei + 1);
                btrfs_tree_block_key(eb, info, &key);
@@ -223,7 +223,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
                                btrfs_disk_root_refs(l, ri));
                        break;
                case BTRFS_EXTENT_ITEM_KEY:
-                       print_extent_item(l, i);
+               case BTRFS_METADATA_ITEM_KEY:
+                       print_extent_item(l, i, type);
                        break;
                case BTRFS_TREE_BLOCK_REF_KEY:
                        printk(KERN_INFO "\t\ttree block backref\n");
index 4055291..4a88f07 100644 (file)
@@ -1956,9 +1956,10 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
         * pages are going to be uptodate.
         */
        for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
-               if (rbio->faila == stripe ||
-                   rbio->failb == stripe)
+               if (rbio->faila == stripe || rbio->failb == stripe) {
+                       atomic_inc(&rbio->bbio->error);
                        continue;
+               }
 
                for (pagenr = 0; pagenr < nr_pages; pagenr++) {
                        struct page *p;
index ac80188..b6d198f 100644 (file)
@@ -2725,11 +2725,8 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent);
                length = btrfs_dev_extent_length(l, dev_extent);
 
-               if (found_key.offset + length <= start) {
-                       key.offset = found_key.offset + length;
-                       btrfs_release_path(path);
-                       continue;
-               }
+               if (found_key.offset + length <= start)
+                       goto skip;
 
                chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent);
                chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent);
@@ -2740,10 +2737,12 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
                 * the chunk from going away while we scrub it
                 */
                cache = btrfs_lookup_block_group(fs_info, chunk_offset);
-               if (!cache) {
-                       ret = -ENOENT;
-                       break;
-               }
+
+               /* Some chunks are removed but not committed to disk yet;
+                * continue scrubbing. */
+               if (!cache)
+                       goto skip;
+
                dev_replace->cursor_right = found_key.offset + length;
                dev_replace->cursor_left = found_key.offset;
                dev_replace->item_needs_writeback = 1;
@@ -2802,7 +2801,7 @@ int scrub_enumerate_chunks(struct scrub_ctx *sctx,
 
                dev_replace->cursor_left = dev_replace->cursor_right;
                dev_replace->item_needs_writeback = 1;
-
+skip:
                key.offset = found_key.offset + length;
                btrfs_release_path(path);
        }
index 4662d92..8e16bca 100644 (file)
@@ -522,9 +522,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
                case Opt_ssd_spread:
                        btrfs_set_and_info(root, SSD_SPREAD,
                                           "use spread ssd allocation scheme");
+                       btrfs_set_opt(info->mount_opt, SSD);
                        break;
                case Opt_nossd:
-                       btrfs_clear_and_info(root, NOSSD,
+                       btrfs_set_and_info(root, NOSSD,
                                             "not using ssd allocation scheme");
                        btrfs_clear_opt(info->mount_opt, SSD);
                        break;
@@ -1467,7 +1468,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
                        goto restore;
 
                /* recover relocation */
+               mutex_lock(&fs_info->cleaner_mutex);
                ret = btrfs_recover_relocation(root);
+               mutex_unlock(&fs_info->cleaner_mutex);
                if (ret)
                        goto restore;
 
@@ -1808,6 +1811,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
                list_for_each_entry(dev, head, dev_list) {
                        if (dev->missing)
                                continue;
+                       if (!dev->name)
+                               continue;
                        if (!first_dev || dev->devid < first_dev->devid)
                                first_dev = dev;
                }
index df39458..7869936 100644 (file)
@@ -605,14 +605,37 @@ static void init_feature_attrs(void)
        }
 }
 
-static int add_device_membership(struct btrfs_fs_info *fs_info)
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+               struct btrfs_device *one_device)
+{
+       struct hd_struct *disk;
+       struct kobject *disk_kobj;
+
+       if (!fs_info->device_dir_kobj)
+               return -EINVAL;
+
+       if (one_device) {
+               disk = one_device->bdev->bd_part;
+               disk_kobj = &part_to_dev(disk)->kobj;
+
+               sysfs_remove_link(fs_info->device_dir_kobj,
+                                               disk_kobj->name);
+       }
+
+       return 0;
+}
+
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+               struct btrfs_device *one_device)
 {
        int error = 0;
        struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
        struct btrfs_device *dev;
 
-       fs_info->device_dir_kobj = kobject_create_and_add("devices",
+       if (!fs_info->device_dir_kobj)
+               fs_info->device_dir_kobj = kobject_create_and_add("devices",
                                                &fs_info->super_kobj);
+
        if (!fs_info->device_dir_kobj)
                return -ENOMEM;
 
@@ -623,6 +646,9 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
                if (!dev->bdev)
                        continue;
 
+               if (one_device && one_device != dev)
+                       continue;
+
                disk = dev->bdev->bd_part;
                disk_kobj = &part_to_dev(disk)->kobj;
 
@@ -666,7 +692,7 @@ int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
        if (error)
                goto failure;
 
-       error = add_device_membership(fs_info);
+       error = btrfs_kobj_add_device(fs_info, NULL);
        if (error)
                goto failure;
 
index 9ab5763..ac46df3 100644 (file)
@@ -66,4 +66,8 @@ char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
 extern const char * const btrfs_feature_set_names[3];
 extern struct kobj_type space_info_ktype;
 extern struct kobj_type btrfs_raid_ktype;
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+               struct btrfs_device *one_device);
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+                struct btrfs_device *one_device);
 #endif /* _BTRFS_SYSFS_H_ */
index 511839c..5f379af 100644 (file)
@@ -386,11 +386,13 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
        bool reloc_reserved = false;
        int ret;
 
+       /* Send isn't supposed to start transactions. */
+       ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB);
+
        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return ERR_PTR(-EROFS);
 
-       if (current->journal_info &&
-           current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
+       if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
@@ -491,6 +493,7 @@ again:
        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(root, type)) {
+               current->journal_info = h;
                btrfs_commit_transaction(h, root);
                goto again;
        }
@@ -1615,11 +1618,6 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
        int ret;
 
        ret = btrfs_run_delayed_items(trans, root);
-       /*
-        * running the delayed items may have added new refs. account
-        * them now so that they hinder processing of more delayed refs
-        * as little as possible.
-        */
        if (ret)
                return ret;
 
index ffeed6d..6cb82f6 100644 (file)
@@ -40,6 +40,7 @@
 #include "rcu-string.h"
 #include "math.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
@@ -554,12 +555,14 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
                 * This is ok to do without the RCU read lock because we hold
                 * the uuid mutex, so nothing we touch in here can disappear.
                 */
-               name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
-               if (!name) {
-                       kfree(device);
-                       goto error;
+               if (orig_dev->name) {
+                       name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+                       if (!name) {
+                               kfree(device);
+                               goto error;
+                       }
+                       rcu_assign_pointer(device->name, name);
                }
-               rcu_assign_pointer(device->name, name);
 
                list_add(&device->dev_list, &fs_devices->devices);
                device->fs_devices = fs_devices;
@@ -1677,8 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
        if (device->bdev == root->fs_info->fs_devices->latest_bdev)
                root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-       if (device->bdev)
+       if (device->bdev) {
                device->fs_devices->open_devices--;
+               /* remove sysfs entry */
+               btrfs_kobj_rm_device(root->fs_info, device);
+       }
 
        call_rcu(&device->rcu, free_device);
 
@@ -2143,9 +2149,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
        total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
        btrfs_set_super_num_devices(root->fs_info->super_copy,
                                    total_bytes + 1);
+
+       /* add sysfs device entry */
+       btrfs_kobj_add_device(root->fs_info, device);
+
        mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
        if (seeding_dev) {
+               char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
                ret = init_first_rw_device(trans, root, device);
                if (ret) {
                        btrfs_abort_transaction(trans, root, ret);
@@ -2156,6 +2167,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
                        btrfs_abort_transaction(trans, root, ret);
                        goto error_trans;
                }
+
+               /* Sprouting would change the fsid of the mounted root,
+                * so rename the fsid in sysfs.
+                */
+               snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
+                                               root->fs_info->fsid);
+               if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
+                       goto error_trans;
        } else {
                ret = btrfs_add_device(trans, root, device);
                if (ret) {
@@ -2205,6 +2224,7 @@ error_trans:
        unlock_chunks(root);
        btrfs_end_transaction(trans, root);
        rcu_string_free(device->name);
+       btrfs_kobj_rm_device(root->fs_info, device);
        kfree(device);
 error:
        blkdev_put(bdev, FMODE_EXCL);
@@ -2543,9 +2563,6 @@ static int btrfs_relocate_chunk(struct btrfs_root *root,
        remove_extent_mapping(em_tree, em);
        write_unlock(&em_tree->lock);
 
-       kfree(map);
-       em->bdev = NULL;
-
        /* once for the tree */
        free_extent_map(em);
        /* once for us */
@@ -4301,9 +4318,11 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 
        em = alloc_extent_map();
        if (!em) {
+               kfree(map);
                ret = -ENOMEM;
                goto error;
        }
+       set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
        em->bdev = (struct block_device *)map;
        em->start = start;
        em->len = num_bytes;
@@ -4346,7 +4365,6 @@ error_del_extent:
        /* One for the tree reference */
        free_extent_map(em);
 error:
-       kfree(map);
        kfree(devices_info);
        return ret;
 }
@@ -4558,7 +4576,6 @@ void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree)
                write_unlock(&tree->map_tree.lock);
                if (!em)
                        break;
-               kfree(em->bdev);
                /* once for us */
                free_extent_map(em);
                /* once for the tree */
@@ -5362,6 +5379,15 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
        return 0;
 }
 
+static inline void btrfs_end_bbio(struct btrfs_bio *bbio, struct bio *bio, int err)
+{
+       if (likely(bbio->flags & BTRFS_BIO_ORIG_BIO_SUBMITTED))
+               bio_endio_nodec(bio, err);
+       else
+               bio_endio(bio, err);
+       kfree(bbio);
+}
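
This helper appears to replace the old atomic_inc(&bio->bi_remaining)
workaround: when the original bio was itself submitted as a stripe bio
(tracked by the new BTRFS_BIO_ORIG_BIO_SUBMITTED flag), the block layer
has already decremented bi_remaining once on completion, so it must be
finished with bio_endio_nodec() to avoid a second decrement. A toy model
of the bookkeeping, names hypothetical:

	struct toy_bio {
		int remaining;	/* completion is delivered at zero */
	};

	static void toy_end(struct toy_bio *b, int already_decremented)
	{
		if (!already_decremented)
			b->remaining--;
		/* if (b->remaining == 0) deliver the completion */
	}
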
+
 static void btrfs_end_bio(struct bio *bio, int err)
 {
        struct btrfs_bio *bbio = bio->bi_private;
@@ -5402,12 +5428,6 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        bio = bbio->orig_bio;
                }
 
-               /*
-                * We have original bio now. So increment bi_remaining to
-                * account for it in endio
-                */
-               atomic_inc(&bio->bi_remaining);
-
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
@@ -5424,9 +5444,8 @@ static void btrfs_end_bio(struct bio *bio, int err)
                        set_bit(BIO_UPTODATE, &bio->bi_flags);
                        err = 0;
                }
-               kfree(bbio);
 
-               bio_endio(bio, err);
+               btrfs_end_bbio(bbio, bio, err);
        } else if (!is_orig_bio) {
                bio_put(bio);
        }
@@ -5589,12 +5608,15 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 {
        atomic_inc(&bbio->error);
        if (atomic_dec_and_test(&bbio->stripes_pending)) {
+               /* Should be the original bio. */
+               WARN_ON(bio != bbio->orig_bio);
+
                bio->bi_private = bbio->private;
                bio->bi_end_io = bbio->end_io;
                btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
                bio->bi_iter.bi_sector = logical >> 9;
-               kfree(bbio);
-               bio_endio(bio, -EIO);
+
+               btrfs_end_bbio(bbio, bio, -EIO);
        }
 }
 
@@ -5681,6 +5703,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
                        BUG_ON(!bio); /* -ENOMEM */
                } else {
                        bio = first_bio;
+                       bbio->flags |= BTRFS_BIO_ORIG_BIO_SUBMITTED;
                }
 
                submit_stripe_bio(root, bbio, bio,
@@ -5822,6 +5845,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                return -ENOMEM;
        }
 
+       set_bit(EXTENT_FLAG_FS_MAPPING, &em->flags);
        em->bdev = (struct block_device *)map;
        em->start = logical;
        em->len = length;
@@ -5846,7 +5870,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                map->stripes[i].dev = btrfs_find_device(root->fs_info, devid,
                                                        uuid, NULL);
                if (!map->stripes[i].dev && !btrfs_test_opt(root, DEGRADED)) {
-                       kfree(map);
                        free_extent_map(em);
                        return -EIO;
                }
@@ -5854,7 +5877,6 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key,
                        map->stripes[i].dev =
                                add_missing_dev(root, devid, uuid);
                        if (!map->stripes[i].dev) {
-                               kfree(map);
                                free_extent_map(em);
                                return -EIO;
                        }
index 1a15bbe..2aaa00c 100644 (file)
@@ -190,11 +190,14 @@ struct btrfs_bio_stripe {
 struct btrfs_bio;
 typedef void (btrfs_bio_end_io_t) (struct btrfs_bio *bio, int err);
 
+#define BTRFS_BIO_ORIG_BIO_SUBMITTED   0x1
+
 struct btrfs_bio {
        atomic_t stripes_pending;
        struct btrfs_fs_info *fs_info;
        bio_end_io_t *end_io;
        struct bio *orig_bio;
+       unsigned long flags;
        void *private;
        atomic_t error;
        int max_errors;
index 4f19631..b67d8fc 100644 (file)
@@ -136,7 +136,7 @@ static int zlib_compress_pages(struct list_head *ws,
                if (workspace->def_strm.total_in > 8192 &&
                    workspace->def_strm.total_in <
                    workspace->def_strm.total_out) {
-                       ret = -EIO;
+                       ret = -E2BIG;
                        goto out;
                }
                /* we need another page for writing out.  Test this
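
Returning -E2BIG instead of -EIO distinguishes "the data grew under
compression" from a real I/O failure, so the caller can quietly fall back
to storing the extent uncompressed. The test, restated as a hypothetical
standalone helper:

	#include <errno.h>

	static int compression_worthwhile(unsigned long total_in,
					  unsigned long total_out)
	{
		/* after 8 KiB of input, give up if the stream is expanding */
		if (total_in > 8192 && total_in < total_out)
			return -E2BIG;
		return 0;
	}
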
index 0227b45..15e9505 100644 (file)
@@ -290,7 +290,8 @@ int
 cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                 const struct nls_table *cp, int mapChars)
 {
-       int i, j, charlen;
+       int i, charlen;
+       int j = 0;
        char src_char;
        __le16 dst_char;
        wchar_t tmp;
@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        if (!mapChars)
                return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
-       for (i = 0, j = 0; i < srclen; j++) {
+       for (i = 0; i < srclen; j++) {
                src_char = source[i];
                charlen = 1;
                switch (src_char) {
                case 0:
-                       put_unaligned(0, &target[j]);
                        goto ctoUTF16_out;
                case ':':
                        dst_char = cpu_to_le16(UNI_COLON);
@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        }
 
 ctoUTF16_out:
+       put_unaligned(0, &target[j]); /* Null-terminate the target Unicode string */
        return j;
 }
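
Moving the put_unaligned(0, ...) to the single exit label terminates the
output whether the loop ends on an embedded NUL or by exhausting srclen;
the old code only terminated in the former case. A standalone analogue of
the fixed control flow (simplified, no special-character mapping):

	#include <stdio.h>

	static int convert(unsigned short *dst, const char *src, int srclen)
	{
		int i, j = 0;

		for (i = 0; i < srclen; j++) {
			if (src[i] == 0)
				break;		/* embedded NUL */
			dst[j] = (unsigned char)src[i];
			i++;
		}
		dst[j] = 0;	/* unconditional terminator, as in the fix */
		return j;
	}

	int main(void)
	{
		unsigned short out[8];
		int n = convert(out, "abc", 3);	/* no NUL within srclen */

		printf("%d chars, terminator=%u\n", n, out[n]);
		return 0;
	}
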
 
index 2c90d07..8883980 100644 (file)
@@ -725,6 +725,19 @@ out_nls:
        goto out;
 }
 
+static ssize_t
+cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+       ssize_t rc;
+       struct inode *inode = file_inode(iocb->ki_filp);
+
+       rc = cifs_revalidate_mapping(inode);
+       if (rc)
+               return rc;
+
+       return generic_file_read_iter(iocb, iter);
+}
+
 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
        struct inode *inode = file_inode(iocb->ki_filp);
@@ -881,7 +894,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
 const struct file_operations cifs_file_ops = {
        .read = new_sync_read,
        .write = new_sync_write,
-       .read_iter = generic_file_read_iter,
+       .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
        .open = cifs_open,
        .release = cifs_close,
@@ -939,7 +952,7 @@ const struct file_operations cifs_file_direct_ops = {
 const struct file_operations cifs_file_nobrl_ops = {
        .read = new_sync_read,
        .write = new_sync_write,
-       .read_iter = generic_file_read_iter,
+       .read_iter = cifs_loose_read_iter,
        .write_iter = cifs_file_write_iter,
        .open = cifs_open,
        .release = cifs_close,
index 264ece7..68559fd 100644 (file)
@@ -374,7 +374,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
        oparms.cifs_sb = cifs_sb;
        oparms.desired_access = GENERIC_WRITE;
        oparms.create_options = create_options;
-       oparms.disposition = FILE_OPEN;
+       oparms.disposition = FILE_CREATE;
        oparms.path = path;
        oparms.fid = &fid;
        oparms.reconnect = false;
index 0b2528f..a93f7e6 100644 (file)
@@ -306,7 +306,7 @@ static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
        if (unlikely(nr < 0))
                return nr;
 
-       tsk->flags = PF_DUMPCORE;
+       tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
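The one-character coredump fix above turns a destructive assignment into a bitwise OR: writing tsk->flags = PF_DUMPCORE silently cleared every other flag already set on the task. A small userspace sketch of the difference, with made-up flag values:

    #include <stdio.h>

    #define PF_EXITING  0x1     /* illustrative values, not the kernel's */
    #define PF_DUMPCORE 0x2

    int main(void)
    {
        unsigned long flags = PF_EXITING;               /* state already set */
        unsigned long clobbered = PF_DUMPCORE;          /* buggy:  flags =   */
        unsigned long preserved = flags | PF_DUMPCORE;  /* fixed:  flags |=  */

        printf("clobbered keeps PF_EXITING: %s\n",
               (clobbered & PF_EXITING) ? "yes" : "no");
        printf("preserved keeps PF_EXITING: %s\n",
               (preserved & PF_EXITING) ? "yes" : "no");
        return 0;
    }
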
index 98040ba..194d0d1 100644 (file)
@@ -198,9 +198,8 @@ static inline int dio_refill_pages(struct dio *dio, struct dio_submit *sdio)
  * L1 cache.
  */
 static inline struct page *dio_get_page(struct dio *dio,
-               struct dio_submit *sdio, size_t *from, size_t *to)
+                                       struct dio_submit *sdio)
 {
-       int n;
        if (dio_pages_present(sdio) == 0) {
                int ret;
 
@@ -209,10 +208,7 @@ static inline struct page *dio_get_page(struct dio *dio,
                        return ERR_PTR(ret);
                BUG_ON(dio_pages_present(sdio) == 0);
        }
-       n = sdio->head++;
-       *from = n ? 0 : sdio->from;
-       *to = (n == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
-       return dio->pages[n];
+       return dio->pages[sdio->head];
 }
 
 /**
@@ -911,11 +907,15 @@ static int do_direct_IO(struct dio *dio, struct dio_submit *sdio,
        while (sdio->block_in_file < sdio->final_block_in_request) {
                struct page *page;
                size_t from, to;
-               page = dio_get_page(dio, sdio, &from, &to);
+
+               page = dio_get_page(dio, sdio);
                if (IS_ERR(page)) {
                        ret = PTR_ERR(page);
                        goto out;
                }
+               from = sdio->head ? 0 : sdio->from;
+               to = (sdio->head == sdio->tail - 1) ? sdio->to : PAGE_SIZE;
+               sdio->head++;
 
                while (from < to) {
                        unsigned this_chunk_bytes;      /* # of bytes mapped */
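The direct-io hunks above reduce dio_get_page() to a pure lookup and move the intra-page range computation and cursor advance into the caller. That head/tail/from/to bookkeeping in isolation, as a userspace sketch with made-up constants:

    #include <stdio.h>

    #define PAGE_SIZE 4096

    struct cursor { int head, tail, from, to; };

    int main(void)
    {
        /* three pages; the I/O starts 100 bytes into the first page and
         * ends 200 bytes into the last, mirroring sdio's fields */
        struct cursor c = { .head = 0, .tail = 3, .from = 100, .to = 200 };

        while (c.head < c.tail) {
            int from = c.head ? 0 : c.from;
            int to = (c.head == c.tail - 1) ? c.to : PAGE_SIZE;

            printf("page %d: bytes [%d, %d)\n", c.head, from, to);
            c.head++;
        }
        return 0;
    }
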
index b73e062..b10b48c 100644 (file)
@@ -910,7 +910,7 @@ static const struct file_operations eventpoll_fops = {
 void eventpoll_release_file(struct file *file)
 {
        struct eventpoll *ep;
-       struct epitem *epi;
+       struct epitem *epi, *next;
 
        /*
         * We don't want to get "file->f_lock" because it is not
@@ -926,7 +926,7 @@ void eventpoll_release_file(struct file *file)
         * Besides, ep_remove() acquires the lock, so we can't hold it here.
         */
        mutex_lock(&epmutex);
-       list_for_each_entry_rcu(epi, &file->f_ep_links, fllink) {
+       list_for_each_entry_safe(epi, next, &file->f_ep_links, fllink) {
                ep = epi->ep;
                mutex_lock_nested(&ep->mtx, 0);
                ep_remove(ep, epi);
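The eventpoll change above switches to the _safe list walker because ep_remove() unlinks the entry the loop is standing on; the plain walker would then read a next pointer out of freed memory. The same pattern in plain C, caching next before the body invalidates the node:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { int v; struct node *next; };

    int main(void)
    {
        struct node *head = NULL, *n, *next;

        for (int v = 3; v >= 1; v--) {          /* build 1 -> 2 -> 3 */
            n = malloc(sizeof(*n));
            n->v = v;
            n->next = head;
            head = n;
        }

        for (n = head; n; n = next) {
            next = n->next;     /* what list_for_each_entry_safe caches */
            printf("removing %d\n", n->v);
            free(n);            /* n is gone, but 'next' is still valid */
        }
        return 0;
    }
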
index 0762d14..fca3820 100644 (file)
@@ -194,7 +194,16 @@ static void ext4_init_block_bitmap(struct super_block *sb,
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                grp = ext4_get_group_info(sb, block_group);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+               if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, gdp);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
@@ -359,6 +368,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 {
        ext4_fsblk_t    blk;
        struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
 
        if (buffer_verified(bh))
                return;
@@ -369,6 +379,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
                           block_group, blk);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
@@ -376,6 +389,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
                        desc, bh))) {
                ext4_unlock_group(sb, block_group);
                ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
                return;
        }
index 3f5c188..0b7e28e 100644 (file)
@@ -966,10 +966,10 @@ retry:
                        continue;
                }
 
-               if (ei->i_es_lru_nr == 0 || ei == locked_ei)
+               if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
+                   !write_trylock(&ei->i_es_lock))
                        continue;
 
-               write_lock(&ei->i_es_lock);
                shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
                if (ei->i_es_lru_nr == 0)
                        list_del_init(&ei->i_es_lru);
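The extents-status shrinker above now takes the inode lock with write_trylock(), skipping contended inodes instead of sleeping on them during reclaim. A userspace sketch of the trylock-and-skip pattern using POSIX rwlocks:

    #include <pthread.h>
    #include <stdio.h>

    struct obj { pthread_rwlock_t lock; int reclaimable; };

    static int shrink_one(struct obj *o)
    {
        int freed;

        if (pthread_rwlock_trywrlock(&o->lock) != 0)
            return 0;               /* contended: skip, scan the next one */
        freed = o->reclaimable;
        o->reclaimable = 0;
        pthread_rwlock_unlock(&o->lock);
        return freed;
    }

    int main(void)
    {
        struct obj o = { .reclaimable = 4 };

        pthread_rwlock_init(&o.lock, NULL);
        printf("freed %d units\n", shrink_one(&o));
        pthread_rwlock_destroy(&o.lock);
        return 0;
    }
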
index 0ee59a6..5b87fc3 100644 (file)
@@ -71,6 +71,7 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
                                       struct ext4_group_desc *gdp)
 {
        struct ext4_group_info *grp;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        J_ASSERT_BH(bh, buffer_locked(bh));
 
        /* If checksum is bad mark all blocks and inodes use to prevent
@@ -78,7 +79,16 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
        if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
                ext4_error(sb, "Checksum bad for group %u", block_group);
                grp = ext4_get_group_info(sb, block_group);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+               if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, gdp);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return 0;
        }
@@ -116,6 +126,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
        struct buffer_head *bh = NULL;
        ext4_fsblk_t bitmap_blk;
        struct ext4_group_info *grp;
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
 
        desc = ext4_get_group_desc(sb, block_group, NULL);
        if (!desc)
@@ -185,6 +196,12 @@ verify:
                ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
                           "inode_bitmap = %llu", block_group, bitmap_blk);
                grp = ext4_get_group_info(sb, block_group);
+               if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, desc);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
                return NULL;
        }
@@ -321,6 +338,12 @@ out:
                        fatal = err;
        } else {
                ext4_error(sb, "bit already cleared for inode %lu", ino);
+               if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+                       int count;
+                       count = ext4_free_inodes_count(sb, gdp);
+                       percpu_counter_sub(&sbi->s_freeinodes_counter,
+                                          count);
+               }
                set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
        }
 
@@ -851,6 +874,13 @@ got:
                goto out;
        }
 
+       BUFFER_TRACE(group_desc_bh, "get_write_access");
+       err = ext4_journal_get_write_access(handle, group_desc_bh);
+       if (err) {
+               ext4_std_error(sb, err);
+               goto out;
+       }
+
        /* We may have to initialize the block bitmap if it isn't already */
        if (ext4_has_group_desc_csum(sb) &&
            gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -887,13 +917,6 @@ got:
                }
        }
 
-       BUFFER_TRACE(group_desc_bh, "get_write_access");
-       err = ext4_journal_get_write_access(handle, group_desc_bh);
-       if (err) {
-               ext4_std_error(sb, err);
-               goto out;
-       }
-
        /* Update the relevant bg descriptor fields */
        if (ext4_has_group_desc_csum(sb)) {
                int free;
index 8a57e9f..fd69da1 100644 (file)
@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
        return 0;
 failed:
        for (; i >= 0; i--) {
-               if (i != indirect_blks && branch[i].bh)
+               /*
+                * We want to ext4_forget() only freshly allocated indirect
+                * blocks.  The buffer for new_blocks[i-1] is at branch[i].bh,
+                * and the buffer at branch[0].bh is an indirect block / inode
+                * that already existed before ext4_alloc_branch() was called.
+                */
+               if (i > 0 && i != indirect_blks && branch[i].bh)
                        ext4_forget(handle, 1, inode, branch[i].bh,
                                    branch[i].bh->b_blocknr);
                ext4_free_blocks(handle, inode, NULL, new_blocks[i],
@@ -1310,16 +1316,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
                blk = *i_data;
                if (level > 0) {
                        ext4_lblk_t first2;
+                       ext4_lblk_t count2;
+
                        bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
                        if (!bh) {
                                EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
                                                       "Read failure");
                                return -EIO;
                        }
-                       first2 = (first > offset) ? first - offset : 0;
+                       if (first > offset) {
+                               first2 = first - offset;
+                               count2 = count;
+                       } else {
+                               first2 = 0;
+                               count2 = count - (offset - first);
+                       }
                        ret = free_hole_blocks(handle, inode, bh,
                                               (__le32 *)bh->b_data, level - 1,
-                                              first2, count - offset,
+                                              first2, count2,
                                               inode->i_sb->s_blocksize >> 2);
                        if (ret) {
                                brelse(bh);
@@ -1329,8 +1343,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
                if (level == 0 ||
                    (bh && all_zeroes((__le32 *)bh->b_data,
                                      (__le32 *)bh->b_data + addr_per_block))) {
-                       ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
-                       *i_data = 0;
+                       ext4_free_data(handle, inode, parent_bh,
+                                      i_data, i_data + 1);
                }
                brelse(bh);
                bh = NULL;
index 59e3162..2dcb936 100644 (file)
@@ -722,6 +722,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
                                void *buddy, void *bitmap, ext4_group_t group)
 {
        struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+       struct ext4_sb_info *sbi = EXT4_SB(sb);
        ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
        ext4_grpblk_t i = 0;
        ext4_grpblk_t first;
@@ -751,14 +752,17 @@ void ext4_mb_generate_buddy(struct super_block *sb,
 
        if (free != grp->bb_free) {
                ext4_grp_locked_error(sb, group, 0, 0,
-                                     "%u clusters in bitmap, %u in gd; "
-                                     "block bitmap corrupt.",
+                                     "block bitmap and bg descriptor "
+                                     "inconsistent: %u vs %u free clusters",
                                      free, grp->bb_free);
                /*
                 * If we intend to continue, we consider group descriptor
                 * corrupt and update bb_free using bitmap value
                 */
                grp->bb_free = free;
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          grp->bb_free);
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
        }
        mb_set_largest_free_order(sb, grp);
@@ -1431,6 +1435,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
 
        if (unlikely(block != -1)) {
+               struct ext4_sb_info *sbi = EXT4_SB(sb);
                ext4_fsblk_t blocknr;
 
                blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
@@ -1441,6 +1446,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
                                      "freeing already freed block "
                                      "(bit %u); block bitmap corrupt.",
                                      block);
+               if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
+                       percpu_counter_sub(&sbi->s_freeclusters_counter,
+                                          e4b->bd_info->bb_free);
                /* Mark the block group as corrupt. */
                set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
                        &e4b->bd_info->bb_state);
index b9b9aab..6df7bc6 100644 (file)
@@ -1525,8 +1525,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
                        arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
                sbi->s_commit_interval = HZ * arg;
        } else if (token == Opt_max_batch_time) {
-               if (arg == 0)
-                       arg = EXT4_DEF_MAX_BATCH_TIME;
                sbi->s_max_batch_time = arg;
        } else if (token == Opt_min_batch_time) {
                sbi->s_min_batch_time = arg;
@@ -2809,10 +2807,11 @@ static void print_daily_error_info(unsigned long arg)
        es = sbi->s_es;
 
        if (es->s_error_count)
-               ext4_msg(sb, KERN_NOTICE, "error count: %u",
+               /* fsck newer than v1.41.13 is needed to clean this condition. */
+               ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
                         le32_to_cpu(es->s_error_count));
        if (es->s_first_error_time) {
-               printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+               printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
                       sb->s_id, le32_to_cpu(es->s_first_error_time),
                       (int) sizeof(es->s_first_error_func),
                       es->s_first_error_func,
@@ -2826,7 +2825,7 @@ static void print_daily_error_info(unsigned long arg)
                printk("\n");
        }
        if (es->s_last_error_time) {
-               printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+               printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
                       sb->s_id, le32_to_cpu(es->s_last_error_time),
                       (int) sizeof(es->s_last_error_func),
                       es->s_last_error_func,
@@ -3880,38 +3879,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
                        goto failed_mount2;
                }
        }
-
-       /*
-        * set up enough so that it can read an inode,
-        * and create new inode for buddy allocator
-        */
-       sbi->s_gdb_count = db_count;
-       if (!test_opt(sb, NOLOAD) &&
-           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-               sb->s_op = &ext4_sops;
-       else
-               sb->s_op = &ext4_nojournal_sops;
-
-       ext4_ext_init(sb);
-       err = ext4_mb_init(sb);
-       if (err) {
-               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
-                        err);
-               goto failed_mount2;
-       }
-
        if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
                ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-               goto failed_mount2a;
+               goto failed_mount2;
        }
        if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
                if (!ext4_fill_flex_info(sb)) {
                        ext4_msg(sb, KERN_ERR,
                               "unable to initialize "
                               "flex_bg meta info!");
-                       goto failed_mount2a;
+                       goto failed_mount2;
                }
 
+       sbi->s_gdb_count = db_count;
        get_random_bytes(&sbi->s_next_generation, sizeof(u32));
        spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -3946,6 +3926,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
        sbi->s_stripe = ext4_get_stripe_size(sbi);
        sbi->s_extent_max_zeroout_kb = 32;
 
+       /*
+        * set up enough so that it can read an inode
+        */
+       if (!test_opt(sb, NOLOAD) &&
+           EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+               sb->s_op = &ext4_sops;
+       else
+               sb->s_op = &ext4_nojournal_sops;
        sb->s_export_op = &ext4_export_ops;
        sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -4135,13 +4123,21 @@ no_journal:
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
                         "reserved pool", ext4_calculate_resv_clusters(sb));
-               goto failed_mount5;
+               goto failed_mount4a;
        }
 
        err = ext4_setup_system_zone(sb);
        if (err) {
                ext4_msg(sb, KERN_ERR, "failed to initialize system "
                         "zone (%d)", err);
+               goto failed_mount4a;
+       }
+
+       ext4_ext_init(sb);
+       err = ext4_mb_init(sb);
+       if (err) {
+               ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+                        err);
                goto failed_mount5;
        }
 
@@ -4218,8 +4214,11 @@ failed_mount8:
 failed_mount7:
        ext4_unregister_li_request(sb);
 failed_mount6:
-       ext4_release_system_zone(sb);
+       ext4_mb_release(sb);
 failed_mount5:
+       ext4_ext_release(sb);
+       ext4_release_system_zone(sb);
+failed_mount4a:
        dput(sb->s_root);
        sb->s_root = NULL;
 failed_mount4:
@@ -4243,14 +4242,11 @@ failed_mount3:
        percpu_counter_destroy(&sbi->s_extent_cache_cnt);
        if (sbi->s_mmp_tsk)
                kthread_stop(sbi->s_mmp_tsk);
-failed_mount2a:
-       ext4_mb_release(sb);
 failed_mount2:
        for (i = 0; i < db_count; i++)
                brelse(sbi->s_group_desc[i]);
        ext4_kvfree(sbi->s_group_desc);
 failed_mount:
-       ext4_ext_release(sb);
        if (sbi->s_chksum_driver)
                crypto_free_shash(sbi->s_chksum_driver);
        if (sbi->s_proc) {
index 0924521..f8cf619 100644 (file)
@@ -608,8 +608,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
  *     b. do not use extent cache for better performance
  *     c. give the block addresses to blockdev
  */
-static int get_data_block(struct inode *inode, sector_t iblock,
-                       struct buffer_head *bh_result, int create)
+static int __get_data_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create, bool fiemap)
 {
        struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
        unsigned int blkbits = inode->i_sb->s_blocksize_bits;
@@ -637,7 +637,7 @@ static int get_data_block(struct inode *inode, sector_t iblock,
                        err = 0;
                goto unlock_out;
        }
-       if (dn.data_blkaddr == NEW_ADDR)
+       if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                goto put_out;
 
        if (dn.data_blkaddr != NULL_ADDR) {
@@ -671,7 +671,7 @@ get_next:
                                err = 0;
                        goto unlock_out;
                }
-               if (dn.data_blkaddr == NEW_ADDR)
+               if (dn.data_blkaddr == NEW_ADDR && !fiemap)
                        goto put_out;
 
                end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -708,10 +708,23 @@ out:
        return err;
 }
 
+static int get_data_block(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       return __get_data_block(inode, iblock, bh_result, create, false);
+}
+
+static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+                       struct buffer_head *bh_result, int create)
+{
+       return __get_data_block(inode, iblock, bh_result, create, true);
+}
+
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
                u64 start, u64 len)
 {
-       return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
+       return generic_block_fiemap(inode, fieinfo,
+                               start, len, get_data_block_fiemap);
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
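The f2fs hunks above fold get_data_block() into a flag-taking worker with two thin wrappers, so fiemap can see NEW_ADDR (preallocated but not yet written) blocks that ordinary lookups treat as holes. A sketch of the wrapper-with-bool pattern; the mapping and values are illustrative, not f2fs's:

    #include <stdio.h>
    #include <stdbool.h>

    enum { NEW_ADDR = 0 };      /* stand-in for the preallocation marker */

    static int __get_block(int block, bool fiemap)
    {
        int addr = (block == 2) ? NEW_ADDR : 100 + block;   /* fake map */

        if (addr == NEW_ADDR && !fiemap)
            return -1;          /* ordinary lookup: report a hole */
        return addr;            /* fiemap is shown the preallocation */
    }

    static int get_block(int block)        { return __get_block(block, false); }
    static int get_block_fiemap(int block) { return __get_block(block, true); }

    int main(void)
    {
        printf("lookup of block 2: %d\n", get_block(2));          /* -1 */
        printf("fiemap of block 2: %d\n", get_block_fiemap(2));   /*  0 */
        return 0;
    }
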
index 966acb0..a4addd7 100644 (file)
@@ -376,11 +376,11 @@ static struct page *init_inode_metadata(struct inode *inode,
 
 put_error:
        f2fs_put_page(page, 1);
+error:
        /* once the failed inode becomes a bad inode, i_mode is S_IFREG */
        truncate_inode_pages(&inode->i_data, 0);
        truncate_blocks(inode, 0);
        remove_dirty_dir_inode(inode);
-error:
        remove_inode_page(inode);
        return ERR_PTR(err);
 }
index e51c732..58df97e 100644 (file)
@@ -342,9 +342,6 @@ struct f2fs_sm_info {
        struct dirty_seglist_info *dirty_info;  /* dirty segment information */
        struct curseg_info *curseg_array;       /* active segment information */
 
-       struct list_head wblist_head;   /* list of under-writeback pages */
-       spinlock_t wblist_lock;         /* lock for checkpoint */
-
        block_t seg0_blkaddr;           /* block address of 0'th segment */
        block_t main_blkaddr;           /* start block address of main area */
        block_t ssa_blkaddr;            /* start block address of SSA area */
@@ -644,7 +641,8 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
  */
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
-       WARN_ON((nid >= NM_I(sbi)->max_nid));
+       if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+               return -EINVAL;
        if (unlikely(nid >= NM_I(sbi)->max_nid))
                return -EINVAL;
        return 0;
index c58e330..7d8b962 100644 (file)
@@ -659,16 +659,19 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
        off_start = offset & (PAGE_CACHE_SIZE - 1);
        off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
+       f2fs_lock_op(sbi);
+
        for (index = pg_start; index <= pg_end; index++) {
                struct dnode_of_data dn;
 
-               f2fs_lock_op(sbi);
+               if (index == pg_end && !off_end)
+                       goto noalloc;
+
                set_new_dnode(&dn, inode, NULL, NULL, 0);
                ret = f2fs_reserve_block(&dn, index);
-               f2fs_unlock_op(sbi);
                if (ret)
                        break;
-
+noalloc:
                if (pg_start == pg_end)
                        new_size = offset + len;
                else if (index == pg_start && off_start)
@@ -683,8 +686,9 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
                i_size_read(inode) < new_size) {
                i_size_write(inode, new_size);
                mark_inode_dirty(inode);
-               f2fs_write_inode(inode, NULL);
+               update_inode_page(inode);
        }
+       f2fs_unlock_op(sbi);
 
        return ret;
 }
index adc622c..2cf6962 100644 (file)
@@ -78,6 +78,7 @@ static int do_read_inode(struct inode *inode)
        if (check_nid_range(sbi, inode->i_ino)) {
                f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
                         (unsigned long) inode->i_ino);
+               WARN_ON(1);
                return -EINVAL;
        }
 
index 9138c32..a6bdddc 100644 (file)
@@ -417,9 +417,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
 
                f2fs_set_link(new_dir, new_entry, new_page, old_inode);
-               down_write(&F2FS_I(old_inode)->i_sem);
-               F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-               up_write(&F2FS_I(old_inode)->i_sem);
 
                new_inode->i_ctime = CURRENT_TIME;
                down_write(&F2FS_I(new_inode)->i_sem);
@@ -448,6 +445,10 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                }
        }
 
+       down_write(&F2FS_I(old_inode)->i_sem);
+       file_lost_pino(old_inode);
+       up_write(&F2FS_I(old_inode)->i_sem);
+
        old_inode->i_ctime = CURRENT_TIME;
        mark_inode_dirty(old_inode);
 
@@ -457,9 +458,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
                if (old_dir != new_dir) {
                        f2fs_set_link(old_inode, old_dir_entry,
                                                old_dir_page, new_dir);
-                       down_write(&F2FS_I(old_inode)->i_sem);
-                       F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-                       up_write(&F2FS_I(old_inode)->i_sem);
                        update_inode_page(old_inode);
                } else {
                        kunmap(old_dir_page);
@@ -474,7 +472,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
        return 0;
 
 put_out_dir:
-       f2fs_put_page(new_page, 1);
+       kunmap(new_page);
+       f2fs_put_page(new_page, 0);
 out_dir:
        if (old_dir_entry) {
                kunmap(old_dir_page);
index 9dfb9a0..4b697cc 100644 (file)
@@ -42,6 +42,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
                mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
                res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
        } else if (type == DIRTY_DENTS) {
+               if (sbi->sb->s_bdi->dirty_exceeded)
+                       return false;
                mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
                res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
        }
index f25f0e0..d04613d 100644 (file)
@@ -272,14 +272,15 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
                return -ENOMEM;
        spin_lock_init(&fcc->issue_lock);
        init_waitqueue_head(&fcc->flush_wait_queue);
+       sbi->sm_info->cmd_control_info = fcc;
        fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
                                "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
        if (IS_ERR(fcc->f2fs_issue_flush)) {
                err = PTR_ERR(fcc->f2fs_issue_flush);
                kfree(fcc);
+               sbi->sm_info->cmd_control_info = NULL;
                return err;
        }
-       sbi->sm_info->cmd_control_info = fcc;
 
        return err;
 }
@@ -1885,8 +1886,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
 
        /* init sm info */
        sbi->sm_info = sm_info;
-       INIT_LIST_HEAD(&sm_info->wblist_head);
-       spin_lock_init(&sm_info->wblist_lock);
        sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
        sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
        sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
index b2b1863..8f96d93 100644 (file)
@@ -689,9 +689,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
        struct f2fs_sb_info *sbi = F2FS_SB(sb);
        struct inode *inode;
 
-       if (unlikely(ino < F2FS_ROOT_INO(sbi)))
-               return ERR_PTR(-ESTALE);
-       if (unlikely(ino >= NM_I(sbi)->max_nid))
+       if (check_nid_range(sbi, ino))
                return ERR_PTR(-ESTALE);
 
        /*
index 098f97b..ca88731 100644 (file)
@@ -643,9 +643,8 @@ struct fuse_copy_state {
        unsigned long seglen;
        unsigned long addr;
        struct page *pg;
-       void *mapaddr;
-       void *buf;
        unsigned len;
+       unsigned offset;
        unsigned move_pages:1;
 };
 
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
        if (cs->currbuf) {
                struct pipe_buffer *buf = cs->currbuf;
 
-               if (!cs->write) {
-                       kunmap_atomic(cs->mapaddr);
-               } else {
-                       kunmap_atomic(cs->mapaddr);
+               if (cs->write)
                        buf->len = PAGE_SIZE - cs->len;
-               }
                cs->currbuf = NULL;
-               cs->mapaddr = NULL;
-       } else if (cs->mapaddr) {
-               kunmap_atomic(cs->mapaddr);
+       } else if (cs->pg) {
                if (cs->write) {
                        flush_dcache_page(cs->pg);
                        set_page_dirty_lock(cs->pg);
                }
                put_page(cs->pg);
-               cs->mapaddr = NULL;
        }
+       cs->pg = NULL;
 }
 
 /*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
  */
 static int fuse_copy_fill(struct fuse_copy_state *cs)
 {
-       unsigned long offset;
+       struct page *page;
        int err;
 
        unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
 
                        BUG_ON(!cs->nr_segs);
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap_atomic(buf->page);
+                       cs->pg = buf->page;
+                       cs->offset = buf->offset;
                        cs->len = buf->len;
-                       cs->buf = cs->mapaddr + buf->offset;
                        cs->pipebufs++;
                        cs->nr_segs--;
                } else {
-                       struct page *page;
-
                        if (cs->nr_segs == cs->pipe->buffers)
                                return -EIO;
 
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        buf->len = 0;
 
                        cs->currbuf = buf;
-                       cs->mapaddr = kmap_atomic(page);
-                       cs->buf = cs->mapaddr;
+                       cs->pg = page;
+                       cs->offset = 0;
                        cs->len = PAGE_SIZE;
                        cs->pipebufs++;
                        cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
                        cs->iov++;
                        cs->nr_segs--;
                }
-               err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg);
+               err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
                if (err < 0)
                        return err;
                BUG_ON(err != 1);
-               offset = cs->addr % PAGE_SIZE;
-               cs->mapaddr = kmap_atomic(cs->pg);
-               cs->buf = cs->mapaddr + offset;
-               cs->len = min(PAGE_SIZE - offset, cs->seglen);
+               cs->pg = page;
+               cs->offset = cs->addr % PAGE_SIZE;
+               cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
                cs->seglen -= cs->len;
                cs->addr += cs->len;
        }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
 {
        unsigned ncpy = min(*size, cs->len);
        if (val) {
+               void *pgaddr = kmap_atomic(cs->pg);
+               void *buf = pgaddr + cs->offset;
+
                if (cs->write)
-                       memcpy(cs->buf, *val, ncpy);
+                       memcpy(buf, *val, ncpy);
                else
-                       memcpy(*val, cs->buf, ncpy);
+                       memcpy(*val, buf, ncpy);
+
+               kunmap_atomic(pgaddr);
                *val += ncpy;
        }
        *size -= ncpy;
        cs->len -= ncpy;
-       cs->buf += ncpy;
+       cs->offset += ncpy;
        return ncpy;
 }
 
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
 out_fallback_unlock:
        unlock_page(newpage);
 out_fallback:
-       cs->mapaddr = kmap_atomic(buf->page);
-       cs->buf = cs->mapaddr + buf->offset;
+       cs->pg = buf->page;
+       cs->offset = buf->offset;
 
        err = lock_request(cs->fc, cs->req);
        if (err)
index 4219835..0c60482 100644 (file)
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
        inode = ACCESS_ONCE(entry->d_inode);
        if (inode && is_bad_inode(inode))
                goto invalid;
-       else if (fuse_dentry_time(entry) < get_jiffies_64()) {
+       else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
+                (flags & LOOKUP_REVAL)) {
                int err;
                struct fuse_entry_out outarg;
                struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
        return err;
 }
 
-static int fuse_rename(struct inode *olddir, struct dentry *oldent,
-                      struct inode *newdir, struct dentry *newent)
-{
-       return fuse_rename_common(olddir, oldent, newdir, newent, 0,
-                                 FUSE_RENAME, sizeof(struct fuse_rename_in));
-}
-
 static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
                        struct inode *newdir, struct dentry *newent,
                        unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
        if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
                return -EINVAL;
 
-       if (fc->no_rename2 || fc->minor < 23)
-               return -EINVAL;
+       if (flags) {
+               if (fc->no_rename2 || fc->minor < 23)
+                       return -EINVAL;
 
-       err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
-                                FUSE_RENAME2, sizeof(struct fuse_rename2_in));
-       if (err == -ENOSYS) {
-               fc->no_rename2 = 1;
-               err = -EINVAL;
+               err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
+                                        FUSE_RENAME2,
+                                        sizeof(struct fuse_rename2_in));
+               if (err == -ENOSYS) {
+                       fc->no_rename2 = 1;
+                       err = -EINVAL;
+               }
+       } else {
+               err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
+                                        FUSE_RENAME,
+                                        sizeof(struct fuse_rename_in));
        }
+
        return err;
+}
 
+static int fuse_rename(struct inode *olddir, struct dentry *oldent,
+                      struct inode *newdir, struct dentry *newent)
+{
+       return fuse_rename2(olddir, oldent, newdir, newent, 0);
 }
 
 static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
        int err;
        bool r;
 
-       if (fi->i_time < get_jiffies_64()) {
+       if (time_before64(fi->i_time, get_jiffies_64())) {
                r = true;
                err = fuse_do_getattr(inode, stat, file);
        } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
            ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
                struct fuse_inode *fi = get_fuse_inode(inode);
 
-               if (fi->i_time < get_jiffies_64()) {
+               if (time_before64(fi->i_time, get_jiffies_64())) {
                        refreshed = true;
 
                        err = fuse_perm_getattr(inode, mask);
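The fuse changes above replace raw < comparisons of 64-bit jiffies values with time_before64(), which compares via a signed difference and so stays correct once the counter wraps. A userspace demonstration:

    #include <stdio.h>
    #include <stdint.h>

    /* same idea as the kernel's time_before64(a, b) */
    static int before64(uint64_t a, uint64_t b)
    {
        return (int64_t)(a - b) < 0;
    }

    int main(void)
    {
        uint64_t expiry = UINT64_MAX - 5;   /* stamped just before wrap */
        uint64_t now = 10;                  /* counter has since wrapped */

        printf("raw <      says expired: %s\n", expiry < now ? "yes" : "no");
        printf("before64() says expired: %s\n",
               before64(expiry, now) ? "yes" : "no");
        return 0;
    }
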
index 6e16dad..40ac262 100644 (file)
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
        error = -EIO;
        req->ff = fuse_write_file_get(fc, fi);
        if (!req->ff)
-               goto err_free;
+               goto err_nofile;
 
        fuse_write_fill(req, req->ff, page_offset(page), 0);
 
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
 
        return 0;
 
+err_nofile:
+       __free_page(tmp_page);
 err_free:
        fuse_request_free(req);
 err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
        data.ff = NULL;
 
        err = -ENOMEM;
-       data.orig_pages = kzalloc(sizeof(struct page *) *
-                                 FUSE_MAX_PAGES_PER_REQ,
+       data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
+                                 sizeof(struct page *),
                                  GFP_NOFS);
        if (!data.orig_pages)
                goto out;
index 754dcf2..03246cd 100644 (file)
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
        {OPT_ERR,                       NULL}
 };
 
+static int fuse_match_uint(substring_t *s, unsigned int *res)
+{
+       int err = -ENOMEM;
+       char *buf = match_strdup(s);
+       if (buf) {
+               err = kstrtouint(buf, 10, res);
+               kfree(buf);
+       }
+       return err;
+}
+
 static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
 {
        char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
        while ((p = strsep(&opt, ",")) != NULL) {
                int token;
                int value;
+               unsigned uv;
                substring_t args[MAX_OPT_ARGS];
                if (!*p)
                        continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
                        break;
 
                case OPT_USER_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->user_id = make_kuid(current_user_ns(), value);
+                       d->user_id = make_kuid(current_user_ns(), uv);
                        if (!uid_valid(d->user_id))
                                return 0;
                        d->user_id_present = 1;
                        break;
 
                case OPT_GROUP_ID:
-                       if (match_int(&args[0], &value))
+                       if (fuse_match_uint(&args[0], &uv))
                                return 0;
-                       d->group_id = make_kgid(current_user_ns(), value);
+                       d->group_id = make_kgid(current_user_ns(), uv);
                        if (!gid_valid(d->group_id))
                                return 0;
                        d->group_id_present = 1;
@@ -895,9 +907,6 @@ static void process_init_reply(struct fuse_conn *fc, struct fuse_req *req)
                                fc->writeback_cache = 1;
                        if (arg->time_gran && arg->time_gran <= 1000000000)
                                fc->sb->s_time_gran = arg->time_gran;
-                       else
-                               fc->sb->s_time_gran = 1000000000;
-
                } else {
                        ra_pages = fc->max_read / PAGE_CACHE_SIZE;
                        fc->no_lock = 1;
@@ -926,7 +935,7 @@ static void fuse_send_init(struct fuse_conn *fc, struct fuse_req *req)
                FUSE_SPLICE_WRITE | FUSE_SPLICE_MOVE | FUSE_SPLICE_READ |
                FUSE_FLOCK_LOCKS | FUSE_IOCTL_DIR | FUSE_AUTO_INVAL_DATA |
                FUSE_DO_READDIRPLUS | FUSE_READDIRPLUS_AUTO | FUSE_ASYNC_DIO |
-               FUSE_WRITEBACK_CACHE;
+               FUSE_WRITEBACK_CACHE | FUSE_NO_OPEN_SUPPORT;
        req->in.h.opcode = FUSE_INIT;
        req->in.numargs = 1;
        req->in.args[0].size = sizeof(*arg);
@@ -1006,7 +1015,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
 
        sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
 
-       if (!parse_fuse_opt((char *) data, &d, is_bdev))
+       if (!parse_fuse_opt(data, &d, is_bdev))
                goto err;
 
        if (is_bdev) {
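fuse_match_uint() above exists because match_int() parses through a signed int, so uid/gid values at or above 2^31 were rejected or mangled before ever reaching make_kuid(). The unsigned parse in userspace terms, with strtoul standing in for kstrtouint:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>
    #include <limits.h>

    static int match_uint(const char *s, unsigned int *res)
    {
        char *end;
        unsigned long v;

        errno = 0;
        v = strtoul(s, &end, 10);
        if (errno || *end != '\0' || v > UINT_MAX)
            return -1;
        *res = (unsigned int)v;
        return 0;
    }

    int main(void)
    {
        unsigned int uv;

        /* fits an unsigned int but overflows a signed one */
        if (match_uint("4294967294", &uv) == 0)
            printf("user_id=%u\n", uv);
        return 0;
    }
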
index 4fc3a30..26b3f95 100644 (file)
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
        int error = 0;
 
        state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
-       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
+       flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
 
        mutex_lock(&fp->f_fl_mutex);
 
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
                        goto out;
                flock_lock_file_wait(file,
                                     &(struct file_lock){.fl_type = F_UNLCK});
-               gfs2_glock_dq_wait(fl_gh);
+               gfs2_glock_dq(fl_gh);
                gfs2_holder_reinit(state, flags, fl_gh);
        } else {
                error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
index c355f73..ee4e04f 100644 (file)
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
                cachep = gfs2_glock_aspace_cachep;
        else
                cachep = gfs2_glock_cachep;
-       gl = kmem_cache_alloc(cachep, GFP_KERNEL);
+       gl = kmem_cache_alloc(cachep, GFP_NOFS);
        if (!gl)
                return -ENOMEM;
 
        memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
 
        if (glops->go_flags & GLOF_LVB) {
-               gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL);
+               gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
                if (!gl->gl_lksb.sb_lvbptr) {
                        kmem_cache_free(cachep, gl);
                        return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
                gl = list_entry(list->next, struct gfs2_glock, gl_lru);
                list_del_init(&gl->gl_lru);
                if (!spin_trylock(&gl->gl_spin)) {
+add_back_to_lru:
                        list_add(&gl->gl_lru, &lru_list);
                        atomic_inc(&lru_count);
                        continue;
                }
+               if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+                       spin_unlock(&gl->gl_spin);
+                       goto add_back_to_lru;
+               }
                clear_bit(GLF_LRU, &gl->gl_flags);
-               spin_unlock(&lru_lock);
                gl->gl_lockref.count++;
                if (demote_ok(gl))
                        handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
                if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
                        gl->gl_lockref.count--;
                spin_unlock(&gl->gl_spin);
-               spin_lock(&lru_lock);
+               cond_resched_lock(&lru_lock);
        }
 }
 
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
                gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
 
                /* Test for being demotable */
-               if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
+               if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
                        list_move(&gl->gl_lru, &dispose);
                        atomic_dec(&lru_count);
                        freed++;
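The glock allocations above move from GFP_KERNEL to GFP_NOFS so that, under memory pressure, the allocator cannot recurse into filesystem reclaim while glock state is held. A purely conceptual userspace sketch of the re-entrancy hazard; none of this is real allocator code:

    #include <stdio.h>
    #include <stdbool.h>

    static bool fs_lock_held;   /* glock code runs with fs state held */

    static void reclaim(bool may_enter_fs)
    {
        if (may_enter_fs && fs_lock_held)
            printf("  would re-enter the fs under its own lock: deadlock\n");
        else
            printf("  reclaims from non-fs caches only: safe\n");
    }

    static void *alloc_page(bool gfp_nofs)
    {
        reclaim(!gfp_nofs);     /* GFP_KERNEL may call back into the fs */
        return NULL;            /* the allocation itself is elided */
    }

    int main(void)
    {
        fs_lock_held = true;
        printf("GFP_KERNEL:\n");
        alloc_page(false);
        printf("GFP_NOFS:\n");
        alloc_page(true);
        return 0;
    }
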
index fc11007..2ffc67d 100644 (file)
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
  * inode_go_inval - prepare a inode glock to be released
  * @gl: the glock
  * @flags:
- * 
- * Normally we invlidate everything, but if we are moving into
+ *
+ * Normally we invalidate everything, but if we are moving into
  * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
  * can keep hold of the metadata, since it won't have changed.
  *
index 91f274d..4fafea1 100644 (file)
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
 
        new_size = old_size + RECOVER_SIZE_INC;
 
-       submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
-       result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS);
+       submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
+       result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
        if (!submit || !result) {
                kfree(submit);
                kfree(result);
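The set_recover_size() change above swaps kzalloc(n * size) for kcalloc(n, size): the two-argument form can refuse a multiplication that would wrap instead of quietly under-allocating. The same guard in portable C (calloc performs it internally):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    static void *checked_calloc(size_t n, size_t size)
    {
        if (size && n > SIZE_MAX / size)
            return NULL;            /* n * size would wrap around */
        return calloc(n, size);
    }

    int main(void)
    {
        void *p = checked_calloc(SIZE_MAX / 2, 4);  /* would overflow */

        printf("overflowing request: %s\n", p ? "allocated!?" : "rejected");
        free(p);
        return 0;
    }
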
index db629d1..f4cb9c0 100644 (file)
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
 
 /**
  * gfs2_free_extlen - Return extent length of free blocks
- * @rbm: Starting position
+ * @rrbm: Starting position
  * @len: Max length to check
  *
  * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
 
 /**
  * gfs2_rlist_free - free a resource group list
- * @list: the list of resource groups
+ * @rlist: the list of resource groups
  *
  */
 
index 38cfcf5..6f0f590 100644 (file)
@@ -1588,9 +1588,12 @@ int jbd2_journal_stop(handle_t *handle)
         * to perform a synchronous write.  We do this to detect the
         * case where a single process is doing a stream of sync
         * writes.  No point in waiting for joiners in that case.
+        *
+        * Setting max_batch_time to 0 disables this completely.
         */
        pid = current->pid;
-       if (handle->h_sync && journal->j_last_sync_writer != pid) {
+       if (handle->h_sync && journal->j_last_sync_writer != pid &&
+           journal->j_max_batch_time) {
                u64 commit_time, trans_time;
 
                journal->j_last_sync_writer = pid;
index e3d37f6..d895b4b 100644 (file)
@@ -39,6 +39,19 @@ struct kernfs_open_node {
        struct list_head        files; /* goes through kernfs_open_file.list */
 };
 
+/*
+ * kernfs_notify() may be called from any context and bounces notifications
+ * through a work item.  To minimize space overhead in kernfs_node, the
+ * pending queue is implemented as a singly linked list of kernfs_nodes.
+ * The list is terminated with the self pointer so that whether a
+ * kernfs_node is on the list or not can be determined by testing the next
+ * pointer for NULL.
+ */
+#define KERNFS_NOTIFY_EOL                      ((void *)&kernfs_notify_list)
+
+static DEFINE_SPINLOCK(kernfs_notify_lock);
+static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
+
 static struct kernfs_open_file *kernfs_of(struct file *file)
 {
        return ((struct seq_file *)file->private_data)->private;
@@ -783,24 +796,25 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
        return DEFAULT_POLLMASK|POLLERR|POLLPRI;
 }
 
-/**
- * kernfs_notify - notify a kernfs file
- * @kn: file to notify
- *
- * Notify @kn such that poll(2) on @kn wakes up.
- */
-void kernfs_notify(struct kernfs_node *kn)
+static void kernfs_notify_workfn(struct work_struct *work)
 {
-       struct kernfs_root *root = kernfs_root(kn);
+       struct kernfs_node *kn;
        struct kernfs_open_node *on;
        struct kernfs_super_info *info;
-       unsigned long flags;
-
-       if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
+repeat:
+       /* pop one off the notify_list */
+       spin_lock_irq(&kernfs_notify_lock);
+       kn = kernfs_notify_list;
+       if (kn == KERNFS_NOTIFY_EOL) {
+               spin_unlock_irq(&kernfs_notify_lock);
                return;
+       }
+       kernfs_notify_list = kn->attr.notify_next;
+       kn->attr.notify_next = NULL;
+       spin_unlock_irq(&kernfs_notify_lock);
 
        /* kick poll */
-       spin_lock_irqsave(&kernfs_open_node_lock, flags);
+       spin_lock_irq(&kernfs_open_node_lock);
 
        on = kn->attr.open;
        if (on) {
@@ -808,12 +822,12 @@ void kernfs_notify(struct kernfs_node *kn)
                wake_up_interruptible(&on->poll);
        }
 
-       spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
+       spin_unlock_irq(&kernfs_open_node_lock);
 
        /* kick fsnotify */
        mutex_lock(&kernfs_mutex);
 
-       list_for_each_entry(info, &root->supers, node) {
+       list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
                struct inode *inode;
                struct dentry *dentry;
 
@@ -833,6 +847,33 @@ void kernfs_notify(struct kernfs_node *kn)
        }
 
        mutex_unlock(&kernfs_mutex);
+       kernfs_put(kn);
+       goto repeat;
+}
+
+/**
+ * kernfs_notify - notify a kernfs file
+ * @kn: file to notify
+ *
+ * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
+ * context.
+ */
+void kernfs_notify(struct kernfs_node *kn)
+{
+       static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
+       unsigned long flags;
+
+       if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
+               return;
+
+       spin_lock_irqsave(&kernfs_notify_lock, flags);
+       if (!kn->attr.notify_next) {
+               kernfs_get(kn);
+               kn->attr.notify_next = kernfs_notify_list;
+               kernfs_notify_list = kn;
+               schedule_work(&kernfs_notify_work);
+       }
+       spin_unlock_irqrestore(&kernfs_notify_lock, flags);
 }
 EXPORT_SYMBOL_GPL(kernfs_notify);
 
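The reworked kernfs_notify() above queues nodes on a singly linked list terminated by a sentinel rather than NULL, so a NULL next pointer doubles as "not queued" and enqueueing needs no extra flag; a work item then drains the list in process context. The trick in miniature (the kernel's sentinel is the address of the list head itself; a dummy node serves here):

    #include <stdio.h>

    struct node { const char *name; struct node *next; };

    static struct node eol;             /* sentinel terminator */
    static struct node *pending = &eol;

    static void enqueue(struct node *n)
    {
        if (n->next)                    /* non-NULL next: already queued */
            return;
        n->next = pending;
        pending = n;
    }

    int main(void)
    {
        struct node a = { "a", NULL }, b = { "b", NULL };

        enqueue(&a);
        enqueue(&b);
        enqueue(&a);                    /* duplicate: a no-op */

        while (pending != &eol) {       /* drain, like the work item */
            struct node *n = pending;

            pending = n->next;
            n->next = NULL;             /* may now be queued again */
            printf("notify %s\n", n->name);
        }
        return 0;
    }
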
index d171b98..f973ae9 100644 (file)
@@ -211,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb)
        kernfs_put(root_kn);
 }
 
+/**
+ * kernfs_pin_sb: try to pin the superblock associated with a kernfs_root
+ * @root: the kernfs_root in question
+ * @ns: the namespace tag
+ *
+ * Pin the superblock so it won't be destroyed by subsequent operations.
+ * This can be used to block ->kill_sb(), which may be useful for kernfs
+ * users that dynamically manage superblocks.
+ *
+ * Returns NULL if there's no superblock associated with this kernfs_root,
+ * or -EINVAL if the superblock is being freed.
+ */
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
+{
+       struct kernfs_super_info *info;
+       struct super_block *sb = NULL;
+
+       mutex_lock(&kernfs_mutex);
+       list_for_each_entry(info, &root->supers, node) {
+               if (info->ns == ns) {
+                       sb = info->sb;
+                       if (!atomic_inc_not_zero(&info->sb->s_active))
+                               sb = ERR_PTR(-EINVAL);
+                       break;
+               }
+       }
+       mutex_unlock(&kernfs_mutex);
+       return sb;
+}
+
 void __init kernfs_init(void)
 {
        kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
index da57c9b..717fbc4 100644 (file)
@@ -431,7 +431,7 @@ static int lease_init(struct file *filp, long type, struct file_lock *fl)
        if (assign_type(fl, type) != 0)
                return -EINVAL;
 
-       fl->fl_owner = (fl_owner_t)filp;
+       fl->fl_owner = (fl_owner_t)current->files;
        fl->fl_pid = current->tgid;
 
        fl->fl_file = filp;
index bf166e3..187477d 100644 (file)
@@ -73,6 +73,7 @@
 #include <linux/mbcache.h>
 #include <linux/init.h>
 #include <linux/blockgroup_lock.h>
+#include <linux/log2.h>
 
 #ifdef MB_CACHE_DEBUG
 # define mb_debug(f...) do { \
@@ -93,7 +94,7 @@
 
 #define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
 
-#define MB_CACHE_ENTRY_LOCK_BITS       __builtin_log2(NR_BG_LOCKS)
+#define MB_CACHE_ENTRY_LOCK_BITS       ilog2(NR_BG_LOCKS)
 #define        MB_CACHE_ENTRY_LOCK_INDEX(ce)                   \
        (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
 
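The mbcache fix above replaces __builtin_log2(), which is the double-returning math builtin, with the kernel's integer ilog2(). For a power of two the integer log is just the index of the set bit, as a short sketch shows:

    #include <stdio.h>

    static unsigned int my_ilog2(unsigned long n)   /* assumes n > 0 */
    {
        unsigned int r = 0;

        while (n >>= 1)
            r++;
        return r;
    }

    int main(void)
    {
        unsigned long nr_locks = 128;   /* stand-in for NR_BG_LOCKS */

        printf("lock bits: %u\n", my_ilog2(nr_locks));  /* prints 7 */
        return 0;
    }
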
index 985c6f3..9eb787e 100644 (file)
@@ -2256,9 +2256,10 @@ done:
                goto out;
        }
        path->dentry = dentry;
-       path->mnt = mntget(nd->path.mnt);
+       path->mnt = nd->path.mnt;
        if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
                return 1;
+       mntget(path->mnt);
        follow_mount(path);
        error = 0;
 out:
index 8f98138..f11b9ee 100644 (file)
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
        spin_unlock(&dreq->lock);
 
        while (!list_empty(&hdr->pages)) {
-               bool do_destroy = true;
 
                req = nfs_list_entry(hdr->pages.next);
                nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
                case NFS_IOHDR_NEED_COMMIT:
                        kref_get(&req->wb_kref);
                        nfs_mark_request_commit(req, hdr->lseg, &cinfo);
-                       do_destroy = false;
                }
                nfs_unlock_and_release_request(req);
        }
index c496f8a..9927913 100644 (file)
@@ -147,6 +147,17 @@ int nfs_sync_mapping(struct address_space *mapping)
        return ret;
 }
 
+static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
+{
+       struct nfs_inode *nfsi = NFS_I(inode);
+
+       if (inode->i_mapping->nrpages == 0)
+               flags &= ~NFS_INO_INVALID_DATA;
+       nfsi->cache_validity |= flags;
+       if (flags & NFS_INO_INVALID_DATA)
+               nfs_fscache_invalidate(inode);
+}
+
 /*
  * Invalidate the local caches
  */
@@ -162,17 +173,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
 
        memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
-               nfs_fscache_invalidate(inode);
-               nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
                                        | NFS_INO_INVALID_DATA
                                        | NFS_INO_INVALID_ACCESS
                                        | NFS_INO_INVALID_ACL
-                                       | NFS_INO_REVAL_PAGECACHE;
+                                       | NFS_INO_REVAL_PAGECACHE);
        } else
-               nfsi->cache_validity |= NFS_INO_INVALID_ATTR
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
                                        | NFS_INO_INVALID_ACCESS
                                        | NFS_INO_INVALID_ACL
-                                       | NFS_INO_REVAL_PAGECACHE;
+                                       | NFS_INO_REVAL_PAGECACHE);
        nfs_zap_label_cache_locked(nfsi);
 }
 
@@ -187,8 +197,7 @@ void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
 {
        if (mapping->nrpages != 0) {
                spin_lock(&inode->i_lock);
-               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA;
-               nfs_fscache_invalidate(inode);
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
                spin_unlock(&inode->i_lock);
        }
 }
@@ -209,7 +218,7 @@ EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
 void nfs_invalidate_atime(struct inode *inode)
 {
        spin_lock(&inode->i_lock);
-       NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME;
+       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
        spin_unlock(&inode->i_lock);
 }
 EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
@@ -369,7 +378,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                inode->i_mode = fattr->mode;
                if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
                                && nfs_server_capable(inode, NFS_CAP_MODE))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                /* Why so? Because we want revalidate for devices/FIFOs, and
                 * that's precisely what we have in nfs_file_inode_operations.
                 */
@@ -415,36 +424,36 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
                if (fattr->valid & NFS_ATTR_FATTR_ATIME)
                        inode->i_atime = fattr->atime;
                else if (nfs_server_capable(inode, NFS_CAP_ATIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_MTIME)
                        inode->i_mtime = fattr->mtime;
                else if (nfs_server_capable(inode, NFS_CAP_MTIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_CTIME)
                        inode->i_ctime = fattr->ctime;
                else if (nfs_server_capable(inode, NFS_CAP_CTIME))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
                        inode->i_version = fattr->change_attr;
                else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_SIZE)
                        inode->i_size = nfs_size_to_loff_t(fattr->size);
                else
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR
-                               | NFS_INO_REVAL_PAGECACHE;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
+                               | NFS_INO_REVAL_PAGECACHE);
                if (fattr->valid & NFS_ATTR_FATTR_NLINK)
                        set_nlink(inode, fattr->nlink);
                else if (nfs_server_capable(inode, NFS_CAP_NLINK))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_OWNER)
                        inode->i_uid = fattr->uid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_GROUP)
                        inode->i_gid = fattr->gid;
                else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
-                       nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
                if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
                        inode->i_blocks = fattr->du.nfs2.blocks;
                if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -550,6 +559,9 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
 
        spin_lock(&inode->i_lock);
        i_size_write(inode, offset);
+       /* Optimisation: truncating to zero leaves no cached data, so the
+        * data-invalid flag can be cleared */
+       if (offset == 0)
+               NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
        spin_unlock(&inode->i_lock);
 
        truncate_pagecache(inode, offset);
@@ -578,7 +590,8 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
                        inode->i_uid = attr->ia_uid;
                if ((attr->ia_valid & ATTR_GID) != 0)
                        inode->i_gid = attr->ia_gid;
-               NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+               nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
+                               | NFS_INO_INVALID_ACL);
                spin_unlock(&inode->i_lock);
        }
        if ((attr->ia_valid & ATTR_SIZE) != 0) {
@@ -1101,7 +1114,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
                        && inode->i_version == fattr->pre_change_attr) {
                inode->i_version = fattr->change_attr;
                if (S_ISDIR(inode->i_mode))
-                       nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
                ret |= NFS_INO_INVALID_ATTR;
        }
        /* If we have atomic WCC data, we may update some attributes */
@@ -1117,7 +1130,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
                        && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
                memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
                if (S_ISDIR(inode->i_mode))
-                       nfsi->cache_validity |= NFS_INO_INVALID_DATA;
+                       nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
                ret |= NFS_INO_INVALID_ATTR;
        }
        if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
@@ -1128,9 +1141,6 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
                ret |= NFS_INO_INVALID_ATTR;
        }
 
-       if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
-               nfs_fscache_invalidate(inode);
-
        return ret;
 }
 
@@ -1189,7 +1199,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
                invalid |= NFS_INO_INVALID_ATIME;
 
        if (invalid != 0)
-               nfsi->cache_validity |= invalid;
+               nfs_set_cache_invalid(inode, invalid);
 
        nfsi->read_cache_jiffies = fattr->time_start;
        return 0;
@@ -1402,13 +1412,11 @@ EXPORT_SYMBOL_GPL(nfs_refresh_inode);
 
 static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
 {
-       struct nfs_inode *nfsi = NFS_I(inode);
+       unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
 
-       nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
-       if (S_ISDIR(inode->i_mode)) {
-               nfsi->cache_validity |= NFS_INO_INVALID_DATA;
-               nfs_fscache_invalidate(inode);
-       }
+       if (S_ISDIR(inode->i_mode))
+               invalid |= NFS_INO_INVALID_DATA;
+       nfs_set_cache_invalid(inode, invalid);
        if ((fattr->valid & NFS_ATTR_FATTR) == 0)
                return 0;
        return nfs_refresh_inode_locked(inode, fattr);
@@ -1601,6 +1609,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                        if ((nfsi->npages == 0) || new_isize > cur_isize) {
                                i_size_write(inode, new_isize);
                                invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
+                               invalid &= ~NFS_INO_REVAL_PAGECACHE;
                        }
                        dprintk("NFS: isize change on server for file %s/%ld "
                                        "(%Ld to %Ld)\n",
@@ -1702,10 +1711,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
                invalid &= ~NFS_INO_INVALID_DATA;
        if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) ||
                        (save_cache_validity & NFS_INO_REVAL_FORCED))
-               nfsi->cache_validity |= invalid;
-
-       if (invalid & NFS_INO_INVALID_DATA)
-               nfs_fscache_invalidate(inode);
+               nfs_set_cache_invalid(inode, invalid);
 
        return 0;
  out_err:
index 82ddbf4..f415cbf 100644
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
 int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
 int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
                      const struct rpc_call_ops *, int, int);
+void nfs_free_request(struct nfs_page *req);
 
 static inline void nfs_iocounter_init(struct nfs_io_counter *c)
 {
index 871d6ed..8f854dd 100644
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
        &posix_acl_default_xattr_handler,
        NULL,
 };
+
+static int
+nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
+               size_t size, ssize_t *result)
+{
+       struct posix_acl *acl;
+       char *p = data + *result;
+
+       acl = get_acl(inode, type);
+       if (!acl)
+               return 0;
+
+       posix_acl_release(acl);
+
+       *result += strlen(name);
+       *result += 1;
+       if (!size)
+               return 0;
+       if (*result > size)
+               return -ERANGE;
+
+       strcpy(p, name);
+       return 0;
+}
+
+ssize_t
+nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
+{
+       struct inode *inode = dentry->d_inode;
+       ssize_t result = 0;
+       int error;
+
+       error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
+                       POSIX_ACL_XATTR_ACCESS, data, size, &result);
+       if (error)
+               return error;
+
+       error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
+                       POSIX_ACL_XATTR_DEFAULT, data, size, &result);
+       if (error)
+               return error;
+       return result;
+}
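
nfs3_listxattr() builds the returned name list by appending each ACL xattr name only when get_acl() says the ACL actually exists, and it honours the usual listxattr contract: with size 0 it only reports the required buffer length, and it returns -ERANGE when the caller's buffer is too small. From userspace the result is consumed with the standard two-pass protocol, a sketch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/xattr.h>

/* Two-pass listxattr(2): ask for the required size first, then fetch
 * the buffer of NUL-separated names (e.g. "system.posix_acl_access"). */
int main(int argc, char **argv)
{
        const char *path = argc > 1 ? argv[1] : ".";
        ssize_t len = listxattr(path, NULL, 0);
        char *buf, *p;

        if (len <= 0)
                return 0;               /* no xattrs, or an error */
        buf = malloc(len);
        if (!buf)
                return 1;
        len = listxattr(path, buf, len);
        if (len > 0)
                for (p = buf; p < buf + len; p += strlen(p) + 1)
                        puts(p);
        free(buf);
        return 0;
}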
index e7daa42..f0afa29 100644
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-       .listxattr      = generic_listxattr,
+       .listxattr      = nfs3_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
        .getattr        = nfs_getattr,
        .setattr        = nfs_setattr,
 #ifdef CONFIG_NFS_V3_ACL
-       .listxattr      = generic_listxattr,
+       .listxattr      = nfs3_listxattr,
        .getxattr       = generic_getxattr,
        .setxattr       = generic_setxattr,
        .removexattr    = generic_removexattr,
index f63cb87..ba2affa 100644
@@ -230,7 +230,7 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
 extern struct file_system_type nfs4_fs_type;
 
 /* nfs4namespace.c */
-struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
+struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, struct qstr *);
 struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
                               struct nfs_fh *, struct nfs_fattr *);
 int nfs4_replace_transport(struct nfs_server *server,
index 3d5dbf8..3d83cb1 100644
@@ -139,16 +139,22 @@ static size_t nfs_parse_server_name(char *string, size_t len,
  * @server: NFS server struct
  * @flavors: List of security tuples returned by SECINFO procedure
  *
- * Return the pseudoflavor of the first security mechanism in
- * "flavors" that is locally supported.  Return RPC_AUTH_UNIX if
- * no matching flavor is found in the array.  The "flavors" array
+ * Return an rpc client that uses the first security mechanism in
+ * "flavors" that is locally supported.  The "flavors" array
  * is searched in the order returned from the server, per RFC 3530
- * recommendation.
+ * recommendation, and each flavor is checked for membership in the
+ * sec= mount option list if one exists.
+ *
+ * Return -EPERM if no matching flavor is found in the array.
+ *
+ * Please call rpc_shutdown_client() when you are done with this rpc client.
  */
-static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
+static struct rpc_clnt *nfs_find_best_sec(struct rpc_clnt *clnt,
+                                         struct nfs_server *server,
                                          struct nfs4_secinfo_flavors *flavors)
 {
-       rpc_authflavor_t pseudoflavor;
+       rpc_authflavor_t pflavor;
        struct nfs4_secinfo4 *secinfo;
        unsigned int i;
 
@@ -159,62 +165,73 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
                case RPC_AUTH_NULL:
                case RPC_AUTH_UNIX:
                case RPC_AUTH_GSS:
-                       pseudoflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
+                       pflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
                                                        &secinfo->flavor_info);
-                       /* make sure pseudoflavor matches sec= mount opt */
-                       if (pseudoflavor != RPC_AUTH_MAXFLAVOR &&
-                           nfs_auth_info_match(&server->auth_info,
-                                               pseudoflavor))
-                               return pseudoflavor;
-                       break;
+                       /* does the pseudoflavor match a sec= mount opt? */
+                       if (pflavor != RPC_AUTH_MAXFLAVOR &&
+                           nfs_auth_info_match(&server->auth_info, pflavor)) {
+                               struct rpc_clnt *new;
+                               struct rpc_cred *cred;
+
+                               /* Cloning creates an rpc_auth for the flavor */
+                               new = rpc_clone_client_set_auth(clnt, pflavor);
+                               if (IS_ERR(new))
+                                       continue;
+                               /*
+                                * Check that the user can actually use the
+                                * flavor. This is mostly for RPC_AUTH_GSS,
+                                * where cr_init obtains a gss context.
+                                */
+                               cred = rpcauth_lookupcred(new->cl_auth, 0);
+                               if (IS_ERR(cred)) {
+                                       rpc_shutdown_client(new);
+                                       continue;
+                               }
+                               put_rpccred(cred);
+                               return new;
+                       }
                }
        }
-
-       /* if there were any sec= options then nothing matched */
-       if (server->auth_info.flavor_len > 0)
-               return -EPERM;
-
-       return RPC_AUTH_UNIX;
+       return ERR_PTR(-EPERM);
 }
 
-static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
+/**
+ * nfs4_negotiate_security - in response to an NFS4ERR_WRONGSEC on lookup,
+ * return an rpc_clnt that uses the best available security flavor with
+ * respect to the secinfo flavor list and the sec= mount options.
+ *
+ * @clnt: RPC client to clone
+ * @inode: directory inode
+ * @name: lookup name
+ *
+ * Please call rpc_shutdown_client() when you are done with this rpc client.
+ */
+struct rpc_clnt *
+nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
+                                       struct qstr *name)
 {
        struct page *page;
        struct nfs4_secinfo_flavors *flavors;
-       rpc_authflavor_t flavor;
+       struct rpc_clnt *new;
        int err;
 
        page = alloc_page(GFP_KERNEL);
        if (!page)
-               return -ENOMEM;
+               return ERR_PTR(-ENOMEM);
+
        flavors = page_address(page);
 
        err = nfs4_proc_secinfo(inode, name, flavors);
        if (err < 0) {
-               flavor = err;
+               new = ERR_PTR(err);
                goto out;
        }
 
-       flavor = nfs_find_best_sec(NFS_SERVER(inode), flavors);
+       new = nfs_find_best_sec(clnt, NFS_SERVER(inode), flavors);
 
 out:
        put_page(page);
-       return flavor;
-}
-
-/*
- * Please call rpc_shutdown_client() when you are done with this client.
- */
-struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
-                                       struct qstr *name)
-{
-       rpc_authflavor_t flavor;
-
-       flavor = nfs4_negotiate_security(inode, name);
-       if ((int)flavor < 0)
-               return ERR_PTR((int)flavor);
-
-       return rpc_clone_client_set_auth(clnt, flavor);
+       return new;
 }
 
 static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
@@ -397,11 +414,6 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
 
        if (client->cl_auth->au_flavor != flavor)
                flavor = client->cl_auth->au_flavor;
-       else {
-               rpc_authflavor_t new = nfs4_negotiate_security(dir, name);
-               if ((int)new >= 0)
-                       flavor = new;
-       }
        mnt = nfs_do_submount(dentry, fh, fattr, flavor);
 out:
        rpc_shutdown_client(client);
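
nfs4_negotiate_security() now hands back a usable rpc_clnt instead of a bare flavor: each server-offered flavor is cloned into a candidate client and proven workable (rpcauth_lookupcred(), which for GSS establishes a context) before being accepted, and the first flavor that passes wins. A runnable sketch of that try/validate/first-success shape, with hypothetical stand-ins for the RPC calls:

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for rpc_clone_client_set_auth() (build) and
 * rpcauth_lookupcred() (validate): try each server-offered flavor in
 * order, keep the first candidate that both builds and proves usable,
 * and discard the rest. Flavor numbers below are illustrative. */
struct client { int flavor; };

static struct client *try_build(int flavor)
{
        struct client *c = malloc(sizeof(*c));

        if (c)
                c->flavor = flavor;
        return c;
}

/* Pretend only flavor 6 (a GSS-like flavor) can obtain credentials. */
static int validate(struct client *c) { return c->flavor == 6 ? 0 : -1; }

static struct client *negotiate(const int *flavors, size_t n)
{
        for (size_t i = 0; i < n; i++) {
                struct client *c = try_build(flavors[i]);

                if (!c)
                        continue;
                if (validate(c) == 0)
                        return c;       /* first flavor that really works */
                free(c);                /* candidate failed: keep going */
        }
        return NULL;                    /* caller maps this to -EPERM */
}

int main(void)
{
        int offered[] = { 1, 390003, 6 };
        struct client *c = negotiate(offered, 3);

        if (c) {
                printf("negotiated flavor %d\n", c->flavor);
                free(c);
        } else {
                printf("no usable flavor\n");
        }
        return 0;
}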
index 285ad53..4bf3d97 100644
@@ -3247,7 +3247,7 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
                        err = -EPERM;
                        if (client != *clnt)
                                goto out;
-                       client = nfs4_create_sec_client(client, dir, name);
+                       client = nfs4_negotiate_security(client, dir, name);
                        if (IS_ERR(client))
                                return PTR_ERR(client);
 
index b6ee3a6..17fab89 100644
@@ -29,8 +29,6 @@
 static struct kmem_cache *nfs_page_cachep;
 static const struct rpc_call_ops nfs_pgio_common_ops;
 
-static void nfs_free_request(struct nfs_page *);
-
 static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
 {
        p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
        WARN_ON_ONCE(prev == req);
 
        if (!prev) {
+               /* a head request */
                req->wb_head = req;
                req->wb_this_page = req;
        } else {
+               /* a subrequest */
                WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
                WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
                req->wb_head = prev->wb_head;
                req->wb_this_page = prev->wb_this_page;
                prev->wb_this_page = req;
 
+               /* All subrequests take a ref on the head request until
+                * nfs_page_group_destroy is called */
+               kref_get(&req->wb_head->wb_kref);
+
                /* grab extra ref if head request has extra ref from
                 * the write/commit path to handle handoff between write
                 * and commit lists */
-               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags))
+               if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
+                       set_bit(PG_INODE_REF, &req->wb_flags);
                        kref_get(&req->wb_kref);
+               }
        }
 }
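
A page group is a circular, singly linked list threaded through wb_this_page, with every member pointing back at the head through wb_head; the hunk above splices each subrequest in after `prev` and adds the head reference that nfs_page_group_destroy() later drops. A sketch of just the ring linkage (reference counting omitted), with field names mirroring the diff:

#include <stdio.h>

/* Circular singly linked "page group": every member points at one head,
 * and this_page threads the ring. Refcounting is left out of the sketch. */
struct req {
        struct req *head;
        struct req *this_page;  /* next member of the ring */
        int id;
};

static void group_init(struct req *req, struct req *prev)
{
        if (!prev) {                    /* a head request: ring of one */
                req->head = req;
                req->this_page = req;
        } else {                        /* a subrequest: splice after prev */
                req->head = prev->head;
                req->this_page = prev->this_page;
                prev->this_page = req;
        }
}

int main(void)
{
        struct req a = { .id = 0 }, b = { .id = 1 }, c = { .id = 2 };
        struct req *it;

        group_init(&a, NULL);
        group_init(&b, &a);
        group_init(&c, &b);

        it = &a;
        do {                            /* walk the ring exactly once */
                printf("req %d (head %d)\n", it->id, it->head->id);
                it = it->this_page;
        } while (it != &a);
        return 0;
}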
 
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
        struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
        struct nfs_page *tmp, *next;
 
+       /* subrequests must release the ref on the head request */
+       if (req->wb_head != req)
+               nfs_release_request(req->wb_head);
+
        if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
                return;
 
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
  *
  * Note: Should never be called with the spinlock held!
  */
-static void nfs_free_request(struct nfs_page *req)
+void nfs_free_request(struct nfs_page *req)
 {
        WARN_ON_ONCE(req->wb_this_page != req);
 
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
                        nfs_pageio_doio(desc);
                        if (desc->pg_error < 0)
                                return 0;
-                       desc->pg_moreio = 0;
                        if (desc->pg_recoalesce)
                                return 0;
                        /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
                desc->pg_count = 0;
                desc->pg_base = 0;
                desc->pg_recoalesce = 0;
+               desc->pg_moreio = 0;
 
                while (!list_empty(&head)) {
                        struct nfs_page *req;
index 3ee5af4..5e2f103 100644
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
 static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
 static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
 static const struct nfs_rw_ops nfs_rw_write_ops;
+static void nfs_clear_request_commit(struct nfs_page *req);
 
 static struct kmem_cache *nfs_wdata_cachep;
 static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
        set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
 }
 
+/*
+ * nfs_page_find_head_request_locked - find head request associated with @page
+ *
+ * must be called while holding the inode lock.
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
 static struct nfs_page *
-nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
+nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
 {
        struct nfs_page *req = NULL;
 
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
                /* Linearly search the commit list for the correct req */
                list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
                        if (freq->wb_page == page) {
-                               req = freq;
+                               req = freq->wb_head;
                                break;
                        }
                }
        }
 
-       if (req)
+       if (req) {
+               WARN_ON_ONCE(req->wb_head != req);
+
                kref_get(&req->wb_kref);
+       }
 
        return req;
 }
 
-static struct nfs_page *nfs_page_find_request(struct page *page)
+/*
+ * nfs_page_find_head_request - find head request associated with @page
+ *
+ * returns matching head request with reference held, or NULL if not found.
+ */
+static struct nfs_page *nfs_page_find_head_request(struct page *page)
 {
        struct inode *inode = page_file_mapping(page)->host;
        struct nfs_page *req = NULL;
 
        spin_lock(&inode->i_lock);
-       req = nfs_page_find_request_locked(NFS_I(inode), page);
+       req = nfs_page_find_head_request_locked(NFS_I(inode), page);
        spin_unlock(&inode->i_lock);
        return req;
 }
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
                clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
+
+/*
+ * nfs_page_group_clear_bits - clear all page group related bits from @req
+ * @req - an nfs request
+ */
+static void
+nfs_page_group_clear_bits(struct nfs_page *req)
+{
+       clear_bit(PG_TEARDOWN, &req->wb_flags);
+       clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
+       clear_bit(PG_UPTODATE, &req->wb_flags);
+       clear_bit(PG_WB_END, &req->wb_flags);
+       clear_bit(PG_REMOVE, &req->wb_flags);
+}
+
+
+/*
+ * nfs_unroll_locks_and_wait -  unlock all newly locked reqs and wait on @req
+ *
+ * this is a helper function for nfs_lock_and_join_requests
+ *
+ * @inode - inode associated with request page group, must be holding inode lock
+ * @head  - head request of page group, must be holding head lock
+ * @req   - request that couldn't be locked and needs to wait on the req bit lock
+ * @nonblock - if true, don't actually wait
+ *
+ * NOTE: this must be called holding page_group bit lock and inode spin lock
+ *       and BOTH will be released before returning.
+ *
+ * returns 0 on success, < 0 on error.
+ */
+static int
+nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
+                         struct nfs_page *req, bool nonblock)
+       __releases(&inode->i_lock)
+{
+       struct nfs_page *tmp;
+       int ret;
+
+       /* relinquish all the locks successfully grabbed this run */
+       for (tmp = head; tmp != req; tmp = tmp->wb_this_page)
+               nfs_unlock_request(tmp);
+
+       WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
+
+       /* grab a ref on the request that will be waited on */
+       kref_get(&req->wb_kref);
+
+       nfs_page_group_unlock(head);
+       spin_unlock(&inode->i_lock);
+
+       /* release ref from nfs_page_find_head_request_locked */
+       nfs_release_request(head);
+
+       if (!nonblock)
+               ret = nfs_wait_on_request(req);
+       else
+               ret = -EAGAIN;
+       nfs_release_request(req);
+
+       return ret;
+}
+
+/*
+ * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
+ *
+ * @destroy_list - request list (using wb_this_page) terminated by @old_head
+ * @old_head - the old head of the list
+ *
+ * All subrequests must be locked and removed from all lists, so at this point
+ * they are only "active" in this function, and possibly in nfs_wait_on_request
+ * with a reference held by some other context.
+ */
+static void
+nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+                                struct nfs_page *old_head)
+{
+       while (destroy_list) {
+               struct nfs_page *subreq = destroy_list;
+
+               destroy_list = (subreq->wb_this_page == old_head) ?
+                                  NULL : subreq->wb_this_page;
+
+               WARN_ON_ONCE(old_head != subreq->wb_head);
+
+               /* make sure old group is not used */
+               subreq->wb_head = subreq;
+               subreq->wb_this_page = subreq;
+
+               nfs_clear_request_commit(subreq);
+
+               /* subreq is now totally disconnected from page group or any
+                * write / commit lists. last chance to wake any waiters */
+               nfs_unlock_request(subreq);
+
+               if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
+                       /* release ref on old head request */
+                       nfs_release_request(old_head);
+
+                       nfs_page_group_clear_bits(subreq);
+
+                       /* release the PG_INODE_REF reference */
+                       if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
+                               nfs_release_request(subreq);
+                       else
+                               WARN_ON_ONCE(1);
+               } else {
+                       WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
+                       /* zombie requests have already released the last
+                        * reference and were waiting on the rest of the
+                        * group to complete. Since it's no longer part of a
+                        * group, simply free the request */
+                       nfs_page_group_clear_bits(subreq);
+                       nfs_free_request(subreq);
+               }
+       }
+}
+
+/*
+ * nfs_lock_and_join_requests - join all subreqs to the head req and return
+ *                              a locked reference, cancelling any pending
+ *                              operations for this page.
+ *
+ * @page - the page used to lookup the "page group" of nfs_page structures
+ * @nonblock - if true, don't block waiting for request locks
+ *
+ * This function joins all sub requests to the head request by first
+ * locking all requests in the group, cancelling any pending operations
+ * and finally updating the head request to cover the whole range covered by
+ * the (former) group.  All subrequests are removed from any write or commit
+ * lists, unlinked from the group and destroyed.
+ *
+ * Returns a locked, referenced pointer to the head request - which after
+ * this call is guaranteed to be the only request associated with the page.
+ * Returns NULL if no requests are found for @page, or an ERR_PTR if an
+ * error was encountered.
+ */
+static struct nfs_page *
+nfs_lock_and_join_requests(struct page *page, bool nonblock)
 {
        struct inode *inode = page_file_mapping(page)->host;
-       struct nfs_page *req;
+       struct nfs_page *head, *subreq;
+       struct nfs_page *destroy_list = NULL;
+       unsigned int total_bytes;
        int ret;
 
+try_again:
+       total_bytes = 0;
+
+       WARN_ON_ONCE(destroy_list);
+
        spin_lock(&inode->i_lock);
-       for (;;) {
-               req = nfs_page_find_request_locked(NFS_I(inode), page);
-               if (req == NULL)
-                       break;
-               if (nfs_lock_request(req))
-                       break;
-               /* Note: If we hold the page lock, as is the case in nfs_writepage,
-                *       then the call to nfs_lock_request() will always
-                *       succeed provided that someone hasn't already marked the
-                *       request as dirty (in which case we don't care).
-                */
+
+       /*
+        * A reference is taken only on the head request which acts as a
+        * reference to the whole page group - the group will not be destroyed
+        * until the head reference is released.
+        */
+       head = nfs_page_find_head_request_locked(NFS_I(inode), page);
+
+       if (!head) {
                spin_unlock(&inode->i_lock);
-               if (!nonblock)
-                       ret = nfs_wait_on_request(req);
-               else
-                       ret = -EAGAIN;
-               nfs_release_request(req);
-               if (ret != 0)
+               return NULL;
+       }
+
+       /* lock each request in the page group */
+       nfs_page_group_lock(head);
+       subreq = head;
+       do {
+               /*
+                * Subrequests are always contiguous, non-overlapping
+                * and in order. If not, it's a programming error.
+                */
+               WARN_ON_ONCE(subreq->wb_offset !=
+                    (head->wb_offset + total_bytes));
+
+               /* keep track of how many bytes this group covers */
+               total_bytes += subreq->wb_bytes;
+
+               if (!nfs_lock_request(subreq)) {
+                       /* releases page group bit lock and
+                        * inode spin lock and all references */
+                       ret = nfs_unroll_locks_and_wait(inode, head,
+                               subreq, nonblock);
+
+                       if (ret == 0)
+                               goto try_again;
+
                        return ERR_PTR(ret);
-               spin_lock(&inode->i_lock);
+               }
+
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+
+       /* Now that all requests are locked, make sure they aren't on any list.
+        * Commit list removal accounting is done after locks are dropped */
+       subreq = head;
+       do {
+               nfs_list_remove_request(subreq);
+               subreq = subreq->wb_this_page;
+       } while (subreq != head);
+
+       /* unlink subrequests from head, destroy them later */
+       if (head->wb_this_page != head) {
+               /* destroy list will be terminated by head */
+               destroy_list = head->wb_this_page;
+               head->wb_this_page = head;
+
+               /* change head request to cover whole range that
+                * the former page group covered */
+               head->wb_bytes = total_bytes;
        }
+
+       /*
+        * prepare head request to be added to new pgio descriptor
+        */
+       nfs_page_group_clear_bits(head);
+
+       /*
+        * some part of the group was still on the inode list - otherwise
+        * the group wouldn't be involved in async write.
+        * grab a reference for the head request, iff it needs one.
+        */
+       if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
+               kref_get(&head->wb_kref);
+
+       nfs_page_group_unlock(head);
+
+       /* drop the lock so nfs_clear_request_commit() can run on the head
+        * req and the requests on the destroy list can be cleaned up */
        spin_unlock(&inode->i_lock);
-       return req;
+
+       nfs_destroy_unlinked_subrequests(destroy_list, head);
+
+       /* clean up commit list state */
+       nfs_clear_request_commit(head);
+
+       /* still holds ref on head from nfs_page_find_head_request_locked
+        * and still has lock on head from lock loop */
+       return head;
 }
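
The heart of nfs_lock_and_join_requests() above is an all-or-nothing lock pass over the ring: trylock each member, and on the first failure unwind every lock taken this pass, wait for the blocker, and restart from scratch (nfs_unroll_locks_and_wait() plus the `goto try_again`). The same shape in a plain pthreads sketch over an array:

#include <pthread.h>

/* Lock-all-or-unroll-and-retry, a sketch of the loop above using an
 * array instead of the request ring. The blocking lock/unlock pair
 * stands in for nfs_wait_on_request() before the retry. */
static int lock_all(pthread_mutex_t *locks, int n)
{
        int i, j;

try_again:
        for (i = 0; i < n; i++) {
                if (pthread_mutex_trylock(&locks[i]) == 0)
                        continue;
                /* unroll everything grabbed this pass */
                for (j = 0; j < i; j++)
                        pthread_mutex_unlock(&locks[j]);
                /* wait for the blocker, then start over */
                pthread_mutex_lock(&locks[i]);
                pthread_mutex_unlock(&locks[i]);
                goto try_again;
        }
        return 0;
}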
 
 /*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
        struct nfs_page *req;
        int ret = 0;
 
-       req = nfs_find_and_lock_request(page, nonblock);
+       req = nfs_lock_and_join_requests(page, nonblock);
        if (!req)
                goto out;
        ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
                set_page_private(req->wb_page, (unsigned long)req);
        }
        nfsi->npages++;
-       set_bit(PG_INODE_REF, &req->wb_flags);
+       /* this is a head request for a page group - mark it as having an
+        * extra reference so subrequests can follow suit */
+       WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
        kref_get(&req->wb_kref);
        spin_unlock(&inode->i_lock);
 }
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
                nfsi->npages--;
                spin_unlock(&inode->i_lock);
        }
-       nfs_release_request(req);
+
+       if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
+               nfs_release_request(req);
 }
 
 static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
 {
        struct nfs_commit_info cinfo;
        unsigned long bytes = 0;
-       bool do_destroy;
 
        if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
                goto out;
@@ -668,7 +897,6 @@ remove_req:
 next:
                nfs_unlock_request(req);
                nfs_end_page_writeback(req);
-               do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
                nfs_release_request(req);
        }
 out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
        spin_lock(&inode->i_lock);
 
        for (;;) {
-               req = nfs_page_find_request_locked(NFS_I(inode), page);
+               req = nfs_page_find_head_request_locked(NFS_I(inode), page);
                if (req == NULL)
                        goto out_unlock;
 
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
         * dropped page.
         */
        do {
-               req = nfs_page_find_request(page);
+               req = nfs_page_find_head_request(page);
                if (req == NULL)
                        return 0;
                l_ctx = req->wb_lock_context;
@@ -934,12 +1162,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
 
        if (nfs_have_delegated_attributes(inode))
                goto out;
-       if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE))
+       if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
                return false;
        smp_rmb();
        if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
                return false;
 out:
+       if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
+               return false;
        return PageUptodate(page) != 0;
 }
 
@@ -1567,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
        struct nfs_page *req;
        int ret = 0;
 
-       for (;;) {
-               wait_on_page_writeback(page);
-               req = nfs_page_find_request(page);
-               if (req == NULL)
-                       break;
-               if (nfs_lock_request(req)) {
-                       nfs_clear_request_commit(req);
-                       nfs_inode_remove_request(req);
-                       /*
-                        * In case nfs_inode_remove_request has marked the
-                        * page as being dirty
-                        */
-                       cancel_dirty_page(page, PAGE_CACHE_SIZE);
-                       nfs_unlock_and_release_request(req);
-                       break;
-               }
-               ret = nfs_wait_on_request(req);
-               nfs_release_request(req);
-               if (ret < 0)
-                       break;
+       wait_on_page_writeback(page);
+
+       /* blocking call to cancel all requests and join to a single (head)
+        * request */
+       req = nfs_lock_and_join_requests(page, false);
+
+       if (IS_ERR(req)) {
+               ret = PTR_ERR(req);
+       } else if (req) {
+               /* all requests from this page have been cancelled by
+                * nfs_lock_and_join_requests, so just remove the head
+                * request from the inode / page_private pointer and
+                * release it */
+               nfs_inode_remove_request(req);
+               /*
+                * In case nfs_inode_remove_request has marked the
+                * page as being dirty
+                */
+               cancel_dirty_page(page, PAGE_CACHE_SIZE);
+               nfs_unlock_and_release_request(req);
        }
+
        return ret;
 }
 
index 6851b00..8f029db 100644
@@ -617,15 +617,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
 
        switch (create->cr_type) {
        case NF4LNK:
-               /* ugh! we have to null-terminate the linktext, or
-                * vfs_symlink() will choke.  it is always safe to
-                * null-terminate by brute force, since at worst we
-                * will overwrite the first byte of the create namelen
-                * in the XDR buffer, which has already been extracted
-                * during XDR decode.
-                */
-               create->cr_linkname[create->cr_linklen] = 0;
-
                status = nfsd_symlink(rqstp, &cstate->current_fh,
                                      create->cr_name, create->cr_namelen,
                                      create->cr_linkname, create->cr_linklen,
index c0d45ce..2204e1f 100644
@@ -41,6 +41,7 @@
 #include <linux/ratelimit.h>
 #include <linux/sunrpc/svcauth_gss.h>
 #include <linux/sunrpc/addr.h>
+#include <linux/hash.h>
 #include "xdr4.h"
 #include "xdr4cb.h"
 #include "vfs.h"
@@ -364,6 +365,79 @@ static struct nfs4_ol_stateid * nfs4_alloc_stateid(struct nfs4_client *clp)
        return openlockstateid(nfs4_alloc_stid(clp, stateid_slab));
 }
 
+/*
+ * When we recall a delegation, we should be careful not to hand it
+ * out again straight away.
+ * To ensure this we keep a pair of bloom filters ('new' and 'old')
+ * in which the filehandles of recalled delegations are "stored".
+ * If a filehandle appears in either filter, a delegation is blocked.
+ * When a delegation is recalled, the filehandle is stored in the "new"
+ * filter.
+ * Every 30 seconds we swap the filters and clear the "new" one,
+ * unless both are empty of course.
+ *
+ * Each filter is 256 bits.  We hash the filehandle to 32 bits and use
+ * each of the low 3 bytes as the index of a bit to set in the filter.
+ *
+ * 'state_lock', which is always held when block_delegations() is called,
+ * is used to manage concurrent access.  Testing does not need the lock
+ * except when swapping the two filters.
+ */
+static struct bloom_pair {
+       int     entries, old_entries;
+       time_t  swap_time;
+       int     new; /* index into 'set' */
+       DECLARE_BITMAP(set[2], 256);
+} blocked_delegations;
+
+static int delegation_blocked(struct knfsd_fh *fh)
+{
+       u32 hash;
+       struct bloom_pair *bd = &blocked_delegations;
+
+       if (bd->entries == 0)
+               return 0;
+       if (seconds_since_boot() - bd->swap_time > 30) {
+               spin_lock(&state_lock);
+               if (seconds_since_boot() - bd->swap_time > 30) {
+                       bd->entries -= bd->old_entries;
+                       bd->old_entries = bd->entries;
+                       memset(bd->set[bd->new], 0,
+                              sizeof(bd->set[0]));
+                       bd->new = 1-bd->new;
+                       bd->swap_time = seconds_since_boot();
+               }
+               spin_unlock(&state_lock);
+       }
+       hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
+       if (test_bit(hash&255, bd->set[0]) &&
+           test_bit((hash>>8)&255, bd->set[0]) &&
+           test_bit((hash>>16)&255, bd->set[0]))
+               return 1;
+
+       if (test_bit(hash&255, bd->set[1]) &&
+           test_bit((hash>>8)&255, bd->set[1]) &&
+           test_bit((hash>>16)&255, bd->set[1]))
+               return 1;
+
+       return 0;
+}
+
+static void block_delegations(struct knfsd_fh *fh)
+{
+       u32 hash;
+       struct bloom_pair *bd = &blocked_delegations;
+
+       hash = arch_fast_hash(&fh->fh_base, fh->fh_size, 0);
+
+       __set_bit(hash&255, bd->set[bd->new]);
+       __set_bit((hash>>8)&255, bd->set[bd->new]);
+       __set_bit((hash>>16)&255, bd->set[bd->new]);
+       if (bd->entries == 0)
+               bd->swap_time = seconds_since_boot();
+       bd->entries += 1;
+}
+
 static struct nfs4_delegation *
 alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct svc_fh *current_fh)
 {
@@ -372,6 +446,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_ol_stateid *stp, struct sv
        dprintk("NFSD alloc_init_deleg\n");
        if (num_delegations > max_delegations)
                return NULL;
+       if (delegation_blocked(&current_fh->fh_handle))
+               return NULL;
        dp = delegstateid(nfs4_alloc_stid(clp, deleg_slab));
        if (dp == NULL)
                return dp;
@@ -2770,6 +2846,8 @@ static void nfsd_break_one_deleg(struct nfs4_delegation *dp)
        /* Only place dl_time is set; protected by i_lock: */
        dp->dl_time = get_seconds();
 
+       block_delegations(&dp->dl_fh);
+
        nfsd4_cb_recall(dp);
 }
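
blocked_delegations above is a pair of 256-bit Bloom filters: recalled filehandles are inserted into the "new" one by setting three bits taken from one 32-bit hash, lookups test both filters, and every 30 seconds the filters rotate so entries age out after at most two periods. A self-contained userspace sketch of the same structure, with time(NULL) standing in for seconds_since_boot() and a trivial FNV hash in place of arch_fast_hash():

#include <stdint.h>
#include <string.h>
#include <time.h>

/* Paired 256-bit Bloom filters: insert into the "new" filter, test both,
 * swap and clear every 30 seconds so entries age out after at most two
 * swap periods. The hash is only here to keep the sketch self-contained. */
struct bloom_pair {
        int entries, old_entries;
        time_t swap_time;
        int new;                /* index of the filter taking inserts */
        uint8_t set[2][32];     /* 2 x 256 bits */
};

static uint32_t hash32(const void *data, size_t len)
{
        const uint8_t *p = data;
        uint32_t h = 2166136261u;       /* FNV-1a */

        while (len--)
                h = (h ^ *p++) * 16777619u;
        return h;
}

static void set_bit8(uint8_t *set, unsigned int bit)
{
        set[bit >> 3] |= 1u << (bit & 7);
}

static int test_bit8(const uint8_t *set, unsigned int bit)
{
        return set[bit >> 3] & (1u << (bit & 7));
}

static void maybe_swap(struct bloom_pair *bd)
{
        if (bd->entries && time(NULL) - bd->swap_time > 30) {
                bd->entries -= bd->old_entries;
                bd->old_entries = bd->entries;
                memset(bd->set[bd->new], 0, sizeof(bd->set[0]));
                bd->new = 1 - bd->new;
                bd->swap_time = time(NULL);
        }
}

static int blocked(struct bloom_pair *bd, const void *key, size_t len)
{
        uint32_t h = hash32(key, len);
        int i;

        maybe_swap(bd);
        for (i = 0; i < 2; i++)
                if (test_bit8(bd->set[i], h & 255) &&
                    test_bit8(bd->set[i], (h >> 8) & 255) &&
                    test_bit8(bd->set[i], (h >> 16) & 255))
                        return 1;
        return 0;
}

static void block(struct bloom_pair *bd, const void *key, size_t len)
{
        uint32_t h = hash32(key, len);

        set_bit8(bd->set[bd->new], h & 255);
        set_bit8(bd->set[bd->new], (h >> 8) & 255);
        set_bit8(bd->set[bd->new], (h >> 16) & 255);
        if (bd->entries++ == 0)
                bd->swap_time = time(NULL);
}

int main(void)
{
        static struct bloom_pair bd;
        const char fh[] = "example-filehandle";

        block(&bd, fh, sizeof(fh));
        return !blocked(&bd, fh, sizeof(fh));   /* exits 0: now blocked */
}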
 
index 2d305a1..944275c 100644
@@ -600,7 +600,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
                READ_BUF(4);
                create->cr_linklen = be32_to_cpup(p++);
                READ_BUF(create->cr_linklen);
-               SAVEMEM(create->cr_linkname, create->cr_linklen);
+               /*
+                * The VFS will want a null-terminated string, and
+                * null-terminating in place isn't safe since this might
+                * end on a page boundary:
+                */
+               create->cr_linkname =
+                               kmalloc(create->cr_linklen + 1, GFP_KERNEL);
+               if (!create->cr_linkname)
+                       return nfserr_jukebox;
+               memcpy(create->cr_linkname, p, create->cr_linklen);
+               create->cr_linkname[create->cr_linklen] = '\0';
+               defer_free(argp, kfree, create->cr_linkname);
                break;
        case NF4BLK:
        case NF4CHR:
@@ -2630,7 +2641,7 @@ nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
 {
        __be32 *p;
 
-       p = xdr_reserve_space(xdr, 6);
+       p = xdr_reserve_space(xdr, 20);
        if (!p)
                return NULL;
        *p++ = htonl(2);
@@ -2687,6 +2698,7 @@ nfsd4_encode_dirent(void *ccdv, const char *name, int namlen,
                nfserr = nfserr_toosmall;
                goto fail;
        case nfserr_noent:
+               xdr_truncate_encode(xdr, start_offset);
                goto skip_entry;
        default:
                /*
@@ -2867,6 +2879,7 @@ again:
                 * return the conflicting open:
                 */
                if (conf->len) {
+                       kfree(conf->data);
                        conf->len = 0;
                        conf->data = NULL;
                        goto again;
@@ -2879,6 +2892,7 @@ again:
        if (conf->len) {
                p = xdr_encode_opaque_fixed(p, &ld->ld_clientid, 8);
                p = xdr_encode_opaque(p, conf->data, conf->len);
+               kfree(conf->data);
        }  else {  /* non - nfsv4 lock in conflict, no clientid nor owner */
                p = xdr_encode_hyper(p, (u64)0); /* clientid */
                *p++ = cpu_to_be32(0); /* length of owner name */
@@ -2895,7 +2909,7 @@ nfsd4_encode_lock(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_lo
                nfserr = nfsd4_encode_stateid(xdr, &lock->lk_resp_stateid);
        else if (nfserr == nfserr_denied)
                nfserr = nfsd4_encode_lock_denied(xdr, &lock->lk_denied);
-       kfree(lock->lk_denied.ld_owner.data);
+
        return nfserr;
 }
 
@@ -3266,7 +3280,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
 
        wire_count = htonl(maxcount);
        write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
-       xdr_truncate_encode(xdr, length_offset + 4 + maxcount);
+       xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
        if (maxcount & 3)
                write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
                                                &zero, 4 - (maxcount&3));
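
The readlink fix above truncates to `length_offset + 4 + ALIGN(maxcount, 4)` because XDR (RFC 4506) pads every variable-length opaque body out to a 4-byte boundary; truncating to the unpadded length would chop off the pad bytes the decoder expects (which the following write_bytes_to_xdr_buf() zero-fills). The padded size is the usual round-up:

#include <stdio.h>

/* XDR rounds opaque/string bodies up to a multiple of 4 (RFC 4506);
 * ALIGN(x, 4) in the hunk above is this round-up. */
static unsigned int xdr_pad4(unsigned int len)
{
        return (len + 3) & ~3u;
}

int main(void)
{
        unsigned int n;

        for (n = 0; n <= 6; n++)        /* 0 4 4 4 4 8 8 */
                printf("len %u -> padded %u\n", n, xdr_pad4(n));
        return 0;
}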
index a106b3f..fae17c6 100644
@@ -331,6 +331,7 @@ struct dlm_lock_resource
        u16 state;
        char lvb[DLM_LVB_LEN];
        unsigned int inflight_locks;
+       unsigned int inflight_assert_workers;
        unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
 };
 
@@ -910,6 +911,9 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
 void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
                                   struct dlm_lock_resource *res);
 
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res);
+
 void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
 void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
index 3087a21..82abf0c 100644
@@ -581,6 +581,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
        atomic_set(&res->asts_reserved, 0);
        res->migration_pending = 0;
        res->inflight_locks = 0;
+       res->inflight_assert_workers = 0;
 
        res->dlm = dlm;
 
@@ -683,6 +684,43 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
        wake_up(&res->wq);
 }
 
+void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       res->inflight_assert_workers++;
+       mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
+                       dlm->name, res->lockname.len, res->lockname.name,
+                       res->inflight_assert_workers);
+}
+
+static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       spin_lock(&res->spinlock);
+       __dlm_lockres_grab_inflight_worker(dlm, res);
+       spin_unlock(&res->spinlock);
+}
+
+static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       assert_spin_locked(&res->spinlock);
+       BUG_ON(res->inflight_assert_workers == 0);
+       res->inflight_assert_workers--;
+       mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
+                       dlm->name, res->lockname.len, res->lockname.name,
+                       res->inflight_assert_workers);
+}
+
+static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
+               struct dlm_lock_resource *res)
+{
+       spin_lock(&res->spinlock);
+       __dlm_lockres_drop_inflight_worker(dlm, res);
+       spin_unlock(&res->spinlock);
+}
+
 /*
  * lookup a lock resource by name.
  * may already exist in the hashtable.
@@ -1603,7 +1641,8 @@ send_response:
                        mlog(ML_ERROR, "failed to dispatch assert master work\n");
                        response = DLM_MASTER_RESP_ERROR;
                        dlm_lockres_put(res);
-               }
+               } else
+                       dlm_lockres_grab_inflight_worker(dlm, res);
        } else {
                if (res)
                        dlm_lockres_put(res);
@@ -2118,6 +2157,8 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
        dlm_lockres_release_ast(dlm, res);
 
 put:
+       dlm_lockres_drop_inflight_worker(dlm, res);
+
        dlm_lockres_put(res);
 
        mlog(0, "finished with dlm_assert_master_worker\n");
@@ -3088,11 +3129,15 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
                        /* remove it so that only one mle will be found */
                        __dlm_unlink_mle(dlm, tmp);
                        __dlm_mle_detach_hb_events(dlm, tmp);
-                       ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
-                       mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
-                           "telling master to get ref for cleared out mle "
-                           "during migration\n", dlm->name, namelen, name,
-                           master, new_master);
+                       if (tmp->type == DLM_MLE_MASTER) {
+                               ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
+                               mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
+                                               "telling master to get ref "
+                                               "for cleared out mle during "
+                                               "migration\n", dlm->name,
+                                               namelen, name, master,
+                                               new_master);
+                       }
                }
                spin_unlock(&tmp->spinlock);
        }
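
inflight_assert_workers is a plain counter under the resource spinlock: bumped when an assert-master work item is queued, dropped when the worker finishes, and (in the dlmthread.c hunk below) checked by the purge path so a resource with queued work cannot be freed underneath it. A sketch of the same gate using a pthread mutex:

#include <pthread.h>

/* Counter under a lock: bumped when work is queued, dropped when the
 * worker completes, consulted before teardown. A sketch of the pattern,
 * not the DLM code itself. */
struct resource {
        pthread_mutex_t lock;
        unsigned int inflight_workers;
};

static void grab_inflight(struct resource *res)
{
        pthread_mutex_lock(&res->lock);
        res->inflight_workers++;
        pthread_mutex_unlock(&res->lock);
}

static void drop_inflight(struct resource *res)
{
        pthread_mutex_lock(&res->lock);
        res->inflight_workers--;
        pthread_mutex_unlock(&res->lock);
}

/* purge path: skip resources that still have queued work */
static int can_purge(struct resource *res)
{
        int ok;

        pthread_mutex_lock(&res->lock);
        ok = (res->inflight_workers == 0);
        pthread_mutex_unlock(&res->lock);
        return ok;
}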
index 5de0194..45067fa 100644
@@ -1708,7 +1708,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
                                mlog_errno(-ENOMEM);
                                /* retry!? */
                                BUG();
-                       }
+                       } else
+                               __dlm_lockres_grab_inflight_worker(dlm, res);
                } else /* put, in case we are not the master */
                        dlm_lockres_put(res);
                spin_unlock(&res->spinlock);
index 9db869d..69aac6f 100644
@@ -259,12 +259,15 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
                 * refs on it. */
                unused = __dlm_lockres_unused(lockres);
                if (!unused ||
-                   (lockres->state & DLM_LOCK_RES_MIGRATING)) {
+                   (lockres->state & DLM_LOCK_RES_MIGRATING) ||
+                   (lockres->inflight_assert_workers != 0)) {
                        mlog(0, "%s: res %.*s is in use or being remastered, "
-                            "used %d, state %d\n", dlm->name,
-                            lockres->lockname.len, lockres->lockname.name,
-                            !unused, lockres->state);
-                       list_move_tail(&dlm->purge_list, &lockres->purge);
+                            "used %d, state %d, assert master workers %u\n",
+                            dlm->name, lockres->lockname.len,
+                            lockres->lockname.name,
+                            !unused, lockres->state,
+                            lockres->inflight_assert_workers);
+                       list_move_tail(&lockres->purge, &dlm->purge_list);
                        spin_unlock(&lockres->spinlock);
                        continue;
                }
index 5698b52..2e3c9db 100644
@@ -191,7 +191,9 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                                     DLM_UNLOCK_CLEAR_CONVERT_TYPE);
                } else if (status == DLM_RECOVERING ||
                           status == DLM_MIGRATING ||
-                          status == DLM_FORWARD) {
+                          status == DLM_FORWARD ||
+                          status == DLM_NOLOCKMGR) {
                        /* must clear the actions because this unlock
                         * is about to be retried.  cannot free or do
                         * any list manipulation. */
@@ -200,7 +202,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
                             res->lockname.name,
                             status==DLM_RECOVERING?"recovering":
                             (status==DLM_MIGRATING?"migrating":
-                             "forward"));
+                               (status == DLM_FORWARD ? "forward" :
+                                               "nolockmanager")));
                        actions = 0;
                }
                if (flags & LKM_CANCEL)
@@ -364,7 +367,10 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
                         * updated state to the recovery master.  this thread
                         * just needs to finish out the operation and call
                         * the unlockast. */
-                       ret = DLM_NORMAL;
+                       if (dlm_is_node_dead(dlm, owner))
+                               ret = DLM_NORMAL;
+                       else
+                               ret = DLM_NOLOCKMGR;
                } else {
                        /* something bad.  this will BUG in ocfs2 */
                        ret = dlm_err_to_dlm_status(tmpret);
@@ -638,7 +644,9 @@ retry:
 
        if (status == DLM_RECOVERING ||
            status == DLM_MIGRATING ||
-           status == DLM_FORWARD) {
+           status == DLM_FORWARD ||
+           status == DLM_NOLOCKMGR) {
                /* We want to go away for a tiny bit to allow recovery
                 * / migration to complete on this resource. I don't
                 * know of any wait queue we could sleep on as this
@@ -650,7 +658,7 @@ retry:
                msleep(50);
 
                mlog(0, "retrying unlock due to pending recovery/"
-                    "migration/in-progress\n");
+                    "migration/in-progress/reconnect\n");
                goto retry;
        }
 
index 2060fc3..8add6f1 100644
@@ -205,6 +205,21 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
        return inode;
 }
 
+static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
+               struct dentry *dentry, struct inode *inode)
+{
+       struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
+
+       ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
+       ocfs2_lock_res_free(&dl->dl_lockres);
+       BUG_ON(dl->dl_count != 1);
+       spin_lock(&dentry_attach_lock);
+       dentry->d_fsdata = NULL;
+       spin_unlock(&dentry_attach_lock);
+       kfree(dl);
+       iput(inode);
+}
+
 static int ocfs2_mknod(struct inode *dir,
                       struct dentry *dentry,
                       umode_t mode,
@@ -231,6 +246,7 @@ static int ocfs2_mknod(struct inode *dir,
        sigset_t oldset;
        int did_block_signals = 0;
        struct posix_acl *default_acl = NULL, *acl = NULL;
+       struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
                          (unsigned long long)OCFS2_I(dir)->ip_blkno,
@@ -423,6 +439,8 @@ static int ocfs2_mknod(struct inode *dir,
                goto leave;
        }
 
+       dl = dentry->d_fsdata;
+
        status = ocfs2_add_entry(handle, dentry, inode,
                                 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
                                 &lookup);
@@ -469,6 +487,9 @@ leave:
         * ocfs2_delete_inode will mutex_lock again.
         */
        if ((status < 0) && inode) {
+               if (dl)
+                       ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
+
                OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
                clear_nlink(inode);
                iput(inode);
@@ -991,6 +1012,65 @@ leave:
        return status;
 }
 
+static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
+               u64 src_inode_no, u64 dest_inode_no)
+{
+       int ret = 0, i = 0;
+       u64 parent_inode_no = 0;
+       u64 child_inode_no = src_inode_no;
+       struct inode *child_inode;
+
+#define MAX_LOOKUP_TIMES 32
+       while (1) {
+               child_inode = ocfs2_iget(osb, child_inode_no, 0, 0);
+               if (IS_ERR(child_inode)) {
+                       ret = PTR_ERR(child_inode);
+                       break;
+               }
+
+               ret = ocfs2_inode_lock(child_inode, NULL, 0);
+               if (ret < 0) {
+                       iput(child_inode);
+                       if (ret != -ENOENT)
+                               mlog_errno(ret);
+                       break;
+               }
+
+               ret = ocfs2_lookup_ino_from_name(child_inode, "..", 2,
+                               &parent_inode_no);
+               ocfs2_inode_unlock(child_inode, 0);
+               iput(child_inode);
+               if (ret < 0) {
+                       ret = -ENOENT;
+                       break;
+               }
+
+               if (parent_inode_no == dest_inode_no) {
+                       ret = 1;
+                       break;
+               }
+
+               if (parent_inode_no == osb->root_inode->i_ino) {
+                       ret = 0;
+                       break;
+               }
+
+               child_inode_no = parent_inode_no;
+
+               if (++i >= MAX_LOOKUP_TIMES) {
+                       mlog(ML_NOTICE, "max lookup times reached, filesystem "
+                                       "may have nested directories, "
+                                       "src inode: %llu, dest inode: %llu.\n",
+                                       (unsigned long long)src_inode_no,
+                                       (unsigned long long)dest_inode_no);
+                       ret = 0;
+                       break;
+               }
+       }
+
+       return ret;
+}
+
 /*
  * The only place this should be used is rename!
  * if they have the same id, then the 1st one is the only one locked.
@@ -1002,6 +1082,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
                             struct inode *inode2)
 {
        int status;
+       int inode1_is_ancestor, inode2_is_ancestor;
        struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
        struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
        struct buffer_head **tmpbh;
@@ -1015,9 +1096,26 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
        if (*bh2)
                *bh2 = NULL;
 
-       /* we always want to lock the one with the lower lockid first. */
+       /* we always want to lock the one with the lower lockid first,
+        * and if they are nested, we lock the ancestor first */
        if (oi1->ip_blkno != oi2->ip_blkno) {
-               if (oi1->ip_blkno < oi2->ip_blkno) {
+               inode1_is_ancestor = ocfs2_check_if_ancestor(osb, oi2->ip_blkno,
+                               oi1->ip_blkno);
+               if (inode1_is_ancestor < 0) {
+                       status = inode1_is_ancestor;
+                       goto bail;
+               }
+
+               inode2_is_ancestor = ocfs2_check_if_ancestor(osb, oi1->ip_blkno,
+                               oi2->ip_blkno);
+               if (inode2_is_ancestor < 0) {
+                       status = inode2_is_ancestor;
+                       goto bail;
+               }
+
+               if ((inode1_is_ancestor == 1) ||
+                               (oi1->ip_blkno < oi2->ip_blkno &&
+                               inode2_is_ancestor == 0)) {
                        /* switch id1 and id2 around */
                        tmpbh = bh2;
                        bh2 = bh1;
@@ -1098,6 +1196,7 @@ static int ocfs2_rename(struct inode *old_dir,
        struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, };
        struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
        struct ocfs2_dir_lookup_result target_insert = { NULL, };
+       bool should_add_orphan = false;
 
        /* At some point it might be nice to break this function up a
         * bit. */
@@ -1134,6 +1233,21 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
                rename_lock = 1;
+
+               /* here we cannot guarantee the inodes haven't just been
+                * changed, so check if they are nested again */
+               status = ocfs2_check_if_ancestor(osb, new_dir->i_ino,
+                               old_inode->i_ino);
+               if (status < 0) {
+                       mlog_errno(status);
+                       goto bail;
+               } else if (status == 1) {
+                       status = -EPERM;
+                       trace_ocfs2_rename_not_permitted(
+                                       (unsigned long long)old_inode->i_ino,
+                                       (unsigned long long)new_dir->i_ino);
+                       goto bail;
+               }
        }
 
        /* if old and new are the same, this'll just do one lock. */
@@ -1304,6 +1418,7 @@ static int ocfs2_rename(struct inode *old_dir,
                                mlog_errno(status);
                                goto bail;
                        }
+                       should_add_orphan = true;
                }
        } else {
                BUG_ON(new_dentry->d_parent->d_inode != new_dir);
@@ -1348,17 +1463,6 @@ static int ocfs2_rename(struct inode *old_dir,
                        goto bail;
                }
 
-               if (S_ISDIR(new_inode->i_mode) ||
-                   (ocfs2_read_links_count(newfe) == 1)) {
-                       status = ocfs2_orphan_add(osb, handle, new_inode,
-                                                 newfe_bh, orphan_name,
-                                                 &orphan_insert, orphan_dir);
-                       if (status < 0) {
-                               mlog_errno(status);
-                               goto bail;
-                       }
-               }
-
                /* change the dirent to point to the correct inode */
                status = ocfs2_update_entry(new_dir, handle, &target_lookup_res,
                                            old_inode);
@@ -1373,6 +1477,15 @@ static int ocfs2_rename(struct inode *old_dir,
                else
                        ocfs2_add_links_count(newfe, -1);
                ocfs2_journal_dirty(handle, newfe_bh);
+               if (should_add_orphan) {
+                       status = ocfs2_orphan_add(osb, handle, new_inode,
+                                       newfe_bh, orphan_name,
+                                       &orphan_insert, orphan_dir);
+                       if (status < 0) {
+                               mlog_errno(status);
+                               goto bail;
+                       }
+               }
        } else {
                /* if the name was not found in new_dir, add it now */
                status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1642,6 +1755,7 @@ static int ocfs2_symlink(struct inode *dir,
        struct ocfs2_dir_lookup_result lookup = { NULL, };
        sigset_t oldset;
        int did_block_signals = 0;
+       struct ocfs2_dentry_lock *dl = NULL;
 
        trace_ocfs2_symlink_begin(dir, dentry, symname,
                                  dentry->d_name.len, dentry->d_name.name);
@@ -1830,6 +1944,8 @@ static int ocfs2_symlink(struct inode *dir,
                goto bail;
        }
 
+       dl = dentry->d_fsdata;
+
        status = ocfs2_add_entry(handle, dentry, inode,
                                 le64_to_cpu(fe->i_blkno), parent_fe_bh,
                                 &lookup);
@@ -1864,6 +1980,9 @@ bail:
        if (xattr_ac)
                ocfs2_free_alloc_context(xattr_ac);
        if ((status < 0) && inode) {
+               if (dl)
+                       ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
+
                OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
                clear_nlink(inode);
                iput(inode);
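The lock-ordering rule that ocfs2_double_lock() now implements (ancestor
first; with no ancestry, lower lock id first) can be sketched as a
self-contained userspace model, where an in-memory parent walk stands in
for ocfs2_check_if_ancestor()'s on-disk ".." lookups:

#include <stdbool.h>
#include <stdio.h>

struct node { unsigned long long id; const struct node *parent; };

static bool is_ancestor(const struct node *a, const struct node *b)
{
        /* walk b's parent chain looking for a */
        for (const struct node *p = b->parent; p; p = p->parent)
                if (p == a)
                        return true;
        return false;
}

static void order_locks(const struct node **first, const struct node **second)
{
        const struct node *a = *first, *b = *second;

        /* swap so the ancestor (or, failing that, the lower id) goes first */
        if (is_ancestor(b, a) || (b->id < a->id && !is_ancestor(a, b))) {
                *first = b;
                *second = a;
        }
}

int main(void)
{
        struct node root = { 1, NULL };
        struct node dir  = { 9, &root };
        struct node sub  = { 4, &dir };         /* lower id than its ancestor */

        const struct node *first = &sub, *second = &dir;

        order_locks(&first, &second);
        printf("lock %llu before %llu\n", first->id, second->id);
        return 0;
}

Here ancestry overrides the old lower-id rule: the directory with id 9 is
locked before its descendant with id 4, which is what keeps two racing
renames from taking the same pair of inode locks in opposite orders.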
index 1b60c62..6cb019b 100644 (file)
@@ -2292,6 +2292,8 @@ TRACE_EVENT(ocfs2_rename,
                  __entry->new_len, __get_str(new_name))
 );
 
+DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_rename_not_permitted);
+
 TRACE_EVENT(ocfs2_rename_target_exists,
        TP_PROTO(int new_len, const char *new_name),
        TP_ARGS(new_len, new_name),
index 714e53b..636aab6 100644 (file)
@@ -4288,9 +4288,16 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
                goto out;
        }
 
+       error = ocfs2_rw_lock(inode, 1);
+       if (error) {
+               mlog_errno(error);
+               goto out;
+       }
+
        error = ocfs2_inode_lock(inode, &old_bh, 1);
        if (error) {
                mlog_errno(error);
+               ocfs2_rw_unlock(inode, 1);
                goto out;
        }
 
@@ -4302,6 +4309,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
        up_write(&OCFS2_I(inode)->ip_xattr_sem);
 
        ocfs2_inode_unlock(inode, 1);
+       ocfs2_rw_unlock(inode, 1);
        brelse(old_bh);
 
        if (error) {
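The shape of this fix - take the rw lock, back it out if the nested inode
lock fails, and drop the two in reverse order on success - is easy to model
in userspace (build with -pthread; the names are stand-ins, not the ocfs2
API):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t rw_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

static int reflink_locked(void)
{
        int error = pthread_mutex_lock(&rw_lock);

        if (error)
                return error;

        error = pthread_mutex_lock(&inode_lock);
        if (error) {
                pthread_mutex_unlock(&rw_lock); /* unwind the outer lock */
                return error;
        }

        /* ... the reflink work happens under both locks ... */

        pthread_mutex_unlock(&inode_lock);      /* release inner first */
        pthread_mutex_unlock(&rw_lock);
        return 0;
}

int main(void)
{
        printf("reflink_locked() -> %d\n", reflink_locked());
        return 0;
}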
index c7a89ce..ddb662b 100644 (file)
@@ -1925,15 +1925,11 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
 
        ocfs2_shutdown_local_alloc(osb);
 
+       ocfs2_truncate_log_shutdown(osb);
+
        /* This will disable recovery and flush any recovery work. */
        ocfs2_recovery_exit(osb);
 
-       /*
-        * During dismount, when it recovers another node it will call
-        * ocfs2_recover_orphans and queue delayed work osb_truncate_log_wq.
-        */
-       ocfs2_truncate_log_shutdown(osb);
-
        ocfs2_journal_shutdown(osb);
 
        ocfs2_sync_blockdev(sb);
index 9d231e9..bf2d03f 100644 (file)
@@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
 
 static int stat_open(struct inode *inode, struct file *file)
 {
-       size_t size = 1024 + 128 * num_possible_cpus();
-       char *buf;
-       struct seq_file *m;
-       int res;
+       size_t size = 1024 + 128 * num_online_cpus();
 
        /* minimum size to display an interrupt count : 2 bytes */
        size += 2 * nr_irqs;
-
-       /* don't ask for more than the kmalloc() max size */
-       if (size > KMALLOC_MAX_SIZE)
-               size = KMALLOC_MAX_SIZE;
-       buf = kmalloc(size, GFP_KERNEL);
-       if (!buf)
-               return -ENOMEM;
-
-       res = single_open(file, show_stat, NULL);
-       if (!res) {
-               m = file->private_data;
-               m->buf = buf;
-               m->size = ksize(buf);
-       } else
-               kfree(buf);
-       return res;
+       return single_open_size(file, show_stat, NULL, size);
 }
 
 static const struct file_operations proc_stat_operations = {
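As a worked example of the new sizing hint (the numbers are hypothetical):
on a box with 16 online CPUs and nr_irqs = 256, size starts at
1024 + 128 * 16 = 3072 bytes and the interrupt term adds 2 * 256 = 512,
so single_open_size() seeds the seq_file with a 3584-byte buffer; if
show_stat() still overflows it, seq_read()'s usual doubling takes over.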
index 9cd5f63..7f30bdc 100644 (file)
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
        struct dquot *dquot;
        unsigned long freed = 0;
 
+       spin_lock(&dq_list_lock);
        head = free_dquots.prev;
        while (head != &free_dquots && sc->nr_to_scan) {
                dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
                freed++;
                head = free_dquots.prev;
        }
+       spin_unlock(&dq_list_lock);
        return freed;
 }
 
index 1d641bb..3857b72 100644 (file)
@@ -8,8 +8,10 @@
 #include <linux/fs.h>
 #include <linux/export.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 #include <linux/slab.h>
 #include <linux/cred.h>
+#include <linux/mm.h>
 
 #include <asm/uaccess.h>
 #include <asm/page.h>
@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
        m->count = m->size;
 }
 
+static void *seq_buf_alloc(unsigned long size)
+{
+       void *buf;
+
+       buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
+       if (!buf && size > PAGE_SIZE)
+               buf = vmalloc(size);
+       return buf;
+}
+
 /**
  *     seq_open -      initialize sequential file
  *     @file: file we initialize
@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
                return 0;
        }
        if (!m->buf) {
-               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
                if (!m->buf)
                        return -ENOMEM;
        }
@@ -135,9 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset)
 
 Eoverflow:
        m->op->stop(m, p);
-       kfree(m->buf);
+       kvfree(m->buf);
        m->count = 0;
-       m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+       m->buf = seq_buf_alloc(m->size <<= 1);
        return !m->buf ? -ENOMEM : -EAGAIN;
 }
 
@@ -192,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
 
        /* grab buffer if we didn't have one */
        if (!m->buf) {
-               m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
                if (!m->buf)
                        goto Enomem;
        }
@@ -232,9 +244,9 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
                if (m->count < m->size)
                        goto Fill;
                m->op->stop(m, p);
-               kfree(m->buf);
+               kvfree(m->buf);
                m->count = 0;
-               m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
+               m->buf = seq_buf_alloc(m->size <<= 1);
                if (!m->buf)
                        goto Enomem;
                m->version = 0;
@@ -350,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek);
 int seq_release(struct inode *inode, struct file *file)
 {
        struct seq_file *m = file->private_data;
-       kfree(m->buf);
+       kvfree(m->buf);
        kfree(m);
        return 0;
 }
@@ -605,13 +617,13 @@ EXPORT_SYMBOL(single_open);
 int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
                void *data, size_t size)
 {
-       char *buf = kmalloc(size, GFP_KERNEL);
+       char *buf = seq_buf_alloc(size);
        int ret;
        if (!buf)
                return -ENOMEM;
        ret = single_open(file, show, data);
        if (ret) {
-               kfree(buf);
+               kvfree(buf);
                return ret;
        }
        ((struct seq_file *)file->private_data)->buf = buf;
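Two things changed together here: seq_buf_alloc() quietly tries kmalloc()
first (__GFP_NOWARN) and falls back to vmalloc() when a multi-page
kmalloc() fails, and every free site becomes kvfree(), which handles
either kind. The grow-on-overflow loop around it is unchanged and can be
modeled in userspace, with plain malloc/free standing in for the kernel
allocators:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096UL

int main(void)
{
        size_t size = PAGE_SIZE;                /* m->size starts at one page */
        size_t need = 3 * PAGE_SIZE + 123;      /* pretend formatted record */
        char *buf = malloc(size);               /* seq_buf_alloc(PAGE_SIZE) */

        while (buf && size < need) {            /* the Eoverflow path */
                free(buf);                      /* kvfree(m->buf) */
                size <<= 1;                     /* m->size <<= 1 */
                buf = malloc(size);             /* seq_buf_alloc(m->size) */
        }
        if (!buf)
                return 1;                       /* -ENOMEM */

        printf("settled on a %zu-byte buffer\n", size);
        free(buf);
        return 0;
}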
index 3377dff..c69e6d4 100644 (file)
@@ -843,7 +843,7 @@ struct simple_xattr *simple_xattr_alloc(const void *value, size_t size)
 
        /* wrap around? */
        len = sizeof(*new_xattr) + size;
-       if (len <= sizeof(*new_xattr))
+       if (len < sizeof(*new_xattr))
                return NULL;
 
        new_xattr = kmalloc(len, GFP_KERNEL);
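The relaxed check matters for zero-length values: with the old <=, a value
of size 0 (len == sizeof(*new_xattr)) was wrongly rejected, while the <
form only trips when the addition itself wrapped. A userspace model of
the check, with a hypothetical stand-in header:

#include <stdint.h>
#include <stdio.h>

struct xattr_hdr { char name[64]; size_t size; };

static int alloc_len(size_t size, size_t *out)
{
        size_t len = sizeof(struct xattr_hdr) + size;

        if (len < sizeof(struct xattr_hdr))
                return -1;      /* size was near SIZE_MAX: the sum wrapped */
        *out = len;
        return 0;
}

int main(void)
{
        size_t len;

        printf("size 0    -> %d\n", alloc_len(0, &len));             /* 0 */
        printf("size huge -> %d\n", alloc_len(SIZE_MAX - 8, &len));  /* -1 */
        return 0;
}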
index 96175df..75c3fe5 100644 (file)
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
 }
 
 
-int
-__xfs_bmapi_allocate(
+static int
+xfs_bmapi_allocate(
        struct xfs_bmalloca     *bma)
 {
        struct xfs_mount        *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
        bma.flist = flist;
        bma.firstblock = firstblock;
 
-       if (flags & XFS_BMAPI_STACK_SWITCH)
-               bma.stack_switch = 1;
-
        while (bno < end && n < *nmap) {
                inhole = eof || bma.got.br_startoff > bno;
                wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
index 38ba36e..b879ca5 100644 (file)
@@ -77,7 +77,6 @@ typedef       struct xfs_bmap_free
  * from written to unwritten, otherwise convert from unwritten to written.
  */
 #define XFS_BMAPI_CONVERT      0x040
-#define XFS_BMAPI_STACK_SWITCH 0x080
 
 #define XFS_BMAPI_FLAGS \
        { XFS_BMAPI_ENTIRE,     "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef       struct xfs_bmap_free
        { XFS_BMAPI_PREALLOC,   "PREALLOC" }, \
        { XFS_BMAPI_IGSTATE,    "IGSTATE" }, \
        { XFS_BMAPI_CONTIG,     "CONTIG" }, \
-       { XFS_BMAPI_CONVERT,    "CONVERT" }, \
-       { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
+       { XFS_BMAPI_CONVERT,    "CONVERT" }
 
 
 static inline int xfs_bmapi_aflag(int w)
index 703b3ec..64731ef 100644 (file)
@@ -248,59 +248,6 @@ xfs_bmap_rtalloc(
        return 0;
 }
 
-/*
- * Stack switching interfaces for allocation
- */
-static void
-xfs_bmapi_allocate_worker(
-       struct work_struct      *work)
-{
-       struct xfs_bmalloca     *args = container_of(work,
-                                               struct xfs_bmalloca, work);
-       unsigned long           pflags;
-       unsigned long           new_pflags = PF_FSTRANS;
-
-       /*
-        * we are in a transaction context here, but may also be doing work
-        * in kswapd context, and hence we may need to inherit that state
-        * temporarily to ensure that we don't block waiting for memory reclaim
-        * in any way.
-        */
-       if (args->kswapd)
-               new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
-
-       current_set_flags_nested(&pflags, new_pflags);
-
-       args->result = __xfs_bmapi_allocate(args);
-       complete(args->done);
-
-       current_restore_flags_nested(&pflags, new_pflags);
-}
-
-/*
- * Some allocation requests often come in with little stack to work on. Push
- * them off to a worker thread so there is lots of stack to use. Otherwise just
- * call directly to avoid the context switch overhead here.
- */
-int
-xfs_bmapi_allocate(
-       struct xfs_bmalloca     *args)
-{
-       DECLARE_COMPLETION_ONSTACK(done);
-
-       if (!args->stack_switch)
-               return __xfs_bmapi_allocate(args);
-
-
-       args->done = &done;
-       args->kswapd = current_is_kswapd();
-       INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
-       queue_work(xfs_alloc_wq, &args->work);
-       wait_for_completion(&done);
-       destroy_work_on_stack(&args->work);
-       return args->result;
-}
-
 /*
  * Check if the endoff is outside the last extent. If so the caller will grow
  * the allocation to a stripe unit boundary.  All offsets are considered outside
index 075f722..2fdb72d 100644 (file)
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
        bool                    userdata;/* set if is user data */
        bool                    aeof;   /* allocated space at eof */
        bool                    conv;   /* overwriting unwritten extents */
-       bool                    stack_switch;
-       bool                    kswapd; /* allocation in kswapd context */
        int                     flags;
        struct completion       *done;
        struct work_struct      work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
 int    xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
                        int *committed);
 int    xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
-int    xfs_bmapi_allocate(struct xfs_bmalloca *args);
-int    __xfs_bmapi_allocate(struct xfs_bmalloca *args);
 int    xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
                     int whichfork, int *eof);
 int    xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
index bf810c6..cf893bc 100644 (file)
@@ -33,6 +33,7 @@
 #include "xfs_error.h"
 #include "xfs_trace.h"
 #include "xfs_cksum.h"
+#include "xfs_alloc.h"
 
 /*
  * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
  * record (to be inserted into parent).
  */
 STATIC int                                     /* error */
-xfs_btree_split(
+__xfs_btree_split(
        struct xfs_btree_cur    *cur,
        int                     level,
        union xfs_btree_ptr     *ptrp,
@@ -2503,6 +2504,85 @@ error0:
        return error;
 }
 
+struct xfs_btree_split_args {
+       struct xfs_btree_cur    *cur;
+       int                     level;
+       union xfs_btree_ptr     *ptrp;
+       union xfs_btree_key     *key;
+       struct xfs_btree_cur    **curp;
+       int                     *stat;          /* success/failure */
+       int                     result;
+       bool                    kswapd; /* allocation in kswapd context */
+       struct completion       *done;
+       struct work_struct      work;
+};
+
+/*
+ * Stack switching interfaces for allocation
+ */
+static void
+xfs_btree_split_worker(
+       struct work_struct      *work)
+{
+       struct xfs_btree_split_args     *args = container_of(work,
+                                               struct xfs_btree_split_args, work);
+       unsigned long           pflags;
+       unsigned long           new_pflags = PF_FSTRANS;
+
+       /*
+        * we are in a transaction context here, but may also be doing work
+        * in kswapd context, and hence we may need to inherit that state
+        * temporarily to ensure that we don't block waiting for memory reclaim
+        * in any way.
+        */
+       if (args->kswapd)
+               new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+
+       current_set_flags_nested(&pflags, new_pflags);
+
+       args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
+                                        args->key, args->curp, args->stat);
+       complete(args->done);
+
+       current_restore_flags_nested(&pflags, new_pflags);
+}
+
+/*
+ * BMBT split requests often come in with little stack to work on. Push
+ * them off to a worker thread so there is lots of stack to use. For the other
+ * btree types, just call directly to avoid the context switch overhead here.
+ */
+STATIC int                                     /* error */
+xfs_btree_split(
+       struct xfs_btree_cur    *cur,
+       int                     level,
+       union xfs_btree_ptr     *ptrp,
+       union xfs_btree_key     *key,
+       struct xfs_btree_cur    **curp,
+       int                     *stat)          /* success/failure */
+{
+       struct xfs_btree_split_args     args;
+       DECLARE_COMPLETION_ONSTACK(done);
+
+       if (cur->bc_btnum != XFS_BTNUM_BMAP)
+               return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
+
+       args.cur = cur;
+       args.level = level;
+       args.ptrp = ptrp;
+       args.key = key;
+       args.curp = curp;
+       args.stat = stat;
+       args.done = &done;
+       args.kswapd = current_is_kswapd();
+       INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
+       queue_work(xfs_alloc_wq, &args.work);
+       wait_for_completion(&done);
+       destroy_work_on_stack(&args.work);
+       return args.result;
+}
+
+
 /*
  * Copy the old inode root contents into a real block and make the
  * broot point to it.
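A note on why the on-stack pattern in xfs_btree_split() above is safe:
wait_for_completion() does not return until the worker has called
complete(), so the worker is done touching args before the caller's stack
frame goes away; INIT_WORK_ONSTACK()/destroy_work_on_stack() exist
precisely so debugobjects can track a work item that lives on the stack
rather than in the heap.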
index 6c5eb4c..6d3ec2b 100644 (file)
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
                         * pointer that the caller gave to us.
                         */
                        error = xfs_bmapi_write(tp, ip, map_start_fsb,
-                                               count_fsb,
-                                               XFS_BMAPI_STACK_SWITCH,
+                                               count_fsb, 0,
                                                &first_block, 1,
                                                imap, &nimaps, &free_list);
                        if (error)
index c3453b1..7703fa6 100644 (file)
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
        }
 
        /*
-        * GQUOTINO and PQUOTINO cannot be used together in versions
-        * of superblock that do not have pquotino. from->sb_flags
-        * tells us which quota is active and should be copied to
-        * disk.
+        * GQUOTINO and PQUOTINO cannot be used together in versions of
+        * superblock that do not have pquotino. from->sb_flags tells us which
+        * quota is active and should be copied to disk. If neither are active,
+        * make sure we write NULLFSINO to the sb_gquotino field as a quota
+        * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
+        * bit is set.
+        *
+        * Note that we don't need to handle the sb_uquotino or sb_pquotino here
+        * as they do not require any translation. Hence the main sb field loop
+        * will write them appropriately from the in-core superblock.
         */
        if ((*fields & XFS_SB_GQUOTINO) &&
                                (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
        else if ((*fields & XFS_SB_PQUOTINO) &&
                                (from->sb_qflags & XFS_PQUOTA_ACCT))
                to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
+       else {
+               /*
+                * We can't rely on just the fields being logged to tell us
+                * that it is safe to write NULLFSINO - we should only do that
+                * if quotas are not actually enabled. Hence only write
+                * NULLFSINO if both in-core quota inodes are NULL.
+                */
+               if (from->sb_gquotino == NULLFSINO &&
+                   from->sb_pquotino == NULLFSINO)
+                       to->sb_gquotino = cpu_to_be64(NULLFSINO);
+       }
 
        *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
 }
index 6eb1d3c..9b9b6f2 100644 (file)
@@ -53,7 +53,7 @@ struct acpi_power_register {
        u8 bit_offset;
        u8 access_size;
        u64 address;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_processor_cx {
        u8 valid;
@@ -83,7 +83,7 @@ struct acpi_psd_package {
        u64 domain;
        u64 coord_type;
        u64 num_processors;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_pct_register {
        u8 descriptor;
@@ -93,7 +93,7 @@ struct acpi_pct_register {
        u8 bit_offset;
        u8 reserved;
        u64 address;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_processor_px {
        u64 core_frequency;     /* megahertz */
@@ -124,7 +124,7 @@ struct acpi_tsd_package {
        u64 domain;
        u64 coord_type;
        u64 num_processors;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_ptc_register {
        u8 descriptor;
@@ -134,7 +134,7 @@ struct acpi_ptc_register {
        u8 bit_offset;
        u8 reserved;
        u64 address;
-} __attribute__ ((packed));
+} __packed;
 
 struct acpi_processor_tx_tss {
        u64 freqpercentage;     /* */
index ea4c7bb..843ef1a 100644 (file)
@@ -22,6 +22,7 @@ extern void acpi_video_unregister(void);
 extern void acpi_video_unregister_backlight(void);
 extern int acpi_video_get_edid(struct acpi_device *device, int type,
                               int device_id, void **edid);
+extern bool acpi_video_verify_backlight_support(void);
 #else
 static inline int acpi_video_register(void) { return 0; }
 static inline void acpi_video_unregister(void) { return; }
@@ -31,6 +32,7 @@ static inline int acpi_video_get_edid(struct acpi_device *device, int type,
 {
        return -ENODEV;
 }
+static inline bool acpi_video_verify_backlight_support(void) { return false; }
 #endif
 
 #endif
index 471ba48..c1c0b0c 100644 (file)
        . = ALIGN(PAGE_SIZE);                                           \
        *(.data..percpu..page_aligned)                                  \
        . = ALIGN(cacheline);                                           \
-       *(.data..percpu..readmostly)                                    \
+       *(.data..percpu..read_mostly)                                   \
        . = ALIGN(cacheline);                                           \
        *(.data..percpu)                                                \
        *(.data..percpu..shared_aligned)                                \
index 0572035..a70d456 100644 (file)
 #define INTEL_BDW_GT3D_IDS(info) \
        _INTEL_BDW_D_IDS(3, info)
 
+#define INTEL_BDW_RSVDM_IDS(info) \
+       _INTEL_BDW_M_IDS(4, info)
+
+#define INTEL_BDW_RSVDD_IDS(info) \
+       _INTEL_BDW_D_IDS(4, info)
+
 #define INTEL_BDW_M_IDS(info) \
        INTEL_BDW_GT12M_IDS(info), \
-       INTEL_BDW_GT3M_IDS(info)
+       INTEL_BDW_GT3M_IDS(info), \
+       INTEL_BDW_RSVDM_IDS(info)
 
 #define INTEL_BDW_D_IDS(info) \
        INTEL_BDW_GT12D_IDS(info), \
-       INTEL_BDW_GT3D_IDS(info)
+       INTEL_BDW_GT3D_IDS(info), \
+       INTEL_BDW_RSVDD_IDS(info)
 
 #define INTEL_CHV_IDS(info) \
        INTEL_VGA_DEVICE(0x22b0, info), \
index cfdc884..baa6f11 100644 (file)
@@ -30,7 +30,8 @@
 #define _I915_POWERWELL_H_
 
 /* For use by hda_i915 driver */
-extern void i915_request_power_well(void);
-extern void i915_release_power_well(void);
+extern int i915_request_power_well(void);
+extern int i915_release_power_well(void);
+extern int i915_get_cdclk_freq(void);
 
 #endif                         /* _I915_POWERWELL_H_ */
index 97dcb89..21d51ae 100644 (file)
@@ -63,7 +63,6 @@
 #define CLK_SCLK_MPHY_IXTAL24  161
 
 /* gate clocks */
-#define CLK_ACLK66_PERIC       256
 #define CLK_UART0              257
 #define CLK_UART1              258
 #define CLK_UART2              259
 #define CLK_MOUT_G3D           641
 #define CLK_MOUT_VPLL          642
 #define CLK_MOUT_MAUDIO0       643
+#define CLK_MOUT_USER_ACLK333  644
+#define CLK_MOUT_SW_ACLK333    645
 
 /* divider clocks */
 #define CLK_DOUT_PIXEL         768
index 7cf5c99..b91dd46 100644 (file)
 #define IMX6SL_CLK_USDHC4              132
 #define IMX6SL_CLK_PLL4_AUDIO_DIV      133
 #define IMX6SL_CLK_SPBA                        134
-#define IMX6SL_CLK_END                 135
+#define IMX6SL_CLK_ENET                        135
+#define IMX6SL_CLK_END                 136
 
 #endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
index 0d2c739..d80caa6 100644 (file)
@@ -10,6 +10,7 @@
 #define CLK_ETH1_PHY           4
 
 /* CLOCKGEN A1 */
+#define CLK_ICN_IF_2           0
 #define CLK_GMAC0_PHY          3
 
 #endif
index 552c779..f9bdbd1 100644 (file)
@@ -10,6 +10,7 @@
 #define CLK_ETH1_PHY           4
 
 /* CLOCKGEN A1 */
+#define CLK_ICN_IF_2           0
 #define CLK_GMAC0_PHY          3
 
 #endif
index 002a285..3d33794 100644 (file)
@@ -30,7 +30,8 @@
 #define MUX_MODE14     0xe
 #define MUX_MODE15     0xf
 
-#define PULL_ENA               (1 << 16)
+#define PULL_ENA               (0 << 16)
+#define PULL_DIS               (1 << 16)
 #define PULL_UP                        (1 << 17)
 #define INPUT_EN               (1 << 18)
 #define SLEWCONTROL            (1 << 19)
 #define WAKEUP_EVENT           (1 << 25)
 
 /* Active pin states */
-#define PIN_OUTPUT             0
+#define PIN_OUTPUT             (0 | PULL_DIS)
 #define PIN_OUTPUT_PULLUP      (PIN_OUTPUT | PULL_ENA | PULL_UP)
 #define PIN_OUTPUT_PULLDOWN    (PIN_OUTPUT | PULL_ENA)
-#define PIN_INPUT              INPUT_EN
+#define PIN_INPUT              (INPUT_EN | PULL_DIS)
 #define PIN_INPUT_SLEW         (INPUT_EN | SLEWCONTROL)
 #define PIN_INPUT_PULLUP       (PULL_ENA | INPUT_EN | PULL_UP)
 #define PIN_INPUT_PULLDOWN     (PULL_ENA | INPUT_EN)
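Worked example of the inverted polarity: in this register layout bit 16
means "pull disabled", so PULL_ENA becomes the absence of that bit.
PIN_INPUT_PULLUP = PULL_ENA | INPUT_EN | PULL_UP
                 = (0 << 16) | (1 << 18) | (1 << 17) = 0x60000,
while PIN_INPUT = INPUT_EN | PULL_DIS = (1 << 18) | (1 << 16) = 0x50000,
so a plain input now explicitly disables the pull resistors instead of
leaving them unintentionally enabled with bit 16 clear.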
index 7216b0d..df03562 100644 (file)
 #ifdef __KERNEL__
 #include  <linux/irqreturn.h>
 
-#ifndef bool
-#define bool int
-#endif
-
 /*
  * RECON_THRESHOLD is the maximum number of RECON messages to receive
  * within one minute before printing a "cabling problem" warning. The
@@ -285,9 +281,9 @@ struct arcnet_local {
        unsigned long first_recon; /* time of "first" RECON message to count */
        unsigned long last_recon;  /* time of most recent RECON */
        int num_recons;         /* number of RECONs between first and last. */
-       bool network_down;      /* do we think the network is down? */
+       int network_down;       /* do we think the network is down? */
 
-       bool excnak_pending;    /* We just got an excesive nak interrupt */
+       int excnak_pending;    /* We just got an excessive nak interrupt */
 
        struct {
                uint16_t sequence;      /* sequence number (incs with each packet) */
@@ -305,7 +301,7 @@ struct arcnet_local {
                void (*command) (struct net_device * dev, int cmd);
                int (*status) (struct net_device * dev);
                void (*intmask) (struct net_device * dev, int mask);
-               bool (*reset) (struct net_device * dev, bool really_reset);
+               int (*reset) (struct net_device * dev, int really_reset);
                void (*open) (struct net_device * dev);
                void (*close) (struct net_device * dev);
 
index 5a64576..d2633ee 100644 (file)
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
 #define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
        __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
 
+/*
+ * Check if adding a bio_vec after bprv with offset would create a gap in
+ * the SG list. Most drivers don't care about this, but some do.
+ */
+static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
+{
+       return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
+}
+
 #define bio_io_error(bio) bio_endio((bio), -EIO)
 
 /*
@@ -644,10 +653,6 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-
-
-#define bip_vec_idx(bip, idx)  (&(bip->bip_vec[(idx)]))
-
 #define bip_for_each_vec(bvl, bip, iter)                               \
        for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
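A worked example of bvec_gap_to_prev(), assuming 4 KiB pages: a previous
vec with bv_offset 0 and bv_len 4096 ends exactly on a page boundary, so
a following vec that starts at offset 0 is gap-free; if the previous vec
were only 512 bytes, (0 + 512) & 4095 is nonzero and any follow-on vec
counts as a gap, as does any following vec whose own offset is nonzero.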
index a002cf1..eb726b9 100644 (file)
@@ -42,7 +42,7 @@ struct blk_mq_hw_ctx {
        unsigned int            nr_ctx;
        struct blk_mq_ctx       **ctxs;
 
-       unsigned int            wait_index;
+       atomic_t                wait_index;
 
        struct blk_mq_tags      *tags;
 
index 31e1105..8699bcf 100644 (file)
@@ -512,6 +512,7 @@ struct request_queue {
 #define QUEUE_FLAG_DEAD        19      /* queue tear-down finished */
 #define QUEUE_FLAG_INIT_DONE   20      /* queue is initialized */
 #define QUEUE_FLAG_NO_SG_MERGE 21      /* don't attempt to merge SG segments*/
+#define QUEUE_FLAG_SG_GAPS     22      /* queue doesn't support SG gaps */
 
 #define QUEUE_FLAG_DEFAULT     ((1 << QUEUE_FLAG_IO_STAT) |            \
                                 (1 << QUEUE_FLAG_STACKABLE)    |       \
@@ -920,7 +921,7 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
                                               sector_t offset)
 {
        if (!q->limits.chunk_sectors)
-               return q->limits.max_hw_sectors;
+               return q->limits.max_sectors;
 
        return q->limits.chunk_sectors -
                        (offset & (q->limits.chunk_sectors - 1));
index ec4112d..8f8ae95 100644 (file)
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
  *********************************************************************/
 
 /* Special Values of .frequency field */
-#define CPUFREQ_ENTRY_INVALID  ~0
-#define CPUFREQ_TABLE_END      ~1
+#define CPUFREQ_ENTRY_INVALID  ~0u
+#define CPUFREQ_TABLE_END      ~1u
 /* Special Values of .flags field */
 #define CPUFREQ_BOOST_FREQ     (1 << 0)
 
index 7d275c4..9e8a032 100644 (file)
@@ -8,8 +8,8 @@
 #include <linux/types.h>
 #include <linux/bitrev.h>
 
-extern u32  crc32_le(u32 crc, unsigned char const *p, size_t len);
-extern u32  crc32_be(u32 crc, unsigned char const *p, size_t len);
+u32 __pure crc32_le(u32 crc, unsigned char const *p, size_t len);
+u32 __pure crc32_be(u32 crc, unsigned char const *p, size_t len);
 
 /**
  * crc32_le_combine - Combine two crc32 check values into one. For two
@@ -29,9 +29,14 @@ extern u32  crc32_be(u32 crc, unsigned char const *p, size_t len);
  *        with the same initializer as crc1, and crc2 seed was 0. See
  *        also crc32_combine_test().
  */
-extern u32  crc32_le_combine(u32 crc1, u32 crc2, size_t len2);
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len);
 
-extern u32  __crc32c_le(u32 crc, unsigned char const *p, size_t len);
+static inline u32 crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+       return crc32_le_shift(crc1, len2) ^ crc2;
+}
+
+u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len);
 
 /**
  * __crc32c_le_combine - Combine two crc32c check values into one. For two
@@ -51,7 +56,12 @@ extern u32  __crc32c_le(u32 crc, unsigned char const *p, size_t len);
  *        seeded with the same initializer as crc1, and crc2 seed
  *        was 0. See also crc32c_combine_test().
  */
-extern u32  __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2);
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len);
+
+static inline u32 __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+{
+       return __crc32c_le_shift(crc1, len2) ^ crc2;
+}
 
 #define crc32(seed, data, length)  crc32_le(seed, (unsigned char const *)(data), length)
 
index 4ff262e..45a9147 100644 (file)
@@ -133,7 +133,6 @@ extern struct request *elv_latter_request(struct request_queue *, struct request
 extern int elv_register_queue(struct request_queue *q);
 extern void elv_unregister_queue(struct request_queue *q);
 extern int elv_may_queue(struct request_queue *, int);
-extern void elv_abort_queue(struct request_queue *);
 extern void elv_completed_request(struct request_queue *, struct request *);
 extern int elv_set_request(struct request_queue *q, struct request *rq,
                           struct bio *bio, gfp_t gfp_mask);
@@ -144,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
  * io scheduler registration
  */
 extern void __init load_default_elevator_module(void);
-extern int __init elv_register(struct elevator_type *);
+extern int elv_register(struct elevator_type *);
 extern void elv_unregister(struct elevator_type *);
 
 /*
index a7e3c48..a5227ab 100644 (file)
@@ -6,6 +6,7 @@
 
 #include <linux/atomic.h>
 #include <linux/compat.h>
+#include <linux/skbuff.h>
 #include <linux/workqueue.h>
 #include <uapi/linux/filter.h>
 
@@ -81,7 +82,7 @@ enum {
 /* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */
 
 #define BPF_ALU64_REG(OP, DST, SRC)                            \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_X,        \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -89,7 +90,7 @@ enum {
                .imm   = 0 })
 
 #define BPF_ALU32_REG(OP, DST, SRC)                            \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -99,7 +100,7 @@ enum {
 /* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
 
 #define BPF_ALU64_IMM(OP, DST, IMM)                            \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_OP(OP) | BPF_K,        \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -107,7 +108,7 @@ enum {
                .imm   = IMM })
 
 #define BPF_ALU32_IMM(OP, DST, IMM)                            \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -117,7 +118,7 @@ enum {
 /* Endianess conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
 
 #define BPF_ENDIAN(TYPE, DST, LEN)                             \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_END | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -127,7 +128,7 @@ enum {
 /* Short form of mov, dst_reg = src_reg */
 
 #define BPF_MOV64_REG(DST, SRC)                                        \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_X,           \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -135,7 +136,7 @@ enum {
                .imm   = 0 })
 
 #define BPF_MOV32_REG(DST, SRC)                                        \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_X,             \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -145,7 +146,7 @@ enum {
 /* Short form of mov, dst_reg = imm32 */
 
 #define BPF_MOV64_IMM(DST, IMM)                                        \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_K,           \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -153,7 +154,7 @@ enum {
                .imm   = IMM })
 
 #define BPF_MOV32_IMM(DST, IMM)                                        \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_K,             \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -163,7 +164,7 @@ enum {
 /* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */
 
 #define BPF_MOV64_RAW(TYPE, DST, SRC, IMM)                     \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU64 | BPF_MOV | BPF_SRC(TYPE),   \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -171,7 +172,7 @@ enum {
                .imm   = IMM })
 
 #define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)                     \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ALU | BPF_MOV | BPF_SRC(TYPE),     \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -181,7 +182,7 @@ enum {
 /* Direct packet access, R0 = *(uint *) (skb->data + imm32) */
 
 #define BPF_LD_ABS(SIZE, IMM)                                  \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,     \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
@@ -191,7 +192,7 @@ enum {
 /* Indirect packet access, R0 = *(uint *) (skb->data + src_reg + imm32) */
 
 #define BPF_LD_IND(SIZE, SRC, IMM)                             \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_LD | BPF_SIZE(SIZE) | BPF_IND,     \
                .dst_reg = 0,                                   \
                .src_reg = SRC,                                 \
@@ -201,7 +202,7 @@ enum {
 /* Memory load, dst_reg = *(uint *) (src_reg + off16) */
 
 #define BPF_LDX_MEM(SIZE, DST, SRC, OFF)                       \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -211,7 +212,7 @@ enum {
 /* Memory store, *(uint *) (dst_reg + off16) = src_reg */
 
 #define BPF_STX_MEM(SIZE, DST, SRC, OFF)                       \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,    \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -221,7 +222,7 @@ enum {
 /* Memory store, *(uint *) (dst_reg + off16) = imm32 */
 
 #define BPF_ST_MEM(SIZE, DST, OFF, IMM)                                \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_ST | BPF_SIZE(SIZE) | BPF_MEM,     \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -231,7 +232,7 @@ enum {
 /* Conditional jumps against registers, if (dst_reg 'op' src_reg) goto pc + off16 */
 
 #define BPF_JMP_REG(OP, DST, SRC, OFF)                         \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_X,          \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -241,7 +242,7 @@ enum {
 /* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */
 
 #define BPF_JMP_IMM(OP, DST, IMM, OFF)                         \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_OP(OP) | BPF_K,          \
                .dst_reg = DST,                                 \
                .src_reg = 0,                                   \
@@ -251,7 +252,7 @@ enum {
 /* Function call */
 
 #define BPF_EMIT_CALL(FUNC)                                    \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_CALL,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
@@ -261,7 +262,7 @@ enum {
 /* Raw code statement block */
 
 #define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)                 \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = CODE,                                  \
                .dst_reg = DST,                                 \
                .src_reg = SRC,                                 \
@@ -271,7 +272,7 @@ enum {
 /* Program exit */
 
 #define BPF_EXIT_INSN()                                                \
-       ((struct sock_filter_int) {                             \
+       ((struct bpf_insn) {                                    \
                .code  = BPF_JMP | BPF_EXIT,                    \
                .dst_reg = 0,                                   \
                .src_reg = 0,                                   \
@@ -295,9 +296,10 @@ enum {
 })
 
 /* Macro to invoke filter function. */
-#define SK_RUN_FILTER(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
+#define SK_RUN_FILTER(filter, ctx) \
+       (*filter->prog->bpf_func)(ctx, filter->prog->insnsi)
 
-struct sock_filter_int {
+struct bpf_insn {
        __u8    code;           /* opcode */
        __u8    dst_reg:4;      /* dest register */
        __u8    src_reg:4;      /* source register */
@@ -322,54 +324,58 @@ struct sk_buff;
 struct sock;
 struct seccomp_data;
 
-struct sk_filter {
-       atomic_t                refcnt;
+struct bpf_prog {
        u32                     jited:1,        /* Is our filter JIT'ed? */
                                len:31;         /* Number of filter blocks */
        struct sock_fprog_kern  *orig_prog;     /* Original BPF program */
-       struct rcu_head         rcu;
        unsigned int            (*bpf_func)(const struct sk_buff *skb,
-                                           const struct sock_filter_int *filter);
+                                           const struct bpf_insn *filter);
        union {
                struct sock_filter      insns[0];
-               struct sock_filter_int  insnsi[0];
+               struct bpf_insn         insnsi[0];
                struct work_struct      work;
        };
 };
 
-static inline unsigned int sk_filter_size(unsigned int proglen)
+struct sk_filter {
+       atomic_t        refcnt;
+       struct rcu_head rcu;
+       struct bpf_prog *prog;
+};
+
+#define BPF_PROG_RUN(filter, ctx)  (*filter->bpf_func)(ctx, filter->insnsi)
+
+static inline unsigned int bpf_prog_size(unsigned int proglen)
 {
-       return max(sizeof(struct sk_filter),
-                  offsetof(struct sk_filter, insns[proglen]));
+       return max(sizeof(struct bpf_prog),
+                  offsetof(struct bpf_prog, insns[proglen]));
 }
 
-#define sk_filter_proglen(fprog)                       \
-               (fprog->len * sizeof(fprog->filter[0]))
+#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
 
 int sk_filter(struct sock *sk, struct sk_buff *skb);
 
-void sk_filter_select_runtime(struct sk_filter *fp);
-void sk_filter_free(struct sk_filter *fp);
+void bpf_prog_select_runtime(struct bpf_prog *fp);
+void bpf_prog_free(struct bpf_prog *fp);
 
-int sk_convert_filter(struct sock_filter *prog, int len,
-                     struct sock_filter_int *new_prog, int *new_len);
+int bpf_convert_filter(struct sock_filter *prog, int len,
+                      struct bpf_insn *new_prog, int *new_len);
 
-int sk_unattached_filter_create(struct sk_filter **pfp,
-                               struct sock_fprog_kern *fprog);
-void sk_unattached_filter_destroy(struct sk_filter *fp);
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
+void bpf_prog_destroy(struct bpf_prog *fp);
 
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
 int sk_detach_filter(struct sock *sk);
 
-int sk_chk_filter(struct sock_filter *filter, unsigned int flen);
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen);
 int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
                  unsigned int len);
 
-void sk_filter_charge(struct sock *sk, struct sk_filter *fp);
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp);
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp);
 
 u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
-void bpf_int_jit_compile(struct sk_filter *fp);
+void bpf_int_jit_compile(struct bpf_prog *fp);
 
 #define BPF_ANC                BIT(15)
 
@@ -406,13 +412,25 @@ static inline u16 bpf_anc_helper(const struct sock_filter *ftest)
        }
 }
 
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb,
+                                          int k, unsigned int size);
+
+static inline void *bpf_load_pointer(const struct sk_buff *skb, int k,
+                                    unsigned int size, void *buffer)
+{
+       if (k >= 0)
+               return skb_header_pointer(skb, k, size, buffer);
+
+       return bpf_internal_load_pointer_neg_helper(skb, k, size);
+}
+
 #ifdef CONFIG_BPF_JIT
 #include <stdarg.h>
 #include <linux/linkage.h>
 #include <linux/printk.h>
 
-void bpf_jit_compile(struct sk_filter *fp);
-void bpf_jit_free(struct sk_filter *fp);
+void bpf_jit_compile(struct bpf_prog *fp);
+void bpf_jit_free(struct bpf_prog *fp);
 
 static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
                                u32 pass, void *image)
@@ -426,11 +444,11 @@ static inline void bpf_jit_dump(unsigned int flen, unsigned int proglen,
 #else
 #include <linux/slab.h>
 
-static inline void bpf_jit_compile(struct sk_filter *fp)
+static inline void bpf_jit_compile(struct bpf_prog *fp)
 {
 }
 
-static inline void bpf_jit_free(struct sk_filter *fp)
+static inline void bpf_jit_free(struct bpf_prog *fp)
 {
        kfree(fp);
 }
index 338e6f7..e11d60c 100644 (file)
@@ -1921,6 +1921,12 @@ static inline int break_lease(struct inode *inode, unsigned int mode)
 
 static inline int break_deleg(struct inode *inode, unsigned int mode)
 {
+       /*
+        * Since this check is lockless, we must ensure that any refcounts
+        * taken are done before checking inode->i_flock. Otherwise, we could
+        * end up racing with tasks trying to set a new lease on this file.
+        */
+       smp_mb();
        if (inode->i_flock)
                return __break_lease(inode, mode, FL_DELEG);
        return 0;
index fd22789..808dcb8 100644 (file)
@@ -36,8 +36,28 @@ extern void brioctl_set(int (*ioctl_hook)(struct net *, unsigned int, void __use
 
 typedef int br_should_route_hook_t(struct sk_buff *skb);
 extern br_should_route_hook_t __rcu *br_should_route_hook;
+
+#if IS_ENABLED(CONFIG_BRIDGE) && IS_ENABLED(CONFIG_BRIDGE_IGMP_SNOOPING)
 int br_multicast_list_adjacent(struct net_device *dev,
                               struct list_head *br_ip_list);
+bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto);
 bool br_multicast_has_querier_adjacent(struct net_device *dev, int proto);
+#else
+static inline int br_multicast_list_adjacent(struct net_device *dev,
+                                            struct list_head *br_ip_list)
+{
+       return 0;
+}
+static inline bool br_multicast_has_querier_anywhere(struct net_device *dev,
+                                                    int proto)
+{
+       return false;
+}
+static inline bool br_multicast_has_querier_adjacent(struct net_device *dev,
+                                                    int proto)
+{
+       return false;
+}
+#endif
 
 #endif
index 2faef33..ff56053 100644 (file)
@@ -39,6 +39,7 @@ struct ipv6_devconf {
 #endif
        __s32           proxy_ndp;
        __s32           accept_source_route;
+       __s32           accept_ra_from_local;
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        __s32           optimistic_dad;
 #endif
@@ -193,12 +194,13 @@ struct ipv6_pinfo {
                                sndflow:1,
                                repflow:1,
                                pmtudisc:3,
-                               ipv6only:1,
+                               padding:1,      /* 1 bit hole */
                                srcprefs:3,     /* 001: prefer temporary address
                                                 * 010: prefer public address
                                                 * 100: prefer care-of address
                                                 */
-                               dontfrag:1;
+                               dontfrag:1,
+                               autoflowlabel:1;
        __u8                    min_hopcount;
        __u8                    tclass;
        __be32                  rcv_flowinfo;
@@ -256,16 +258,6 @@ static inline struct ipv6_pinfo * inet6_sk(const struct sock *__sk)
        return inet_sk(__sk)->pinet6;
 }
 
-static inline struct request_sock *inet6_reqsk_alloc(struct request_sock_ops *ops)
-{
-       struct request_sock *req = reqsk_alloc(ops);
-
-       if (req)
-               inet_rsk(req)->pktopts = NULL;
-
-       return req;
-}
-
 static inline struct raw6_sock *raw6_sk(const struct sock *sk)
 {
        return (struct raw6_sock *)sk;
@@ -282,8 +274,8 @@ static inline void inet_sk_copy_descendant(struct sock *sk_to,
        __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size);
 }
 
-#define __ipv6_only_sock(sk)   (inet6_sk(sk)->ipv6only)
-#define ipv6_only_sock(sk)     ((sk)->sk_family == PF_INET6 && __ipv6_only_sock(sk))
+#define __ipv6_only_sock(sk)   (sk->sk_ipv6only)
+#define ipv6_only_sock(sk)     (__ipv6_only_sock(sk))
 #define ipv6_sk_rxinfo(sk)     ((sk)->sk_family == PF_INET6 && \
                                 inet6_sk(sk)->rxopt.bits.rxinfo)
 
@@ -296,8 +288,8 @@ static inline const struct in6_addr *inet6_rcv_saddr(const struct sock *sk)
 
 static inline int inet_v6_ipv6only(const struct sock *sk)
 {
-       return likely(sk->sk_state != TCP_TIME_WAIT) ?
-               ipv6_only_sock(sk) : inet_twsk(sk)->tw_ipv6only;
+       /* ipv6only field is at the same position for timewait and other sockets */
+       return ipv6_only_sock(sk);
 }
 #else
 #define __ipv6_only_sock(sk)   0
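
With the ipv6only bit moved into struct sock_common (sk->sk_ipv6only), the
same test now covers full and timewait sockets alike, which is what lets
inet_v6_ipv6only() drop its TCP_TIME_WAIT branch. A hedged sketch (the
helper name is hypothetical):

    #include <net/sock.h>

    static bool sk_accepts_v4_mapped(const struct sock *sk)
    {
            /* valid even for timewait minisocks; no inet_twsk() cast needed */
            return sk->sk_family == AF_INET6 && !sk->sk_ipv6only;
    }
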
index 8e10f57..a0070c6 100644 (file)
@@ -180,8 +180,8 @@ struct ippp_struct {
   struct slcompress *slcomp;
 #endif
 #ifdef CONFIG_IPPP_FILTER
-  struct sk_filter *pass_filter;   /* filter for packets to pass */
-  struct sk_filter *active_filter; /* filter for pkts to reset idle */
+  struct bpf_prog *pass_filter;   /* filter for packets to pass */
+  struct bpf_prog *active_filter; /* filter for pkts to reset idle */
 #endif
   unsigned long debug;
   struct isdn_ppp_compressor *compressor,*decompressor;
index 4c52907..a9e2268 100644 (file)
@@ -501,7 +501,7 @@ static inline char * __deprecated pack_hex_byte(char *buf, u8 byte)
 extern int hex_to_bin(char ch);
 extern int __must_check hex2bin(u8 *dst, const char *src, size_t count);
 
-int mac_pton(const char *s, u8 *mac);
+bool mac_pton(const char *s, u8 *mac);
 
 /*
  * General tracing related utility functions - trace_printk(),
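
Callers of mac_pton() should now treat the result as a bool rather than an
int. A minimal sketch, assuming a hypothetical parse_mac() wrapper:

    #include <linux/errno.h>
    #include <linux/if_ether.h>
    #include <linux/kernel.h>

    static int parse_mac(const char *str, u8 addr[ETH_ALEN])
    {
            if (!mac_pton(str, addr))
                    return -EINVAL; /* not a valid "xx:xx:xx:xx:xx:xx" string */
            return 0;
    }
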
index 17aa1cc..30faf79 100644 (file)
@@ -91,6 +91,7 @@ struct kernfs_elem_attr {
        const struct kernfs_ops *ops;
        struct kernfs_open_node *open;
        loff_t                  size;
+       struct kernfs_node      *notify_next;   /* for kernfs_notify() */
 };
 
 /*
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
                               struct kernfs_root *root, unsigned long magic,
                               bool *new_sb_created, const void *ns);
 void kernfs_kill_sb(struct super_block *sb);
+struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
 
 void kernfs_init(void);
 
index 5ab4e3a..92abb49 100644 (file)
@@ -593,6 +593,7 @@ struct ata_host {
        struct device           *dev;
        void __iomem * const    *iomap;
        unsigned int            n_ports;
+       unsigned int            n_tags;                 /* nr of NCQ tags */
        void                    *private_data;
        struct ata_port_operations *ops;
        unsigned long           flags;
index b12f4bb..e15b154 100644 (file)
 #define MSIX_LEGACY_SZ         4
 #define MIN_MSIX_P_PORT                5
 
+#define MLX4_NUM_UP                    8
+#define MLX4_NUM_TC                    8
+#define MLX4_MAX_100M_UNITS_VAL                255     /*
+                                                * workaround: can't set values
+                                                * greater than this value when
+                                                * using 100 Mbps units.
+                                                */
+#define MLX4_RATELIMIT_100M_UNITS      3       /* 100 Mbps */
+#define MLX4_RATELIMIT_1G_UNITS                4       /* 1 Gbps */
+#define MLX4_RATELIMIT_DEFAULT         0x00ff
+
 #define MLX4_ROCE_MAX_GIDS     128
 #define MLX4_ROCE_PF_GIDS      16
 
@@ -578,8 +589,6 @@ struct mlx4_cq {
        u32                     cons_index;
 
        u16                     irq;
-       bool                    irq_affinity_change;
-
        __be32                 *set_ci_db;
        __be32                 *arm_db;
        int                     arm_sn;
@@ -1167,6 +1176,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
                   int *vector);
 void mlx4_release_eq(struct mlx4_dev *dev, int vec);
 
+int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
+
 int mlx4_get_phys_port_id(struct mlx4_dev *dev);
 int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
 int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
@@ -1243,4 +1254,11 @@ int mlx4_vf_smi_enabled(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_get_enable_smi_admin(struct mlx4_dev *dev, int slave, int port);
 int mlx4_vf_set_enable_smi_admin(struct mlx4_dev *dev, int slave, int port,
                                 int enable);
+
+/* Returns true if running in low memory profile (kdump kernel) */
+static inline bool mlx4_low_memory_profile(void)
+{
+       return reset_devices;
+}
+
 #endif /* MLX4_DEVICE_H */
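
mlx4_low_memory_profile() keys off reset_devices, so it is true in kdump
kernels. A hedged sketch of the intended use (the ring-size constants and
helper are illustrative, not taken from the driver):

    #include <linux/mlx4/device.h>

    #define MY_RX_RING_DEFAULT      1024    /* hypothetical */
    #define MY_RX_RING_KDUMP        128     /* hypothetical */

    static int my_rx_ring_size(void)
    {
            /* shrink resources when running as a crash/kdump kernel */
            return mlx4_low_memory_profile() ? MY_RX_RING_KDUMP :
                                               MY_RX_RING_DEFAULT;
    }
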
index 3406cfb..3349471 100644 (file)
@@ -456,9 +456,6 @@ struct mlx5_eqe_cq_err {
        u8      syndrome;
 };
 
-struct mlx5_eqe_dropped_packet {
-};
-
 struct mlx5_eqe_port_state {
        u8      reserved0[8];
        u8      port;
@@ -498,7 +495,6 @@ union ev_data {
        struct mlx5_eqe_comp            comp;
        struct mlx5_eqe_qp_srq          qp_srq;
        struct mlx5_eqe_cq_err          cq_err;
-       struct mlx5_eqe_dropped_packet  dp;
        struct mlx5_eqe_port_state      port;
        struct mlx5_eqe_gpio            gpio;
        struct mlx5_eqe_congestion      cong;
index 2bce4aa..9f3a547 100644 (file)
@@ -381,8 +381,8 @@ struct mlx5_buf {
        struct mlx5_buf_list   *page_list;
        int                     nbufs;
        int                     npages;
-       int                     page_shift;
        int                     size;
+       u8                      page_shift;
 };
 
 struct mlx5_eq {
@@ -543,6 +543,10 @@ struct mlx5_priv {
        /* protect mkey key part */
        spinlock_t              mkey_lock;
        u8                      mkey_key;
+
+       struct list_head        dev_list;
+       struct list_head        ctx_list;
+       spinlock_t              ctx_lock;
 };
 
 struct mlx5_core_dev {
@@ -555,7 +559,7 @@ struct mlx5_core_dev {
        struct mlx5_init_seg __iomem *iseg;
        void                    (*event) (struct mlx5_core_dev *dev,
                                          enum mlx5_dev_event event,
-                                         void *data);
+                                         unsigned long param);
        struct mlx5_priv        priv;
        struct mlx5_profile     *profile;
        atomic_t                num_qps;
@@ -686,8 +690,6 @@ static inline u32 mlx5_base_mkey(const u32 key)
        return key & 0xffffff00u;
 }
 
-int mlx5_dev_init(struct mlx5_core_dev *dev, struct pci_dev *pdev);
-void mlx5_dev_cleanup(struct mlx5_core_dev *dev);
 int mlx5_cmd_init(struct mlx5_core_dev *dev);
 void mlx5_cmd_cleanup(struct mlx5_core_dev *dev);
 void mlx5_cmd_use_events(struct mlx5_core_dev *dev);
@@ -734,7 +736,7 @@ int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
 int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
 int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
 int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, void *inb, void *outb,
-                     u16 opmod, int port);
+                     u16 opmod, u8 port);
 void mlx5_pagealloc_init(struct mlx5_core_dev *dev);
 void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev);
 int mlx5_pagealloc_start(struct mlx5_core_dev *dev);
@@ -767,7 +769,7 @@ void mlx5_qp_debugfs_cleanup(struct mlx5_core_dev *dev);
 int mlx5_core_access_reg(struct mlx5_core_dev *dev, void *data_in,
                         int size_in, void *data_out, int size_out,
                         u16 reg_num, int arg, int write);
-int mlx5_set_port_caps(struct mlx5_core_dev *dev, int port_num, u32 caps);
+int mlx5_set_port_caps(struct mlx5_core_dev *dev, u8 port_num, u32 caps);
 
 int mlx5_debug_eq_add(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
 void mlx5_debug_eq_remove(struct mlx5_core_dev *dev, struct mlx5_eq *eq);
@@ -811,9 +813,20 @@ enum {
        MAX_MR_CACHE_ENTRIES    = 16,
 };
 
+struct mlx5_interface {
+       void *                  (*add)(struct mlx5_core_dev *dev);
+       void                    (*remove)(struct mlx5_core_dev *dev, void *context);
+       void                    (*event)(struct mlx5_core_dev *dev, void *context,
+                                        enum mlx5_dev_event event, unsigned long param);
+       struct list_head        list;
+};
+
+int mlx5_register_interface(struct mlx5_interface *intf);
+void mlx5_unregister_interface(struct mlx5_interface *intf);
+
 struct mlx5_profile {
        u64     mask;
-       u32     log_max_qp;
+       u8      log_max_qp;
        struct {
                int     size;
                int     limit;
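
The new mlx5_interface lets protocol drivers attach to an mlx5 core device:
add() returns a per-device context that is later handed back to remove()
and event(). A minimal sketch under stated assumptions (struct my_ctx and
the callbacks are illustrative, not the real mlx5 sub-driver code):

    #include <linux/mlx5/driver.h>
    #include <linux/slab.h>

    struct my_ctx {
            struct mlx5_core_dev *dev;
    };

    static void *my_add(struct mlx5_core_dev *dev)
    {
            struct my_ctx *ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);

            if (ctx)
                    ctx->dev = dev;
            return ctx;             /* handed back as @context below */
    }

    static void my_remove(struct mlx5_core_dev *dev, void *context)
    {
            kfree(context);
    }

    static void my_event(struct mlx5_core_dev *dev, void *context,
                         enum mlx5_dev_event event, unsigned long param)
    {
            /* e.g. react to port up/down notifications */
    }

    static struct mlx5_interface my_interface = {
            .add    = my_add,
            .remove = my_remove,
            .event  = my_event,
    };

    /* module init/exit would pair:
     *      mlx5_register_interface(&my_interface);
     *      mlx5_unregister_interface(&my_interface);
     */
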
index 11692de..42aa9b9 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/lockdep.h>
 #include <linux/atomic.h>
 #include <asm/processor.h>
+#include <linux/osq_lock.h>
 
 /*
  * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
  * - detects multi-task circular deadlocks and prints out all affected
  *   locks and tasks (and only those tasks)
  */
-struct optimistic_spin_queue;
 struct mutex {
        /* 1: unlocked, 0: locked, negative: locked, possible waiters */
        atomic_t                count;
@@ -56,7 +56,7 @@ struct mutex {
        struct task_struct      *owner;
 #endif
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       struct optimistic_spin_queue    *osq;   /* Spinner MCS lock */
+       struct optimistic_spin_queue osq; /* Spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_MUTEXES
        const char              *name;
index d99800c..dcfdecb 100644 (file)
@@ -176,4 +176,12 @@ enum {
                                 NETIF_F_HW_VLAN_STAG_RX | \
                                 NETIF_F_HW_VLAN_STAG_TX)
 
+#define NETIF_F_GSO_ENCAP_ALL  (NETIF_F_GSO_GRE |                      \
+                                NETIF_F_GSO_GRE_CSUM |                 \
+                                NETIF_F_GSO_IPIP |                     \
+                                NETIF_F_GSO_SIT |                      \
+                                NETIF_F_GSO_UDP_TUNNEL |               \
+                                NETIF_F_GSO_UDP_TUNNEL_CSUM |          \
+                                NETIF_F_GSO_MPLS)
+
 #endif /* _LINUX_NETDEV_FEATURES_H */
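
NETIF_F_GSO_ENCAP_ALL bundles every tunnel GSO type, so a driver whose
hardware can segment any encapsulation can advertise them in one statement.
A hedged sketch (my_set_features() is illustrative):

    #include <linux/netdevice.h>

    static void my_set_features(struct net_device *dev)
    {
            /* one mask now covers GRE, IPIP, SIT, UDP-tunnel and MPLS GSO */
            dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
            dev->hw_enc_features |= NETIF_F_SG;
    }
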
index 66f9a04..3837739 100644 (file)
@@ -943,7 +943,8 @@ typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
  *                   const unsigned char *addr)
 *     Deletes the FDB entry from dev corresponding to addr.
  * int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
- *                    struct net_device *dev, int idx)
+ *                    struct net_device *dev, struct net_device *filter_dev,
+ *                    int idx)
  *     Used to add FDB entries to dump requests. Implementers should add
  *     entries to skb and update idx with the number of entries.
  *
@@ -1114,6 +1115,7 @@ struct net_device_ops {
        int                     (*ndo_fdb_dump)(struct sk_buff *skb,
                                                struct netlink_callback *cb,
                                                struct net_device *dev,
+                                               struct net_device *filter_dev,
                                                int idx);
 
        int                     (*ndo_bridge_setlink)(struct net_device *dev,
@@ -1229,42 +1231,228 @@ enum netdev_priv_flags {
 #define IFF_LIVE_ADDR_CHANGE           IFF_LIVE_ADDR_CHANGE
 #define IFF_MACVLAN                    IFF_MACVLAN
 
-/*
- *     The DEVICE structure.
- *     Actually, this whole structure is a big mistake.  It mixes I/O
- *     data with strictly "high-level" data, and it has to know about
- *     almost every data structure used in the INET module.
+/**
+ *     struct net_device - The DEVICE structure.
+ *             Actually, this whole structure is a big mistake.  It mixes I/O
+ *             data with strictly "high-level" data, and it has to know about
+ *             almost every data structure used in the INET module.
+ *
+ *     @name:  This is the first field of the "visible" part of this structure
+ *             (i.e. as seen by users in the "Space.c" file).  It is the name
+ *             of the interface.
+ *
+ *     @name_hlist:    Device name hash chain, please keep it close to name[]
+ *     @ifalias:       SNMP alias
+ *     @mem_end:       Shared memory end
+ *     @mem_start:     Shared memory start
+ *     @base_addr:     Device I/O address
+ *     @irq:           Device IRQ number
+ *
+ *     @state:         Generic network queuing layer state, see netdev_state_t
+ *     @dev_list:      The global list of network devices
+ *     @napi_list:     List entry used for polling NAPI devices
+ *     @unreg_list:    List entry used when we are unregistering the
+ *                     device; see the function unregister_netdev
+ *     @close_list:    List entry used when we are closing the device
+ *
+ *     @adj_list:      Directly linked devices, like slaves for bonding
+ *     @all_adj_list:  All linked devices, *including* neighbours
+ *     @features:      Currently active device features
+ *     @hw_features:   User-changeable features
+ *
+ *     @wanted_features:       User-requested features
+ *     @vlan_features:         Mask of features inheritable by VLAN devices
+ *
+ *     @hw_enc_features:       Mask of features inherited by encapsulating devices
+ *                             This field indicates what encapsulation
+ *                             offloads the hardware is capable of doing,
+ *                             and drivers will need to set them appropriately.
+ *
+ *     @mpls_features: Mask of features inheritable by MPLS
+ *
+ *     @ifindex:       interface index
+ *     @iflink:        unique device identifier
+ *
+ *     @stats:         Statistics struct, which was left as a legacy; use
+ *                     rtnl_link_stats64 instead
+ *
+ *     @rx_dropped:    Dropped packets by core network,
+ *                     do not use this in drivers
+ *     @tx_dropped:    Dropped packets by core network,
+ *                     do not use this in drivers
+ *
+ *     @carrier_changes:       Stats to monitor carrier on<->off transitions
+ *
+ *     @wireless_handlers:     List of functions to handle Wireless Extensions,
+ *                             instead of ioctl,
+ *                             see <net/iw_handler.h> for details.
+ *     @wireless_data: Instance data managed by the core of wireless extensions
+ *
+ *     @netdev_ops:    Includes several pointers to callbacks,
+ *                     if one wants to override the ndo_*() functions
+ *     @ethtool_ops:   Management operations
+ *     @fwd_ops:       Management operations
+ *     @header_ops:    Includes callbacks for creating, parsing, rebuilding, etc.
+ *                     of Layer 2 headers.
+ *
+ *     @flags:         Interface flags (a la BSD)
+ *     @priv_flags:    Like 'flags' but invisible to userspace,
+ *                     see if.h for the definitions
+ *     @gflags:        Global flags (kept as legacy)
+ *     @padded:        How much padding added by alloc_netdev()
+ *     @operstate:     RFC2863 operstate
+ *     @link_mode:     Mapping policy to operstate
+ *     @if_port:       Selectable AUI, TP, ...
+ *     @dma:           DMA channel
+ *     @mtu:           Interface MTU value
+ *     @type:          Interface hardware type
+ *     @hard_header_len: Hardware header length
+ *
+ *     @needed_headroom: Extra headroom the hardware may need, but not in all
+ *                       cases can this be guaranteed
+ *     @needed_tailroom: Extra tailroom the hardware may need, but not in all
+ *                       cases can this be guaranteed. Some cases also use
+ *                       LL_MAX_HEADER instead to allocate the skb
+ *
+ *     interface address info:
+ *
+ *     @perm_addr:             Permanent hw address
+ *     @addr_assign_type:      Hw address assignment type
+ *     @addr_len:              Hardware address length
+ *     @neigh_priv_len:        Used in neigh_alloc(),
+ *                             initialized only in atm/clip.c
+ *     @dev_id:                Used to differentiate devices that share
+ *                             the same link layer address
+ *     @dev_port:              Used to differentiate devices that share
+ *                             the same function
+ *     @addr_list_lock:        XXX: need comments on this one
+ *     @uc:                    unicast mac addresses
+ *     @mc:                    multicast mac addresses
+ *     @dev_addrs:             list of device hw addresses
+ *     @queues_kset:           Group of all Kobjects in the Tx and RX queues
+ *     @uc_promisc:            Counter that indicates promiscuous mode
+ *                             has been enabled due to the need to listen
+ *                             to additional unicast addresses in a device
+ *                             that does not implement ndo_set_rx_mode()
+ *     @promiscuity:           Number of times the NIC has been told to
+ *                             work in promiscuous mode; if it becomes 0
+ *                             the NIC will exit promiscuous mode
+ *     @allmulti:              Counter that enables or disables allmulticast mode
+ *
+ *     @vlan_info:     VLAN info
+ *     @dsa_ptr:       dsa specific data
+ *     @tipc_ptr:      TIPC specific data
+ *     @atalk_ptr:     AppleTalk link
+ *     @ip_ptr:        IPv4 specific data
+ *     @dn_ptr:        DECnet specific data
+ *     @ip6_ptr:       IPv6 specific data
+ *     @ax25_ptr:      AX.25 specific data
+ *     @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
+ *
+ *     @last_rx:       Time of last Rx
+ *     @dev_addr:      Hw address (before bcast,
+ *                     because most packets are unicast)
+ *
+ *     @_rx:                   Array of RX queues
+ *     @num_rx_queues:         Number of RX queues
+ *                             allocated at register_netdev() time
+ *     @real_num_rx_queues:    Number of RX queues currently active in device
+ *
+ *     @rx_handler:            handler for received packets
+ *     @rx_handler_data:       XXX: need comments on this one
+ *     @ingress_queue:         XXX: need comments on this one
+ *     @broadcast:             hw bcast address
+ *
+ *     @_tx:                   Array of TX queues
+ *     @num_tx_queues:         Number of TX queues allocated at alloc_netdev_mq() time
+ *     @real_num_tx_queues:    Number of TX queues currently active in device
+ *     @qdisc:                 Root qdisc from userspace point of view
+ *     @tx_queue_len:          Max frames per queue allowed
+ *     @tx_global_lock:        XXX: need comments on this one
+ *
+ *     @xps_maps:      XXX: need comments on this one
+ *
+ *     @rx_cpu_rmap:   CPU reverse-mapping for RX completion interrupts,
+ *                     indexed by RX queue number. Assigned by driver.
+ *                     This must only be set if the ndo_rx_flow_steer
+ *                     operation is defined
+ *
+ *     @trans_start:           Time (in jiffies) of last Tx
+ *     @watchdog_timeo:        Represents the timeout that is used by
+ *                             the watchdog (see dev_watchdog())
+ *     @watchdog_timer:        List of timers
+ *
+ *     @pcpu_refcnt:           Number of references to this device
+ *     @todo_list:             Delayed register/unregister
+ *     @index_hlist:           Device index hash chain
+ *     @link_watch_list:       XXX: need comments on this one
+ *
+ *     @reg_state:             Register/unregister state machine
+ *     @dismantle:             Device is going to be freed
+ *     @rtnl_link_state:       This enum represents the phases of creating
+ *                             a new link
+ *
+ *     @destructor:            Called from unregister,
+ *                             can be used to call free_netdev
+ *     @npinfo:                XXX: need comments on this one
+ *     @nd_net:                Network namespace this network device is inside
+ *
+ *     @ml_priv:       Mid-layer private
+ *     @lstats:        Loopback statistics
+ *     @tstats:        Tunnel statistics
+ *     @dstats:        Dummy statistics
+ *     @vstats:        Virtual ethernet statistics
+ *
+ *     @garp_port:     GARP
+ *     @mrp_port:      MRP
+ *
+ *     @dev:           Class/net/name entry
+ *     @sysfs_groups:  Space for optional device, statistics and wireless
+ *                     sysfs groups
+ *
+ *     @sysfs_rx_queue_group:  Space for optional per-rx queue attributes
+ *     @rtnl_link_ops: Rtnl_link_ops
+ *
+ *     @gso_max_size:  Maximum size of generic segmentation offload
+ *     @gso_max_segs:  Maximum number of segments that can be passed to the
+ *                     NIC for GSO
+ *
+ *     @dcbnl_ops:     Data Center Bridging netlink ops
+ *     @num_tc:        Number of traffic classes in the net device
+ *     @tc_to_txq:     XXX: need comments on this one
+ *     @prio_tc_map:   XXX: need comments on this one
+ *
+ *     @fcoe_ddp_xid:  Max exchange id for FCoE LRO by ddp
+ *
+ *     @priomap:       XXX: need comments on this one
+ *     @phydev:        Physical device may attach itself
+ *                     for hardware timestamping
+ *
+ *     @qdisc_tx_busylock:     XXX: need comments on this one
+ *
+ *     @group:         The group that the device belongs to
+ *     @pm_qos_req:    Power Management QoS object
  *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
 
 struct net_device {
-
-       /*
-        * This is the first field of the "visible" part of this structure
-        * (i.e. as seen by users in the "Space.c" file).  It is the name
-        * of the interface.
-        */
        char                    name[IFNAMSIZ];
-
-       /* device name hash chain, please keep it close to name[] */
        struct hlist_node       name_hlist;
-
-       /* snmp alias */
        char                    *ifalias;
-
        /*
         *      I/O specific fields
         *      FIXME: Merge these and struct ifmap into one
         */
-       unsigned long           mem_end;        /* shared mem end       */
-       unsigned long           mem_start;      /* shared mem start     */
-       unsigned long           base_addr;      /* device I/O address   */
-       int                     irq;            /* device IRQ number    */
+       unsigned long           mem_end;
+       unsigned long           mem_start;
+       unsigned long           base_addr;
+       int                     irq;
 
        /*
-        *      Some hardware also needs these fields, but they are not
+        *      Some hardware also needs these fields (state, dev_list,
+        *      napi_list, unreg_list, close_list) but they are not
         *      part of the usual set specified in Space.c.
         */
 
@@ -1275,110 +1463,80 @@ struct net_device {
        struct list_head        unreg_list;
        struct list_head        close_list;
 
-       /* directly linked devices, like slaves for bonding */
        struct {
                struct list_head upper;
                struct list_head lower;
        } adj_list;
 
-       /* all linked devices, *including* neighbours */
        struct {
                struct list_head upper;
                struct list_head lower;
        } all_adj_list;
 
-
-       /* currently active device features */
        netdev_features_t       features;
-       /* user-changeable features */
        netdev_features_t       hw_features;
-       /* user-requested features */
        netdev_features_t       wanted_features;
-       /* mask of features inheritable by VLAN devices */
        netdev_features_t       vlan_features;
-       /* mask of features inherited by encapsulating devices
-        * This field indicates what encapsulation offloads
-        * the hardware is capable of doing, and drivers will
-        * need to set them appropriately.
-        */
        netdev_features_t       hw_enc_features;
-       /* mask of fetures inheritable by MPLS */
        netdev_features_t       mpls_features;
 
-       /* Interface index. Unique device identifier    */
        int                     ifindex;
        int                     iflink;
 
        struct net_device_stats stats;
 
-       /* dropped packets by core network, Do not use this in drivers */
        atomic_long_t           rx_dropped;
        atomic_long_t           tx_dropped;
 
-       /* Stats to monitor carrier on<->off transitions */
        atomic_t                carrier_changes;
 
 #ifdef CONFIG_WIRELESS_EXT
-       /* List of functions to handle Wireless Extensions (instead of ioctl).
-        * See <net/iw_handler.h> for details. Jean II */
        const struct iw_handler_def *   wireless_handlers;
-       /* Instance data managed by the core of Wireless Extensions. */
        struct iw_public_data * wireless_data;
 #endif
-       /* Management operations */
        const struct net_device_ops *netdev_ops;
        const struct ethtool_ops *ethtool_ops;
        const struct forwarding_accel_ops *fwd_ops;
 
-       /* Hardware header description */
        const struct header_ops *header_ops;
 
-       unsigned int            flags;  /* interface flags (a la BSD)   */
-       unsigned int            priv_flags; /* Like 'flags' but invisible to userspace.
-                                            * See if.h for definitions. */
+       unsigned int            flags;
+       unsigned int            priv_flags;
+
        unsigned short          gflags;
-       unsigned short          padded; /* How much padding added by alloc_netdev() */
+       unsigned short          padded;
 
-       unsigned char           operstate; /* RFC2863 operstate */
-       unsigned char           link_mode; /* mapping policy to operstate */
+       unsigned char           operstate;
+       unsigned char           link_mode;
 
-       unsigned char           if_port;        /* Selectable AUI, TP,..*/
-       unsigned char           dma;            /* DMA channel          */
+       unsigned char           if_port;
+       unsigned char           dma;
 
-       unsigned int            mtu;    /* interface MTU value          */
-       unsigned short          type;   /* interface hardware type      */
-       unsigned short          hard_header_len;        /* hardware hdr length  */
+       unsigned int            mtu;
+       unsigned short          type;
+       unsigned short          hard_header_len;
 
-       /* extra head- and tailroom the hardware may need, but not in all cases
-        * can this be guaranteed, especially tailroom. Some cases also use
-        * LL_MAX_HEADER instead to allocate the skb.
-        */
        unsigned short          needed_headroom;
        unsigned short          needed_tailroom;
 
        /* Interface address info. */
-       unsigned char           perm_addr[MAX_ADDR_LEN]; /* permanent hw address */
-       unsigned char           addr_assign_type; /* hw address assignment type */
-       unsigned char           addr_len;       /* hardware address length      */
+       unsigned char           perm_addr[MAX_ADDR_LEN];
+       unsigned char           addr_assign_type;
+       unsigned char           addr_len;
        unsigned short          neigh_priv_len;
-       unsigned short          dev_id;         /* Used to differentiate devices
-                                                * that share the same link
-                                                * layer address
-                                                */
-       unsigned short          dev_port;       /* Used to differentiate
-                                                * devices that share the same
-                                                * function
-                                                */
+       unsigned short          dev_id;
+       unsigned short          dev_port;
        spinlock_t              addr_list_lock;
-       struct netdev_hw_addr_list      uc;     /* Unicast mac addresses */
-       struct netdev_hw_addr_list      mc;     /* Multicast mac addresses */
-       struct netdev_hw_addr_list      dev_addrs; /* list of device
-                                                   * hw addresses
-                                                   */
+       struct netdev_hw_addr_list      uc;
+       struct netdev_hw_addr_list      mc;
+       struct netdev_hw_addr_list      dev_addrs;
+
 #ifdef CONFIG_SYSFS
        struct kset             *queues_kset;
 #endif
 
+       unsigned char           name_assign_type;
+
        bool                    uc_promisc;
        unsigned int            promiscuity;
        unsigned int            allmulti;
@@ -1387,40 +1545,34 @@ struct net_device {
        /* Protocol specific pointers */
 
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
-       struct vlan_info __rcu  *vlan_info;     /* VLAN info */
+       struct vlan_info __rcu  *vlan_info;
 #endif
 #if IS_ENABLED(CONFIG_NET_DSA)
-       struct dsa_switch_tree  *dsa_ptr;       /* dsa specific data */
+       struct dsa_switch_tree  *dsa_ptr;
 #endif
 #if IS_ENABLED(CONFIG_TIPC)
-       struct tipc_bearer __rcu *tipc_ptr;     /* TIPC specific data */
+       struct tipc_bearer __rcu *tipc_ptr;
 #endif
-       void                    *atalk_ptr;     /* AppleTalk link       */
-       struct in_device __rcu  *ip_ptr;        /* IPv4 specific data   */
-       struct dn_dev __rcu     *dn_ptr;        /* DECnet specific data */
-       struct inet6_dev __rcu  *ip6_ptr;       /* IPv6 specific data */
-       void                    *ax25_ptr;      /* AX.25 specific data */
-       struct wireless_dev     *ieee80211_ptr; /* IEEE 802.11 specific data,
-                                                  assign before registering */
+       void                    *atalk_ptr;
+       struct in_device __rcu  *ip_ptr;
+       struct dn_dev __rcu     *dn_ptr;
+       struct inet6_dev __rcu  *ip6_ptr;
+       void                    *ax25_ptr;
+       struct wireless_dev     *ieee80211_ptr;
 
 /*
  * Cache lines mostly used on receive path (including eth_type_trans())
  */
-       unsigned long           last_rx;        /* Time of last Rx */
+       unsigned long           last_rx;
 
        /* Interface address info used in eth_type_trans() */
-       unsigned char           *dev_addr;      /* hw address, (before bcast
-                                                  because most packets are
-                                                  unicast) */
+       unsigned char           *dev_addr;
 
 
 #ifdef CONFIG_SYSFS
        struct netdev_rx_queue  *_rx;
 
-       /* Number of RX queues allocated at register_netdev() time */
        unsigned int            num_rx_queues;
-
-       /* Number of RX queues currently active in device */
        unsigned int            real_num_rx_queues;
 
 #endif
@@ -1429,33 +1581,23 @@ struct net_device {
        void __rcu              *rx_handler_data;
 
        struct netdev_queue __rcu *ingress_queue;
-       unsigned char           broadcast[MAX_ADDR_LEN];        /* hw bcast add */
+       unsigned char           broadcast[MAX_ADDR_LEN];
 
 
 /*
  * Cache lines mostly used on transmit path
  */
        struct netdev_queue     *_tx ____cacheline_aligned_in_smp;
-
-       /* Number of TX queues allocated at alloc_netdev_mq() time  */
        unsigned int            num_tx_queues;
-
-       /* Number of TX queues currently active in device  */
        unsigned int            real_num_tx_queues;
-
-       /* root qdisc from userspace point of view */
        struct Qdisc            *qdisc;
-
-       unsigned long           tx_queue_len;   /* Max frames per queue allowed */
+       unsigned long           tx_queue_len;
        spinlock_t              tx_global_lock;
 
 #ifdef CONFIG_XPS
        struct xps_dev_maps __rcu *xps_maps;
 #endif
 #ifdef CONFIG_RFS_ACCEL
-       /* CPU reverse-mapping for RX completion interrupts, indexed
-        * by RX queue number.  Assigned by driver.  This must only be
-        * set if the ndo_rx_flow_steer operation is defined. */
        struct cpu_rmap         *rx_cpu_rmap;
 #endif
 
@@ -1465,22 +1607,17 @@ struct net_device {
         * trans_start here is expensive for high speed devices on SMP,
         * please use netdev_queue->trans_start instead.
         */
-       unsigned long           trans_start;    /* Time (in jiffies) of last Tx */
+       unsigned long           trans_start;
 
-       int                     watchdog_timeo; /* used by dev_watchdog() */
+       int                     watchdog_timeo;
        struct timer_list       watchdog_timer;
 
-       /* Number of references to this device */
        int __percpu            *pcpu_refcnt;
-
-       /* delayed register/unregister */
        struct list_head        todo_list;
-       /* device index hash chain */
-       struct hlist_node       index_hlist;
 
+       struct hlist_node       index_hlist;
        struct list_head        link_watch_list;
 
-       /* register/unregister state machine */
        enum { NETREG_UNINITIALIZED=0,
               NETREG_REGISTERED,       /* completed register_netdevice */
               NETREG_UNREGISTERING,    /* called unregister_netdevice */
@@ -1489,14 +1626,13 @@ struct net_device {
               NETREG_DUMMY,            /* dummy device for NAPI poll */
        } reg_state:8;
 
-       bool dismantle; /* device is going do be freed */
+       bool dismantle;
 
        enum {
                RTNL_LINK_INITIALIZED,
                RTNL_LINK_INITIALIZING,
        } rtnl_link_state:16;
 
-       /* Called from unregister, can be used to call free_netdev */
        void (*destructor)(struct net_device *dev);
 
 #ifdef CONFIG_NETPOLL
@@ -1504,31 +1640,25 @@ struct net_device {
 #endif
 
 #ifdef CONFIG_NET_NS
-       /* Network namespace this network device is inside */
        struct net              *nd_net;
 #endif
 
        /* mid-layer private */
        union {
-               void                            *ml_priv;
-               struct pcpu_lstats __percpu     *lstats; /* loopback stats */
+               void                                    *ml_priv;
+               struct pcpu_lstats __percpu             *lstats;
                struct pcpu_sw_netstats __percpu        *tstats;
-               struct pcpu_dstats __percpu     *dstats; /* dummy stats */
-               struct pcpu_vstats __percpu     *vstats; /* veth stats */
+               struct pcpu_dstats __percpu             *dstats;
+               struct pcpu_vstats __percpu             *vstats;
        };
-       /* GARP */
+
        struct garp_port __rcu  *garp_port;
-       /* MRP */
        struct mrp_port __rcu   *mrp_port;
 
-       /* class/net/name entry */
-       struct device           dev;
-       /* space for optional device, statistics, and wireless sysfs groups */
+       struct device   dev;
        const struct attribute_group *sysfs_groups[4];
-       /* space for optional per-rx queue attributes */
        const struct attribute_group *sysfs_rx_queue_group;
 
-       /* rtnetlink link ops */
        const struct rtnl_link_ops *rtnl_link_ops;
 
        /* for setting kernel sock attribute on TCP connection setup */
@@ -1538,7 +1668,6 @@ struct net_device {
        u16                     gso_max_segs;
 
 #ifdef CONFIG_DCB
-       /* Data Center Bridging netlink ops */
        const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
        u8 num_tc;
@@ -1546,20 +1675,14 @@ struct net_device {
        u8 prio_tc_map[TC_BITMASK + 1];
 
 #if IS_ENABLED(CONFIG_FCOE)
-       /* max exchange id for FCoE LRO by ddp */
        unsigned int            fcoe_ddp_xid;
 #endif
 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
        struct netprio_map __rcu *priomap;
 #endif
-       /* phy device may attach itself for hardware timestamping */
        struct phy_device *phydev;
-
        struct lock_class_key *qdisc_tx_busylock;
-
-       /* group the device belongs to */
        int group;
-
        struct pm_qos_request   pm_qos_req;
 };
 #define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2486,7 +2609,7 @@ static inline int netif_set_xps_queue(struct net_device *dev,
  * as a distribution range limit for the returned value.
  */
 static inline u16 skb_tx_hash(const struct net_device *dev,
-                             const struct sk_buff *skb)
+                             struct sk_buff *skb)
 {
        return __skb_tx_hash(dev, skb, dev->real_num_tx_queues);
 }
@@ -2987,13 +3110,15 @@ void ether_setup(struct net_device *dev);
 
 /* Support for loadable net-drivers */
 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+                                   unsigned char name_assign_type,
                                    void (*setup)(struct net_device *),
                                    unsigned int txqs, unsigned int rxqs);
-#define alloc_netdev(sizeof_priv, name, setup) \
-       alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+       alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
 
-#define alloc_netdev_mq(sizeof_priv, name, setup, count) \
-       alloc_netdev_mqs(sizeof_priv, name, setup, count, count)
+#define alloc_netdev_mq(sizeof_priv, name, name_assign_type, setup, count) \
+       alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, count, \
+                        count)
 
 int register_netdev(struct net_device *dev);
 void unregister_netdev(struct net_device *dev);
@@ -3377,11 +3502,26 @@ extern struct pernet_operations __net_initdata loopback_net_ops;
 
 static inline const char *netdev_name(const struct net_device *dev)
 {
-       if (dev->reg_state != NETREG_REGISTERED)
-               return "(unregistered net_device)";
+       if (!dev->name[0] || strchr(dev->name, '%'))
+               return "(unnamed net_device)";
        return dev->name;
 }
 
+static inline const char *netdev_reg_state(const struct net_device *dev)
+{
+       switch (dev->reg_state) {
+       case NETREG_UNINITIALIZED: return " (uninitialized)";
+       case NETREG_REGISTERED: return "";
+       case NETREG_UNREGISTERING: return " (unregistering)";
+       case NETREG_UNREGISTERED: return " (unregistered)";
+       case NETREG_RELEASED: return " (released)";
+       case NETREG_DUMMY: return " (dummy)";
+       }
+
+       WARN_ONCE(1, "%s: unknown reg_state %d\n", dev->name, dev->reg_state);
+       return " (unknown)";
+}
+
 __printf(3, 4)
 int netdev_printk(const char *level, const struct net_device *dev,
                  const char *format, ...);
@@ -3438,7 +3578,8 @@ do {                                                              \
  * file/line information and a backtrace.
  */
 #define netdev_WARN(dev, format, args...)                      \
-       WARN(1, "netdevice: %s\n" format, netdev_name(dev), ##args)
+       WARN(1, "netdevice: %s%s\n" format, netdev_name(dev),   \
+            netdev_reg_state(dev), ##args)
 
 /* netif printk helpers, similar to netdev_printk */
 
index 6a45fb5..447775e 100644 (file)
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
 #ifdef arch_trigger_all_cpu_backtrace
 static inline bool trigger_all_cpu_backtrace(void)
 {
-       arch_trigger_all_cpu_backtrace();
+       arch_trigger_all_cpu_backtrace(true);
 
        return true;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+       arch_trigger_all_cpu_backtrace(false);
+       return true;
+}
 #else
 static inline bool trigger_all_cpu_backtrace(void)
 {
        return false;
 }
+static inline bool trigger_allbutself_cpu_backtrace(void)
+{
+       return false;
+}
 #endif
 
 #ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,6 +57,7 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
 u64 hw_nmi_get_sample_period(int watchdog_thresh);
 extern int watchdog_user_enabled;
 extern int watchdog_thresh;
+extern int sysctl_softlockup_all_cpu_backtrace;
 struct ctl_table;
 extern int proc_dowatchdog(struct ctl_table *, int ,
                           void __user *, size_t *, loff_t *);
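
A hedged sketch of how the new helper and sysctl fit together in a lockup
report path (report_softlockup() is illustrative, not the actual watchdog
code):

    #include <linux/nmi.h>
    #include <linux/printk.h>

    static void report_softlockup(void)
    {
            dump_stack();   /* the stuck CPU first */
            if (sysctl_softlockup_all_cpu_backtrace &&
                !trigger_allbutself_cpu_backtrace())
                    pr_warn("all-CPU backtrace not supported on this arch\n");
    }
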
index 0511789..0ff360d 100644 (file)
@@ -73,6 +73,8 @@ extern int early_init_dt_scan_root(unsigned long node, const char *uname,
                                   int depth, void *data);
 
 extern bool early_init_dt_scan(void *params);
+extern bool early_init_dt_verify(void *params);
+extern void early_init_dt_scan_nodes(void);
 
 extern const char *of_flat_dt_get_machine_name(void);
 extern const void *of_flat_dt_match_machine(const void *default_match,
@@ -84,6 +86,7 @@ extern void unflatten_and_copy_device_tree(void);
 extern void early_init_devtree(void *);
 extern void early_get_first_memblock_info(void *, phys_addr_t *);
 extern u64 fdt_translate_address(const void *blob, int node_offset);
+extern void of_fdt_limit_memory(int limit);
 #else /* CONFIG_OF_FLATTREE */
 static inline void early_init_fdt_scan_reserved_mem(void) {}
 static inline const char *of_flat_dt_get_machine_name(void) { return NULL; }
index a70c949..d449018 100644 (file)
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
 
 extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
 
-extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                                  struct phy_device *phydev);
-
 #else /* CONFIG_OF */
 static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
 {
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
 {
        return NULL;
 }
-
-static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
-                                         struct phy_device *phydev)
-{
-}
 #endif /* CONFIG_OF */
 
 #if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644 (file)
index 0000000..90230d5
--- /dev/null
@@ -0,0 +1,27 @@
+#ifndef __LINUX_OSQ_LOCK_H
+#define __LINUX_OSQ_LOCK_H
+
+/*
+ * An MCS like lock especially tailored for optimistic spinning for sleeping
+ * lock implementations (mutex, rwsem, etc).
+ */
+
+#define OSQ_UNLOCKED_VAL (0)
+
+struct optimistic_spin_queue {
+       /*
+        * Stores an encoded value of the CPU # of the tail node in the queue.
+        * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
+        */
+       atomic_t tail;
+};
+
+/* Init macro and function. */
+#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
+
+static inline void osq_lock_init(struct optimistic_spin_queue *lock)
+{
+       atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
+}
+
+#endif
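
Since the queue is now embedded by value (see the mutex change above),
initialization uses osq_lock_init() or OSQ_LOCK_UNLOCKED instead of
NULLing a pointer. A minimal sketch with a hypothetical lock type:

    #include <linux/atomic.h>
    #include <linux/osq_lock.h>

    struct my_sleeping_lock {
            atomic_t                        count;
            struct optimistic_spin_queue    osq;    /* by value, one atomic_t */
    };

    static void my_sleeping_lock_init(struct my_sleeping_lock *l)
    {
            atomic_set(&l->count, 1);
            osq_lock_init(&l->osq);         /* tail = OSQ_UNLOCKED_VAL */
    }
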
index 3c545b4..8304959 100644 (file)
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
        ClearPageHead(page);
 }
 #endif
+
+#define PG_head_mask ((1L << PG_head))
+
 #else
 /*
  * Reduce page flag use as much as possible by overlapping
index 0a97b58..e1474ae 100644 (file)
@@ -398,6 +398,18 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
        return read_cache_page(mapping, index, filler, data);
 }
 
+/*
+ * Get the offset in units of PAGE_SIZE.
+ * (TODO: hugepage should have ->index in units of PAGE_SIZE)
+ */
+static inline pgoff_t page_to_pgoff(struct page *page)
+{
+       if (unlikely(PageHeadHuge(page)))
+               return page->index << compound_order(page);
+       else
+               return page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+}
+
 /*
  * Return byte-offset into filesystem object for page.
  */
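
A small sketch of what page_to_pgoff() buys callers (the helper below is
hypothetical): offsets compare uniformly whether or not a page is a huge
page head, since the huge-page index is scaled by compound_order():

    #include <linux/pagemap.h>

    static bool my_same_file_offset(struct page *a, struct page *b)
    {
            return a->mapping == b->mapping &&
                   page_to_pgoff(a) == page_to_pgoff(b);
    }
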
index a5fc7d0..dec01d6 100644 (file)
  * Declaration/definition used for per-CPU variables that must be read mostly.
  */
 #define DECLARE_PER_CPU_READ_MOSTLY(type, name)                        \
-       DECLARE_PER_CPU_SECTION(type, name, "..readmostly")
+       DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 #define DEFINE_PER_CPU_READ_MOSTLY(type, name)                         \
-       DEFINE_PER_CPU_SECTION(type, name, "..readmostly")
+       DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
 
 /*
  * Intermodule exports for per-CPU variables.  sparse forgets about
index 864ddaf..ed39956 100644 (file)
@@ -536,6 +536,33 @@ struct phy_driver {
        /* See set_wol, but for checking whether Wake on LAN is enabled. */
        void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
 
+       /*
+        * Called to inform a PHY device driver when the core is about to
+        * change the link state. This callback is supposed to be used as
+        * fixup hook for drivers that need to take action when the link
+        * state changes. Drivers are by no means allowed to mess with the
+        * PHY device structure in their implementations.
+        */
+       void (*link_change_notify)(struct phy_device *dev);
+
+       /* A function provided by a phy specific driver to override the
+        * PHY driver framework support for reading an MMD register
+        * from the PHY. If not supported, return -1. This function is
+        * optional for PHY specific drivers, if not provided then the
+        * default MMD read function is used by the PHY framework.
+        */
+       int (*read_mmd_indirect)(struct phy_device *dev, int ptrad,
+                                int devnum, int regnum);
+
+       /* A function provided by a phy specific driver to override the
+        * PHY driver framework support for writing an MMD register
+        * to the PHY. This function is optional for PHY specific drivers;
+        * if not provided then the default MMD write function is used by
+        * the PHY framework.
+        */
+       void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
+                                  int devnum, int regnum, u32 val);
+
        struct device_driver driver;
 };
 #define to_phy_driver(d) container_of(d, struct phy_driver, driver)
index aaad386..b537a25 100644 (file)
@@ -44,6 +44,7 @@ extern int prof_on __read_mostly;
 int profile_init(void);
 int profile_setup(char *str);
 void profile_tick(int type);
+int setup_profiling_timer(unsigned int multiplier);
 
 /*
  * Add multiple profiler hits to a given address:
index 7dfed71..159c987 100644 (file)
@@ -33,8 +33,8 @@
 #define PTP_CLASS_IPV4  0x10 /* event in an IPV4 UDP packet */
 #define PTP_CLASS_IPV6  0x20 /* event in an IPV6 UDP packet */
 #define PTP_CLASS_L2    0x30 /* event in a L2 packet */
-#define PTP_CLASS_VLAN  0x40 /* event in a VLAN tagged L2 packet */
-#define PTP_CLASS_PMASK 0xf0 /* mask for the packet type field */
+#define PTP_CLASS_PMASK 0x30 /* mask for the packet type field */
+#define PTP_CLASS_VLAN  0x40 /* event in a VLAN tagged packet */
 
 #define PTP_CLASS_V1_IPV4 (PTP_CLASS_V1 | PTP_CLASS_IPV4)
 #define PTP_CLASS_V1_IPV6 (PTP_CLASS_V1 | PTP_CLASS_IPV6) /* probably DNE */
@@ -54,7 +54,6 @@
 #define IP6_HLEN       40
 #define UDP_HLEN       8
 #define OFF_IHL                14
-#define OFF_PTP6       (ETH_HLEN + IP6_HLEN + UDP_HLEN)
 #define IPV4_HLEN(data) (((struct iphdr *)(data + OFF_IHL))->ihl << 2)
 
 #if defined(CONFIG_NET_PTP_CLASSIFY)
index 077904c..cc79eff 100644 (file)
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
  * calling arch_ptrace_stop() when it would be superfluous.  For example,
  * if the thread has not been back to user mode since the last stop, the
  * thread state might indicate that nothing needs to be done.
+ *
+ * This is guaranteed to be invoked once before a task stops for ptrace and
+ * may include arch-specific operations necessary prior to a ptrace stop.
  */
 #define arch_ptrace_stop_needed(code, info)    (0)
 #endif
index 5a75d19..6a94cc8 100644 (file)
@@ -44,7 +44,6 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
-#include <linux/percpu.h>
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -299,41 +298,6 @@ static inline void rcu_user_hooks_switch(struct task_struct *prev,
 bool __rcu_is_watching(void);
 #endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
 
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-#define RCU_COND_RESCHED_LIM 256       /* ms vs. 100s of ms. */
-DECLARE_PER_CPU(int, rcu_cond_resched_count);
-void rcu_resched(void);
-
-/*
- * Is it time to report RCU quiescent states?
- *
- * Note unsynchronized access to rcu_cond_resched_count.  Yes, we might
- * increment some random CPU's count, and possibly also load the result from
- * yet another CPU's count.  We might even clobber some other CPU's attempt
- * to zero its counter.  This is all OK because the goal is not precision,
- * but rather reasonable amortization of rcu_note_context_switch() overhead
- * and extremely high probability of avoiding RCU CPU stall warnings.
- * Note that this function has to be preempted in just the wrong place,
- * many thousands of times in a row, for anything bad to happen.
- */
-static inline bool rcu_should_resched(void)
-{
-       return raw_cpu_inc_return(rcu_cond_resched_count) >=
-              RCU_COND_RESCHED_LIM;
-}
-
-/*
- * Report quiscent states to RCU if it is time to do so.
- */
-static inline void rcu_cond_resched(void)
-{
-       if (unlikely(rcu_should_resched()))
-               rcu_resched();
-}
-
 /*
  * Infrastructure to implement the synchronize_() primitives in
  * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
  * initialization.
  */
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
+void init_rcu_head(struct rcu_head *head);
+void destroy_rcu_head(struct rcu_head *head);
 void init_rcu_head_on_stack(struct rcu_head *head);
 void destroy_rcu_head_on_stack(struct rcu_head *head);
 #else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
+static inline void init_rcu_head(struct rcu_head *head)
+{
+}
+
+static inline void destroy_rcu_head(struct rcu_head *head)
+{
+}
+
 static inline void init_rcu_head_on_stack(struct rcu_head *head)
 {
 }
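
The non-stack variants pair around a dynamically allocated rcu_head's
lifetime; they are no-ops unless CONFIG_DEBUG_OBJECTS_RCU_HEAD is set. A
hedged sketch (struct my_item is illustrative):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_item {
            struct rcu_head rcu;
            int payload;
    };

    static struct my_item *my_item_alloc(void)
    {
            struct my_item *p = kmalloc(sizeof(*p), GFP_KERNEL);

            if (p)
                    init_rcu_head(&p->rcu); /* register with debug-objects */
            return p;
    }

    static void my_item_free(struct my_item *p)
    {
            destroy_rcu_head(&p->rcu);      /* unregister before freeing */
            kfree(p);
    }
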
index a2d9d81..14ec18d 100644 (file)
@@ -395,6 +395,11 @@ static inline void regulator_bulk_free(int num_consumers,
 {
 }
 
+static inline int regulator_can_change_voltage(struct regulator *regulator)
+{
+       return 0;
+}
+
 static inline int regulator_set_voltage(struct regulator *regulator,
                                        int min_uV, int max_uV)
 {
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
new file mode 100644 (file)
index 0000000..9cda293
--- /dev/null
@@ -0,0 +1,213 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper by Josh Triplett, Paul E. McKenney
+ * and Jonathan Walpole:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _LINUX_RHASHTABLE_H
+#define _LINUX_RHASHTABLE_H
+
+#include <linux/rculist.h>
+
+struct rhash_head {
+       struct rhash_head               *next;
+};
+
+#define INIT_HASH_HEAD(ptr) ((ptr)->next = NULL)
+
+struct bucket_table {
+       size_t                          size;
+       struct rhash_head __rcu         *buckets[];
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 seed);
+
+struct rhashtable;
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @hash_rnd: Seed to use while hashing
+ * @max_shift: Maximum number of shifts while expanding
+ * @hashfn: Function to hash key
+ * @obj_hashfn: Function to hash object
+ * @grow_decision: If defined, may return true if table should expand
+ * @shrink_decision: If defined, may return true if table should shrink
+ * @mutex_is_held: Must return true if protecting mutex is held
+ */
+struct rhashtable_params {
+       size_t                  nelem_hint;
+       size_t                  key_len;
+       size_t                  key_offset;
+       size_t                  head_offset;
+       u32                     hash_rnd;
+       size_t                  max_shift;
+       rht_hashfn_t            hashfn;
+       rht_obj_hashfn_t        obj_hashfn;
+       bool                    (*grow_decision)(const struct rhashtable *ht,
+                                                size_t new_size);
+       bool                    (*shrink_decision)(const struct rhashtable *ht,
+                                                  size_t new_size);
+       int                     (*mutex_is_held)(void);
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @nelems: Number of elements in table
+ * @shift: Current size (1 << shift)
+ * @p: Configuration parameters
+ */
+struct rhashtable {
+       struct bucket_table __rcu       *tbl;
+       size_t                          nelems;
+       size_t                          shift;
+       struct rhashtable_params        p;
+};
+
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rht_mutex_is_held(const struct rhashtable *ht);
+#else
+static inline int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+{
+       return 1;
+}
+#endif /* CONFIG_PROVE_LOCKING */
+
+int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params);
+
+u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len);
+u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr);
+
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *node, gfp_t);
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *node, gfp_t);
+void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
+                            struct rhash_head **pprev, gfp_t flags);
+
+bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size);
+bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size);
+
+int rhashtable_expand(struct rhashtable *ht, gfp_t flags);
+int rhashtable_shrink(struct rhashtable *ht, gfp_t flags);
+
+void *rhashtable_lookup(const struct rhashtable *ht, const void *key);
+void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+                               bool (*compare)(void *, void *), void *arg);
+
+void rhashtable_destroy(const struct rhashtable *ht);
+
+#define rht_dereference(p, ht) \
+       rcu_dereference_protected(p, lockdep_rht_mutex_is_held(ht))
+
+#define rht_dereference_rcu(p, ht) \
+       rcu_dereference_check(p, lockdep_rht_mutex_is_held(ht))
+
+/* Internal, use rht_obj() instead */
+#define rht_entry(ptr, type, member) container_of(ptr, type, member)
+#define rht_entry_safe(ptr, type, member) \
+({ \
+       typeof(ptr) __ptr = (ptr); \
+          __ptr ? rht_entry(__ptr, type, member) : NULL; \
+})
+#define rht_entry_safe_rcu(ptr, type, member) \
+({ \
+       typeof(*ptr) __rcu *__ptr = (typeof(*ptr) __rcu __force *)ptr; \
+       __ptr ? container_of((typeof(ptr))rcu_dereference_raw(__ptr), type, member) : NULL; \
+})
+
+#define rht_next_entry_safe(pos, ht, member) \
+({ \
+       pos ? rht_entry_safe(rht_dereference((pos)->member.next, ht), \
+                            typeof(*(pos)), member) : NULL; \
+})
+
+/**
+ * rht_for_each - iterate over hash chain
+ * @pos:       &struct rhash_head to use as a loop cursor.
+ * @head:      head of the hash chain (struct rhash_head *)
+ * @ht:                pointer to your struct rhashtable
+ */
+#define rht_for_each(pos, head, ht) \
+       for (pos = rht_dereference(head, ht); \
+            pos; \
+            pos = rht_dereference((pos)->next, ht))
+
+/**
+ * rht_for_each_entry - iterate over hash chain of given type
+ * @pos:       type * to use as a loop cursor.
+ * @head:      head of the hash chain (struct rhash_head *)
+ * @ht:                pointer to your struct rhashtable
+ * @member:    name of the rhash_head within the hashable struct.
+ */
+#define rht_for_each_entry(pos, head, ht, member) \
+       for (pos = rht_entry_safe(rht_dereference(head, ht), \
+                                  typeof(*(pos)), member); \
+            pos; \
+            pos = rht_next_entry_safe(pos, ht, member))
+
+/**
+ * rht_for_each_entry_safe - safely iterate over hash chain of given type
+ * @pos:       type * to use as a loop cursor.
+ * @n:         type * to use for temporary next object storage
+ * @head:      head of the hash chain (struct rhash_head *)
+ * @ht:                pointer to your struct rhashtable
+ * @member:    name of the rhash_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive allows for the looped code to
+ * remove the loop cursor from the list.
+ */
+#define rht_for_each_entry_safe(pos, n, head, ht, member)              \
+       for (pos = rht_entry_safe(rht_dereference(head, ht), \
+                                 typeof(*(pos)), member), \
+            n = rht_next_entry_safe(pos, ht, member); \
+            pos; \
+            pos = n, \
+            n = rht_next_entry_safe(pos, ht, member))
+
+/**
+ * rht_for_each_rcu - iterate over rcu hash chain
+ * @pos:       &struct rhash_head to use as a loop cursor.
+ * @head:      head of the hash chain (struct rhash_head *)
+ * @ht:                pointer to your struct rhashtable
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu rhashtable mutation primitives such as rhashtable_insert()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_rcu(pos, head, ht) \
+       for (pos = rht_dereference_rcu(head, ht); \
+            pos; \
+            pos = rht_dereference_rcu((pos)->next, ht))
+
+/**
+ * rht_for_each_entry_rcu - iterate over rcu hash chain of given type
+ * @pos:       type * to use as a loop cursor.
+ * @head:      head of the hash chain (struct rhash_head *)
+ * @member:    name of the rhash_head within the hashable struct.
+ *
+ * This hash chain list-traversal primitive may safely run concurrently with
+ * the _rcu rhashtable mutation primitives such as rhashtable_insert()
+ * as long as the traversal is guarded by rcu_read_lock().
+ */
+#define rht_for_each_entry_rcu(pos, head, member) \
+       for (pos = rht_entry_safe_rcu(head, typeof(*(pos)), member); \
+            pos; \
+            pos = rht_entry_safe_rcu((pos)->member.next, \
+                                     typeof(*(pos)), member))
+
+#endif /* _LINUX_RHASHTABLE_H */
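
A minimal usage sketch of the new resizable hashtable API (hypothetical
consumer; struct rhashtable_params and jhash() are declared outside this
hunk, so the fields used below are assumptions from the rest of the header):

	struct test_obj {
		int			value;	/* doubles as the lookup key */
		struct rhash_head	node;
	};

	static struct rhashtable example_ht;

	static int example_add(struct test_obj *obj)
	{
		struct rhashtable_params params = {
			.nelem_hint	= 8,
			.head_offset	= offsetof(struct test_obj, node),
			.key_offset	= offsetof(struct test_obj, value),
			.key_len	= sizeof(int),
			.hashfn		= jhash,
		};
		int err = rhashtable_init(&example_ht, &params);

		if (err)
			return err;

		rhashtable_insert(&example_ht, &obj->node, GFP_KERNEL);
		return 0;
	}

Lookups then go through rhashtable_lookup(&example_ht, &key), and RCU
readers walk buckets with rht_for_each_entry_rcu() under rcu_read_lock().
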
index 0c8dc71..93c0a64 100644 (file)
@@ -65,6 +65,7 @@
 #define        RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION  0x40010012
 #define RNDIS_STATUS_WW_INDICATION             RNDIS_STATUS_MEDIA_SPECIFIC_INDICATION
 #define RNDIS_STATUS_LINK_SPEED_CHANGE         0x40010013L
+#define RNDIS_STATUS_NETWORK_CHANGE            0x40010018
 
 #define RNDIS_STATUS_NOT_RESETTABLE            0x80010001
 #define RNDIS_STATUS_SOFT_ERRORS               0x80010003
index 953937e..167bae7 100644 (file)
@@ -78,6 +78,7 @@ extern void __rtnl_unlock(void);
 extern int ndo_dflt_fdb_dump(struct sk_buff *skb,
                             struct netlink_callback *cb,
                             struct net_device *dev,
+                            struct net_device *filter_dev,
                             int idx);
 extern int ndo_dflt_fdb_add(struct ndmsg *ndm,
                            struct nlattr *tb[],
index d5b13bc..561e861 100644 (file)
 #ifdef __KERNEL__
 /*
  * the rw-semaphore definition
- * - if activity is 0 then there are no active readers or writers
- * - if activity is +ve then that is the number of active readers
- * - if activity is -1 then there is one active writer
+ * - if count is 0 then there are no active readers or writers
+ * - if count is +ve then that is the number of active readers
+ * - if count is -1 then there is one active writer
  * - if wait_list is not empty, then there are processes waiting for the semaphore
  */
 struct rw_semaphore {
-       __s32                   activity;
+       __s32                   count;
        raw_spinlock_t          wait_lock;
        struct list_head        wait_list;
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
index 8d79708..035d3c5 100644 (file)
 #include <linux/kernel.h>
 #include <linux/list.h>
 #include <linux/spinlock.h>
-
 #include <linux/atomic.h>
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#include <linux/osq_lock.h>
+#endif
 
-struct optimistic_spin_queue;
 struct rw_semaphore;
 
 #ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
 /* All arch specific implementations share the same struct */
 struct rw_semaphore {
        long count;
-       raw_spinlock_t wait_lock;
        struct list_head wait_list;
-#ifdef CONFIG_SMP
+       raw_spinlock_t wait_lock;
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+       struct optimistic_spin_queue osq; /* spinner MCS lock */
        /*
         * Write owner. Used as a speculative check to see
         * if the owner is running on the cpu.
         */
        struct task_struct *owner;
-       struct optimistic_spin_queue *osq; /* spinner MCS lock */
 #endif
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
        struct lockdep_map      dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 # define __RWSEM_DEP_MAP_INIT(lockname)
 #endif
 
-#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK)
-#define __RWSEM_INITIALIZER(name)                      \
-       { RWSEM_UNLOCKED_VALUE,                         \
-         __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),     \
-         LIST_HEAD_INIT((name).wait_list),             \
-         NULL, /* owner */                             \
-         NULL /* mcs lock */                           \
-         __RWSEM_DEP_MAP_INIT(name) }
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
+#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
 #else
-#define __RWSEM_INITIALIZER(name)                      \
-       { RWSEM_UNLOCKED_VALUE,                         \
-         __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock),     \
-         LIST_HEAD_INIT((name).wait_list)              \
-         __RWSEM_DEP_MAP_INIT(name) }
+#define __RWSEM_OPT_INIT(lockname)
 #endif
 
+#define __RWSEM_INITIALIZER(name)                              \
+       { .count = RWSEM_UNLOCKED_VALUE,                        \
+         .wait_list = LIST_HEAD_INIT((name).wait_list),        \
+         .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
+         __RWSEM_OPT_INIT(name)                                \
+         __RWSEM_DEP_MAP_INIT(name) }
+
 #define DECLARE_RWSEM(name) \
        struct rw_semaphore name = __RWSEM_INITIALIZER(name)
 
index 306f4f0..0376b05 100644 (file)
@@ -872,21 +872,21 @@ enum cpu_idle_type {
 #define SD_NUMA                        0x4000  /* cross-node balancing */
 
 #ifdef CONFIG_SCHED_SMT
-static inline const int cpu_smt_flags(void)
+static inline int cpu_smt_flags(void)
 {
        return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_SCHED_MC
-static inline const int cpu_core_flags(void)
+static inline int cpu_core_flags(void)
 {
        return SD_SHARE_PKG_RESOURCES;
 }
 #endif
 
 #ifdef CONFIG_NUMA
-static inline const int cpu_numa_flags(void)
+static inline int cpu_numa_flags(void)
 {
        return SD_NUMA;
 }
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
 bool cpus_share_cache(int this_cpu, int that_cpu);
 
 typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
-typedef const int (*sched_domain_flags_f)(void);
+typedef int (*sched_domain_flags_f)(void);
 
 #define SDTL_OVERLAP   0x01
 
index ec89301..281dece 100644 (file)
 #define CHECKSUM_COMPLETE      2
 #define CHECKSUM_PARTIAL       3
 
-#define SKB_DATA_ALIGN(X)      (((X) + (SMP_CACHE_BYTES - 1)) & \
-                                ~(SMP_CACHE_BYTES - 1))
+#define SKB_DATA_ALIGN(X)      ALIGN(X, SMP_CACHE_BYTES)
 #define SKB_WITH_OVERHEAD(X)   \
        ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
 #define SKB_MAX_ORDER(X, ORDER) \
@@ -211,18 +210,9 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
  * struct skb_shared_hwtstamps - hardware time stamps
  * @hwtstamp:  hardware time stamp transformed into duration
  *             since arbitrary point in time
- * @syststamp: hwtstamp transformed to system time base
  *
  * Software time stamps generated by ktime_get_real() are stored in
- * skb->tstamp. The relation between the different kinds of time
- * stamps is as follows:
- *
- * syststamp and tstamp can be compared against each other in
- * arbitrary combinations.  The accuracy of a
- * syststamp/tstamp/"syststamp from other device" comparison is
- * limited by the accuracy of the transformation into system time
- * base. This depends on the device driver and its underlying
- * hardware.
+ * skb->tstamp.
  *
  * hwtstamps can only be compared against other hwtstamps from
  * the same device.
@@ -232,7 +222,6 @@ static inline void skb_frag_size_sub(skb_frag_t *frag, int delta)
  */
 struct skb_shared_hwtstamps {
        ktime_t hwtstamp;
-       ktime_t syststamp;
 };
 
 /* Definitions for tx_flags in struct skb_shared_info */
@@ -455,6 +444,7 @@ static inline u32 skb_mstamp_us_delta(const struct skb_mstamp *t1,
  *     @ooo_okay: allow the mapping of a socket to a queue to be changed
  *     @l4_hash: indicate hash is a canonical 4-tuple hash over transport
  *             ports.
+ *     @sw_hash: indicates the hash was computed in the software stack
  *     @wifi_acked_valid: wifi_acked was set
  *     @wifi_acked: whether frame was acked on wifi or not
  *     @no_fcs:  Request NIC to treat last 4 bytes as Ethernet FCS
@@ -562,6 +552,7 @@ struct sk_buff {
        __u8                    pfmemalloc:1;
        __u8                    ooo_okay:1;
        __u8                    l4_hash:1;
+       __u8                    sw_hash:1;
        __u8                    wifi_acked_valid:1;
        __u8                    wifi_acked:1;
        __u8                    no_fcs:1;
@@ -575,7 +566,7 @@ struct sk_buff {
        __u8                    encap_hdr_csum:1;
        __u8                    csum_valid:1;
        __u8                    csum_complete_sw:1;
-       /* 3/5 bit hole (depending on ndisc_nodetype presence) */
+       /* 2/4 bit hole (depending on ndisc_nodetype presence) */
        kmemcheck_bitfield_end(flags2);
 
 #if defined CONFIG_NET_DMA || defined CONFIG_NET_RX_BUSY_POLL
@@ -830,13 +821,14 @@ static inline void
 skb_set_hash(struct sk_buff *skb, __u32 hash, enum pkt_hash_types type)
 {
        skb->l4_hash = (type == PKT_HASH_TYPE_L4);
+       skb->sw_hash = 0;
        skb->hash = hash;
 }
 
 void __skb_get_hash(struct sk_buff *skb);
 static inline __u32 skb_get_hash(struct sk_buff *skb)
 {
-       if (!skb->l4_hash)
+       if (!skb->l4_hash && !skb->sw_hash)
                __skb_get_hash(skb);
 
        return skb->hash;
@@ -850,6 +842,7 @@ static inline __u32 skb_get_hash_raw(const struct sk_buff *skb)
 static inline void skb_clear_hash(struct sk_buff *skb)
 {
        skb->hash = 0;
+       skb->sw_hash = 0;
        skb->l4_hash = 0;
 }
 
@@ -862,6 +855,7 @@ static inline void skb_clear_hash_if_not_l4(struct sk_buff *skb)
 static inline void skb_copy_hash(struct sk_buff *to, const struct sk_buff *from)
 {
        to->hash = from->hash;
+       to->sw_hash = from->sw_hash;
        to->l4_hash = from->l4_hash;
 };
 
@@ -3005,7 +2999,7 @@ static inline bool skb_rx_queue_recorded(const struct sk_buff *skb)
        return skb->queue_mapping != 0;
 }
 
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues);
 
 static inline struct sec_path *skb_sec_path(struct sk_buff *skb)
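
A hedged sketch of what the new bit expresses; the real setter is
__skb_get_hash() in net/core/flow_dissector.c, which is not part of this
hunk, so the helper below is illustrative only:

	static inline void example_store_sw_hash(struct sk_buff *skb, __u32 hash)
	{
		skb->hash = hash;
		skb->sw_hash = 1;	/* computed by the stack, not the NIC */
		skb->l4_hash = 0;
	}

With sw_hash set, skb_get_hash() returns the stored value instead of
re-running flow dissection, and skb_clear_hash() drops both markers.
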
index 8e98297..ec538fc 100644 (file)
@@ -305,8 +305,6 @@ struct ucred {
 /* IPX options */
 #define IPX_TYPE       1
 
-extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                              int offset, int len);
 extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 
                                          struct iovec *iov, 
                                          int offset, 
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
                               unsigned long nr_segs);
 
 extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
-extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
-                            int offset, int len);
 extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
 extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
 
diff --git a/include/linux/spi/cc2520.h b/include/linux/spi/cc2520.h
new file mode 100644 (file)
index 0000000..85b8ee6
--- /dev/null
@@ -0,0 +1,26 @@
+/* Header file for cc2520 radio driver
+ *
+ * Copyright (C) 2014 Varka Bhadram <varkab@cdac.in>
+ *                    Md.Jamal Mohiuddin <mjmohiuddin@cdac.in>
+ *                    P Sowjanya <sowjanyap@cdac.in>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ */
+
+#ifndef __CC2520_H
+#define __CC2520_H
+
+struct cc2520_platform_data {
+       int fifo;
+       int fifop;
+       int cca;
+       int sfd;
+       int reset;
+       int vreg;
+};
+
+#endif
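
A board-file sketch for the new platform data (the GPIO numbers are
invented for illustration):

	static struct cc2520_platform_data example_cc2520_pdata = {
		.fifo	= 19,
		.fifop	= 20,
		.cca	= 21,
		.sfd	= 22,
		.reset	= 23,
		.vreg	= 24,
	};
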
index 6f27d4f..cd63851 100644 (file)
@@ -112,6 +112,8 @@ struct plat_stmmacenet_data {
        int riwt_off;
        int max_speed;
        int maxmtu;
+       int multicast_filter_bins;
+       int unicast_filter_entries;
        void (*fix_mac_speed)(void *priv, unsigned int speed);
        void (*bus_setup)(void __iomem *ioaddr);
        void *(*setup)(struct platform_device *pdev);
index f76994b..519064e 100644 (file)
@@ -327,6 +327,7 @@ extern unsigned long get_safe_page(gfp_t gfp_mask);
 extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
 extern int hibernate(void);
 extern bool system_entering_hibernation(void);
+extern bool hibernation_available(void);
 asmlinkage int swsusp_save(void);
 extern struct pbe *restore_pblist;
 #else /* CONFIG_HIBERNATION */
@@ -339,6 +340,7 @@ static inline void swsusp_unset_page_free(struct page *p) {}
 static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
 static inline int hibernate(void) { return -ENOSYS; }
 static inline bool system_entering_hibernation(void) { return false; }
+static inline bool hibernation_available(void) { return false; }
 #endif /* CONFIG_HIBERNATION */
 
 /* Hibernation and suspend events */
index a051321..fa5258f 100644 (file)
@@ -111,10 +111,7 @@ struct tcp_request_sock_ops;
 
 struct tcp_request_sock {
        struct inet_request_sock        req;
-#ifdef CONFIG_TCP_MD5SIG
-       /* Only used by TCP MD5 Signature so far. */
        const struct tcp_request_sock_ops *af_specific;
-#endif
        struct sock                     *listener; /* needed for TFO */
        u32                             rcv_isn;
        u32                             snt_isn;
index e2231e4..09a7cff 100644 (file)
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
        return i->count;
 }
 
-static inline void iov_iter_truncate(struct iov_iter *i, size_t count)
+/*
+ * Cap the iov_iter at the given limit; note that the second argument is
+ * *not* the new size - it is an upper limit on it.  Passing a value
+ * greater than the amount of data in the iov_iter is fine - it will
+ * just do nothing in that case.
+ */
+static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
 {
+       /*
+        * count doesn't have to fit in size_t - comparison extends both
+        * operands to u64 here and any value that would be truncated by
+        * conversion in assignment is by definition greater than all
+        * values of size_t, including old i->count.
+        */
        if (i->count > count)
                i->count = count;
 }
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
 
 int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
 int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
-
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+                       int offset, int len);
+int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
+                     int offset, int len);
 
 #endif
index 1a64b26..9b7de1b 100644 (file)
@@ -70,7 +70,9 @@
        US_FLAG(NEEDS_CAP16,    0x00400000)                     \
                /* cannot handle READ_CAPACITY_10 */            \
        US_FLAG(IGNORE_UAS,     0x00800000)                     \
-               /* Device advertises UAS but it is broken */
+               /* Device advertises UAS but it is broken */    \
+       US_FLAG(BROKEN_FUA,     0x01000000)                     \
+               /* Cannot handle FUA in WRITE or READ CDBs */   \
 
 #define US_FLAG(name, value)   US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
index a975edf..597b88a 100644 (file)
@@ -81,8 +81,8 @@ struct dcbnl_rtnl_ops {
        void (*setbcncfg)(struct net_device *, int, u32);
        void (*getbcnrp)(struct net_device *, int, u8 *);
        void (*setbcnrp)(struct net_device *, int, u8);
-       u8   (*setapp)(struct net_device *, u8, u16, u8);
-       u8   (*getapp)(struct net_device *, u8, u16);
+       int  (*setapp)(struct net_device *, u8, u16, u8);
+       int  (*getapp)(struct net_device *, u8, u16);
        u8   (*getfeatcfg)(struct net_device *, int, u8 *);
        u8   (*setfeatcfg)(struct net_device *, int, u8);
 
index 7e64bd8..6667a05 100644 (file)
@@ -1,6 +1,19 @@
 #ifndef _NET_FLOW_KEYS_H
 #define _NET_FLOW_KEYS_H
 
+/* struct flow_keys:
+ *     @src: source ip address in case of IPv4
+ *           For IPv6 it contains 32bit hash of src address
+ *     @dst: destination ip address in case of IPv4
+ *           For IPv6 it contains 32bit hash of dst address
+ *     @ports: port numbers of Transport header
+ *             port16[0]: src port number
+ *             port16[1]: dst port number
+ *     @thoff: Transport header offset
+ *     @n_proto: Network header protocol (e.g. IPv4/IPv6)
+ *     @ip_proto: Transport header protocol (e.g. TCP/UDP)
+ * All the members, except thoff, are in network byte order.
+ */
 struct flow_keys {
        /* (src,dst) must be grouped, in the same way as in the IP header */
        __be32 src;
@@ -10,9 +23,11 @@ struct flow_keys {
                __be16 port16[2];
        };
        u16 thoff;
+       u16 n_proto;
        u8 ip_proto;
 };
 
 bool skb_flow_dissect(const struct sk_buff *skb, struct flow_keys *flow);
 __be32 skb_flow_get_ports(const struct sk_buff *skb, int thoff, u8 ip_proto);
+u32 flow_hash_from_keys(struct flow_keys *keys);
 #endif
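
A minimal sketch tying the two exported helpers together (hypothetical
caller; error handling elided):

	static u32 example_flow_hash(const struct sk_buff *skb)
	{
		struct flow_keys keys;

		if (!skb_flow_dissect(skb, &keys))
			return 0;	/* dissection failed */

		return flow_hash_from_keys(&keys);
	}
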
index b4956a5..d07b1a6 100644 (file)
@@ -205,6 +205,7 @@ struct inet6_dev {
        struct timer_list       rs_timer;
        __u8                    rs_probes;
 
+       __u8                    addr_gen_mode;
        unsigned long           tstamp; /* ipv6InterfaceTable update timestamp */
        struct rcu_head         rcu;
 };
index 6f59de9..65a8855 100644 (file)
@@ -4,10 +4,6 @@
 #include <linux/percpu_counter.h>
 
 struct netns_frags {
-       int                     nqueues;
-       struct list_head        lru_list;
-       spinlock_t              lru_lock;
-
        /* The percpu_counter "mem" need to be cacheline aligned.
         *  mem.count must not share cacheline with other writers
         */
@@ -19,25 +15,49 @@ struct netns_frags {
        int                     low_thresh;
 };
 
+/**
+ * fragment queue flags
+ *
+ * @INET_FRAG_FIRST_IN: first fragment has arrived
+ * @INET_FRAG_LAST_IN: final fragment has arrived
+ * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
+ * @INET_FRAG_EVICTED: frag queue is being evicted
+ */
+enum {
+       INET_FRAG_FIRST_IN      = BIT(0),
+       INET_FRAG_LAST_IN       = BIT(1),
+       INET_FRAG_COMPLETE      = BIT(2),
+       INET_FRAG_EVICTED       = BIT(3)
+};
+
+/**
+ * struct inet_frag_queue - fragment queue
+ *
+ * @lock: spinlock protecting the queue
+ * @timer: queue expiration timer
+ * @list: hash bucket list
+ * @refcnt: reference count of the queue
+ * @fragments: received fragments head
+ * @fragments_tail: received fragments tail
+ * @stamp: timestamp of the last received fragment
+ * @len: total length of the original datagram
+ * @meat: length of received fragments so far
+ * @flags: fragment queue flags
+ * @max_size: (ipv4 only) maximum received fragment size with IP_DF set
+ * @net: namespace that this frag belongs to
+ */
 struct inet_frag_queue {
        spinlock_t              lock;
-       struct timer_list       timer;      /* when will this queue expire? */
-       struct list_head        lru_list;   /* lru list member */
+       struct timer_list       timer;
        struct hlist_node       list;
        atomic_t                refcnt;
-       struct sk_buff          *fragments; /* list of received fragments */
+       struct sk_buff          *fragments;
        struct sk_buff          *fragments_tail;
        ktime_t                 stamp;
-       int                     len;        /* total length of orig datagram */
+       int                     len;
        int                     meat;
-       __u8                    last_in;    /* first/last segment arrived? */
-
-#define INET_FRAG_COMPLETE     4
-#define INET_FRAG_FIRST_IN     2
-#define INET_FRAG_LAST_IN      1
-
+       __u8                    flags;
        u16                     max_size;
-
        struct netns_frags      *net;
 };
 
@@ -48,7 +68,7 @@ struct inet_frag_queue {
  *            rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
  *            struct frag_queue))
  */
-#define INETFRAGS_MAXDEPTH             128
+#define INETFRAGS_MAXDEPTH     128
 
 struct inet_frag_bucket {
        struct hlist_head       chain;
@@ -57,49 +77,52 @@ struct inet_frag_bucket {
 
 struct inet_frags {
        struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
-       /* This rwlock is a global lock (seperate per IPv4, IPv6 and
-        * netfilter). Important to keep this on a seperate cacheline.
-        * Its primarily a rebuild protection rwlock.
-        */
-       rwlock_t                lock ____cacheline_aligned_in_smp;
-       int                     secret_interval;
-       struct timer_list       secret_timer;
+
+       struct work_struct      frags_work;
+       unsigned int next_bucket;
+       unsigned long last_rebuild_jiffies;
+       bool rebuild;
 
        /* The first call to hashfn is responsible for initializing
         * rnd. This is best done with net_get_random_once.
+        *
+        * rnd_seqlock is used to let hash insertion detect
+        * when it needs to re-lookup the hash chain to use.
         */
        u32                     rnd;
+       seqlock_t               rnd_seqlock;
        int                     qsize;
 
-       unsigned int            (*hashfn)(struct inet_frag_queue *);
-       bool                    (*match)(struct inet_frag_queue *q, void *arg);
+       unsigned int            (*hashfn)(const struct inet_frag_queue *);
+       bool                    (*match)(const struct inet_frag_queue *q,
+                                        const void *arg);
        void                    (*constructor)(struct inet_frag_queue *q,
-                                               void *arg);
+                                              const void *arg);
        void                    (*destructor)(struct inet_frag_queue *);
        void                    (*skb_free)(struct sk_buff *);
        void                    (*frag_expire)(unsigned long data);
+       struct kmem_cache       *frags_cachep;
+       const char              *frags_cache_name;
 };
 
-void inet_frags_init(struct inet_frags *);
+int inet_frags_init(struct inet_frags *);
 void inet_frags_fini(struct inet_frags *);
 
 void inet_frags_init_net(struct netns_frags *nf);
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
 
 void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q,
-                               struct inet_frags *f, int *work);
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force);
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-               struct inet_frags *f, void *key, unsigned int hash)
-       __releases(&f->lock);
+               struct inet_frags *f, void *key, unsigned int hash);
+
 void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
                                   const char *prefix);
 
 static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
 {
        if (atomic_dec_and_test(&q->refcnt))
-               inet_frag_destroy(q, f, NULL);
+               inet_frag_destroy(q, f);
 }
 
 /* Memory Tracking Functions. */
@@ -131,9 +154,9 @@ static inline void init_frag_mem_limit(struct netns_frags *nf)
        percpu_counter_init(&nf->mem, 0);
 }
 
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
+static inline unsigned int sum_frag_mem_limit(struct netns_frags *nf)
 {
-       int res;
+       unsigned int res;
 
        local_bh_disable();
        res = percpu_counter_sum_positive(&nf->mem);
@@ -142,31 +165,6 @@ static inline int sum_frag_mem_limit(struct netns_frags *nf)
        return res;
 }
 
-static inline void inet_frag_lru_move(struct inet_frag_queue *q)
-{
-       spin_lock(&q->net->lru_lock);
-       if (!list_empty(&q->lru_list))
-               list_move_tail(&q->lru_list, &q->net->lru_list);
-       spin_unlock(&q->net->lru_lock);
-}
-
-static inline void inet_frag_lru_del(struct inet_frag_queue *q)
-{
-       spin_lock(&q->net->lru_lock);
-       list_del_init(&q->lru_list);
-       q->net->nqueues--;
-       spin_unlock(&q->net->lru_lock);
-}
-
-static inline void inet_frag_lru_add(struct netns_frags *nf,
-                                    struct inet_frag_queue *q)
-{
-       spin_lock(&nf->lru_lock);
-       list_add_tail(&q->lru_list, &nf->lru_list);
-       q->net->nqueues++;
-       spin_unlock(&nf->lru_lock);
-}
-
 /* RFC 3168 support :
 * We want to check ECN values of all fragments, to detect invalid combinations.
  * In ipq->ecn, we store the OR value of each ip4_frag_ecn() fragment value.
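
A sketch of how the open-coded last_in checks read with the new flag
names (hypothetical helper):

	static bool example_datagram_complete(const struct inet_frag_queue *q)
	{
		const __u8 both = INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN;

		return (q->flags & both) == both;
	}
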
index b1edf17..a829b77 100644 (file)
@@ -88,8 +88,10 @@ struct inet_request_sock {
                                acked      : 1,
                                no_srccheck: 1;
        kmemcheck_bitfield_end(flags);
-       struct ip_options_rcu   *opt;
-       struct sk_buff          *pktopts;
+       union {
+               struct ip_options_rcu   *opt;
+               struct sk_buff          *pktopts;
+       };
        u32                     ir_mark;
 };
 
index 61474ea..6c56603 100644 (file)
@@ -108,6 +108,7 @@ struct inet_timewait_sock {
 #define tw_family              __tw_common.skc_family
 #define tw_state               __tw_common.skc_state
 #define tw_reuse               __tw_common.skc_reuse
+#define tw_ipv6only            __tw_common.skc_ipv6only
 #define tw_bound_dev_if                __tw_common.skc_bound_dev_if
 #define tw_node                        __tw_common.skc_nulls_node
 #define tw_bind_node           __tw_common.skc_bind_node
@@ -131,7 +132,7 @@ struct inet_timewait_sock {
        __be16                  tw_sport;
        kmemcheck_bitfield_begin(flags);
        /* And these are ours. */
-       unsigned int            tw_ipv6only     : 1,
+       unsigned int            tw_pad0         : 1,    /* 1 bit hole */
                                tw_transparent  : 1,
                                tw_flowlabel    : 20,
                                tw_pad          : 2,    /* 2 bits hole */
index 0e795df..db4a771 100644 (file)
@@ -31,6 +31,7 @@
 #include <net/route.h>
 #include <net/snmp.h>
 #include <net/flow.h>
+#include <net/flow_keys.h>
 
 struct sock;
 
@@ -215,6 +216,12 @@ static inline int inet_is_local_reserved_port(struct net *net, int port)
                return 0;
        return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
 }
+
+static inline bool sysctl_dev_name_is_allowed(const char *name)
+{
+       return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
+}
+
 #else
 static inline int inet_is_local_reserved_port(struct net *net, int port)
 {
@@ -309,16 +316,7 @@ static inline unsigned int ip_skb_dst_mtu(const struct sk_buff *skb)
        }
 }
 
-#define IP_IDENTS_SZ 2048u
-extern atomic_t *ip_idents;
-
-static inline u32 ip_idents_reserve(u32 hash, int segs)
-{
-       atomic_t *id_ptr = ip_idents + hash % IP_IDENTS_SZ;
-
-       return atomic_add_return(segs, id_ptr) - segs;
-}
-
+u32 ip_idents_reserve(u32 hash, int segs);
 void __ip_select_ident(struct iphdr *iph, int segs);
 
 static inline void ip_select_ident_segs(struct sk_buff *skb, struct sock *sk, int segs)
@@ -353,6 +351,19 @@ static inline __wsum inet_compute_pseudo(struct sk_buff *skb, int proto)
                                  skb->len, proto, 0);
 }
 
+static inline void inet_set_txhash(struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct flow_keys keys;
+
+       keys.src = inet->inet_saddr;
+       keys.dst = inet->inet_daddr;
+       keys.port16[0] = inet->inet_sport;
+       keys.port16[1] = inet->inet_dport;
+
+       sk->sk_txhash = flow_hash_from_keys(&keys);
+}
+
 /*
  *     Map a multicast IP onto multicast MAC for type ethernet.
  */
@@ -481,7 +492,6 @@ static inline struct sk_buff *ip_check_defrag(struct sk_buff *skb, u32 user)
 }
 #endif
 int ip_frag_mem(struct net *net);
-int ip_frag_nqueues(struct net *net);
 
 /*
  *     Functions provided by ip_forward.c
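
inet_set_txhash() is meant to run once the 4-tuple is fixed, e.g. from a
TCP connect path; a hypothetical call site:

	static void example_connected(struct sock *sk)
	{
		/* saddr/daddr/sport/dport are final at this point */
		inet_set_txhash(sk);	/* fills sk->sk_txhash */
	}

Later in this diff, skb_set_owner_w() copies sk_txhash into skb->hash via
skb_set_hash_from_sk(), so transmitted skbs inherit the connection's hash.
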
index 574337f..a2db816 100644 (file)
@@ -19,6 +19,7 @@
 #include <net/if_inet6.h>
 #include <net/ndisc.h>
 #include <net/flow.h>
+#include <net/flow_keys.h>
 #include <net/snmp.h>
 
 #define SIN6_LEN_RFC2133       24
@@ -298,11 +299,6 @@ static inline bool ipv6_accept_ra(struct inet6_dev *idev)
 }
 
 #if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_nqueues(struct net *net)
-{
-       return net->ipv6.frags.nqueues;
-}
-
 static inline int ip6_frag_mem(struct net *net)
 {
        return sum_frag_mem_limit(&net->ipv6.frags);
@@ -495,8 +491,8 @@ struct ip6_create_arg {
        u8 ecn;
 };
 
-void ip6_frag_init(struct inet_frag_queue *q, void *a);
-bool ip6_frag_match(struct inet_frag_queue *q, void *a);
+void ip6_frag_init(struct inet_frag_queue *q, const void *a);
+bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
 
 /*
  *     Equivalent of ipv4 struct ip
@@ -557,24 +553,29 @@ static inline u32 __ipv6_addr_jhash(const struct in6_addr *a, const u32 initval)
 static inline bool ipv6_addr_loopback(const struct in6_addr *a)
 {
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-       const unsigned long *ul = (const unsigned long *)a;
+       const __be64 *be = (const __be64 *)a;
 
-       return (ul[0] | (ul[1] ^ cpu_to_be64(1))) == 0UL;
+       return (be[0] | (be[1] ^ cpu_to_be64(1))) == 0UL;
 #else
        return (a->s6_addr32[0] | a->s6_addr32[1] |
-               a->s6_addr32[2] | (a->s6_addr32[3] ^ htonl(1))) == 0;
+               a->s6_addr32[2] | (a->s6_addr32[3] ^ cpu_to_be32(1))) == 0;
 #endif
 }
 
+/*
+ * Note that we must __force cast these to unsigned long to make sparse happy,
+ * since all of the endian-annotated types are fixed size regardless of arch.
+ */
 static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
 {
        return (
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
-               *(__be64 *)a |
+               *(unsigned long *)a |
 #else
-               (a->s6_addr32[0] | a->s6_addr32[1]) |
+               (__force unsigned long)(a->s6_addr32[0] | a->s6_addr32[1]) |
 #endif
-               (a->s6_addr32[2] ^ htonl(0x0000ffff))) == 0UL;
+               (__force unsigned long)(a->s6_addr32[2] ^
+                                       cpu_to_be32(0x0000ffff))) == 0UL;
 }
 
 /*
@@ -684,6 +685,50 @@ static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
        return hlimit;
 }
 
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6_set_txhash(struct sock *sk)
+{
+       struct inet_sock *inet = inet_sk(sk);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flow_keys keys;
+
+       keys.src = (__force __be32)ipv6_addr_hash(&np->saddr);
+       keys.dst = (__force __be32)ipv6_addr_hash(&sk->sk_v6_daddr);
+       keys.port16[0] = inet->inet_sport;
+       keys.port16[1] = inet->inet_dport;
+
+       sk->sk_txhash = flow_hash_from_keys(&keys);
+}
+
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+                                       __be32 flowlabel, bool autolabel)
+{
+       if (!flowlabel && (autolabel || net->ipv6.sysctl.auto_flowlabels)) {
+               __be32 hash;
+
+               hash = skb_get_hash(skb);
+
+               /* Since this is being sent on the wire, obfuscate the hash a
+                * bit to minimize the possibility that any useful information
+                * is leaked to an attacker. Only the lower 20 bits are relevant.
+                */
+               hash ^= hash >> 12;
+
+               flowlabel = hash & IPV6_FLOWLABEL_MASK;
+       }
+
+       return flowlabel;
+}
+#else
+static inline void ip6_set_txhash(struct sock *sk) { }
+static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
+                                       __be32 flowlabel, bool autolabel)
+{
+       return flowlabel;
+}
+#endif
+
+
 /*
  *     Header manipulation
  */
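
A sketch of the new helper on an IPv6 output path (hypothetical caller;
fl6 is a struct flowi6 the caller already owns):

	static __be32 example_pick_flowlabel(struct net *net,
					     struct sk_buff *skb,
					     struct flowi6 *fl6)
	{
		/* keeps a caller-supplied label; otherwise derives one from
		 * the skb hash when autolabel or the auto_flowlabels sysctl
		 * asks for it */
		return ip6_make_flowlabel(net, skb, fl6->flowlabel, false);
	}
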
index a591053..2e67cdd 100644 (file)
@@ -80,6 +80,25 @@ struct ieee802154_dev {
 #define        IEEE802154_HW_OMIT_CKSUM        0x00000001
 /* Indicates that receiver will autorespond with ACK frames. */
 #define        IEEE802154_HW_AACK              0x00000002
+/* Indicates that transceiver will support transmit power setting. */
+#define        IEEE802154_HW_TXPOWER           0x00000004
+/* Indicates that transceiver will support listen before transmit. */
+#define        IEEE802154_HW_LBT               0x00000008
+/* Indicates that transceiver will support cca mode setting. */
+#define        IEEE802154_HW_CCA_MODE          0x00000010
+/* Indicates that transceiver will support cca ed level setting. */
+#define        IEEE802154_HW_CCA_ED_LEVEL      0x00000020
+/* Indicates that transceiver will support csma (max_be, min_be, csma retries)
+ * settings. */
+#define        IEEE802154_HW_CSMA_PARAMS       0x00000040
+/* Indicates that transceiver will support ARET frame retries setting. */
+#define        IEEE802154_HW_FRAME_RETRIES     0x00000080
+
+/* This groups the most common CSMA support fields into one. */
+#define IEEE802154_HW_CSMA             (IEEE802154_HW_CCA_MODE | \
+                                        IEEE802154_HW_CCA_ED_LEVEL | \
+                                        IEEE802154_HW_CSMA_PARAMS | \
+                                        IEEE802154_HW_FRAME_RETRIES)
 
 /* struct ieee802154_ops - callbacks from mac802154 to the driver
  *
index 7277caf..47f4254 100644 (file)
@@ -203,7 +203,6 @@ struct neigh_table {
        void                    (*proxy_redo)(struct sk_buff *skb);
        char                    *id;
        struct neigh_parms      parms;
-       /* HACK. gc_* should follow parms without a gap! */
        int                     gc_interval;
        int                     gc_thresh1;
        int                     gc_thresh2;
index 0e3d08e..57c8803 100644 (file)
@@ -18,7 +18,6 @@ struct nf_conntrack_ecache {
        u16 ctmask;             /* bitmask of ct events to be delivered */
        u16 expmask;            /* bitmask of expect events to be delivered */
        u32 portid;             /* netlink portid of destroyer */
-       struct timer_list timeout;
 };
 
 static inline struct nf_conntrack_ecache *
@@ -216,8 +215,23 @@ void nf_conntrack_ecache_pernet_fini(struct net *net);
 
 int nf_conntrack_ecache_init(void);
 void nf_conntrack_ecache_fini(void);
-#else /* CONFIG_NF_CONNTRACK_EVENTS */
 
+static inline void nf_conntrack_ecache_delayed_work(struct net *net)
+{
+       if (!delayed_work_pending(&net->ct.ecache_dwork)) {
+               schedule_delayed_work(&net->ct.ecache_dwork, HZ);
+               net->ct.ecache_dwork_pending = true;
+       }
+}
+
+static inline void nf_conntrack_ecache_work(struct net *net)
+{
+       if (net->ct.ecache_dwork_pending) {
+               net->ct.ecache_dwork_pending = false;
+               mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
+       }
+}
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
 static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
                                            struct nf_conn *ct) {}
 static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
@@ -255,6 +269,14 @@ static inline int nf_conntrack_ecache_init(void)
 static inline void nf_conntrack_ecache_fini(void)
 {
 }
+
+static inline void nf_conntrack_ecache_delayed_work(struct net *net)
+{
+}
+
+static inline void nf_conntrack_ecache_work(struct net *net)
+{
+}
 #endif /* CONFIG_NF_CONNTRACK_EVENTS */
 
 #endif /*_NF_CONNTRACK_ECACHE_H*/
index 99eac12..534e1f2 100644 (file)
 #define NF_LOG_UID             0x08    /* Log UID owning local socket */
 #define NF_LOG_MASK            0x0f
 
-#define NF_LOG_TYPE_LOG                0x01
-#define NF_LOG_TYPE_ULOG       0x02
+enum nf_log_type {
+       NF_LOG_TYPE_LOG         = 0,
+       NF_LOG_TYPE_ULOG,
+       NF_LOG_TYPE_MAX
+};
 
 struct nf_loginfo {
        u_int8_t type;
@@ -40,10 +43,10 @@ typedef void nf_logfn(struct net *net,
                      const char *prefix);
 
 struct nf_logger {
-       struct module   *me;
-       nf_logfn        *logfn;
-       char            *name;
-       struct list_head        list[NFPROTO_NUMPROTO];
+       char                    *name;
+       enum nf_log_type        type;
+       nf_logfn                *logfn;
+       struct module           *me;
 };
 
 /* Function to register/unregister log function. */
@@ -58,6 +61,13 @@ int nf_log_bind_pf(struct net *net, u_int8_t pf,
                   const struct nf_logger *logger);
 void nf_log_unbind_pf(struct net *net, u_int8_t pf);
 
+int nf_logger_find_get(int pf, enum nf_log_type type);
+void nf_logger_put(int pf, enum nf_log_type type);
+void nf_logger_request_module(int pf, enum nf_log_type type);
+
+#define MODULE_ALIAS_NF_LOGGER(family, type) \
+       MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
+
 /* Calls the registered backend logging function */
 __printf(8, 9)
 void nf_log_packet(struct net *net,
@@ -69,4 +79,24 @@ void nf_log_packet(struct net *net,
                   const struct nf_loginfo *li,
                   const char *fmt, ...);
 
+struct nf_log_buf;
+
+struct nf_log_buf *nf_log_buf_open(void);
+__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...);
+void nf_log_buf_close(struct nf_log_buf *m);
+
+/* common logging functions */
+int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset);
+int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset,
+                          unsigned int logflags);
+void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+                              unsigned int hooknum, const struct sk_buff *skb,
+                              const struct net_device *in,
+                              const struct net_device *out,
+                              const struct nf_loginfo *loginfo,
+                              const char *prefix);
+
 #endif /* _NF_LOG_H */
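
A sketch of the new centralized log-buffer API that replaces the
per-module sbuff copies (see the xt_log.h removal below); the format
string is illustrative:

	static void example_dump_len(const struct sk_buff *skb)
	{
		struct nf_log_buf *m = nf_log_buf_open();

		nf_log_buf_add(m, "LEN=%u ", skb->len);
		nf_log_buf_close(m);	/* emits the line and releases m */
	}
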
index 7ee6ce6..c4d8619 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/x_tables.h>
 #include <linux/netfilter/nf_tables.h>
+#include <linux/u64_stats_sync.h>
 #include <net/netlink.h>
 
 #define NFT_JUMP_STACK_SIZE    16
@@ -503,9 +504,9 @@ enum nft_chain_flags {
  *     @net: net namespace that this chain belongs to
  *     @table: table that this chain belongs to
  *     @handle: chain handle
- *     @flags: bitmask of enum nft_chain_flags
  *     @use: number of jump references to this chain
  *     @level: length of longest path to this chain
+ *     @flags: bitmask of enum nft_chain_flags
  *     @name: name of the chain
  */
 struct nft_chain {
@@ -514,9 +515,9 @@ struct nft_chain {
        struct net                      *net;
        struct nft_table                *table;
        u64                             handle;
-       u8                              flags;
-       u16                             use;
+       u32                             use;
        u16                             level;
+       u8                              flags;
        char                            name[NFT_CHAIN_MAXNAMELEN];
 };
 
@@ -528,8 +529,9 @@ enum nft_chain_type {
 };
 
 struct nft_stats {
-       u64 bytes;
-       u64 pkts;
+       u64                     bytes;
+       u64                     pkts;
+       struct u64_stats_sync   syncp;
 };
 
 #define NFT_HOOK_OPS_MAX               2
diff --git a/include/net/netfilter/xt_log.h b/include/net/netfilter/xt_log.h
deleted file mode 100644 (file)
index 9d9756c..0000000
+++ /dev/null
@@ -1,54 +0,0 @@
-#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
-
-struct sbuff {
-       unsigned int    count;
-       char            buf[S_SIZE + 1];
-};
-static struct sbuff emergency, *emergency_ptr = &emergency;
-
-static __printf(2, 3) int sb_add(struct sbuff *m, const char *f, ...)
-{
-       va_list args;
-       int len;
-
-       if (likely(m->count < S_SIZE)) {
-               va_start(args, f);
-               len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
-               va_end(args);
-               if (likely(m->count + len < S_SIZE)) {
-                       m->count += len;
-                       return 0;
-               }
-       }
-       m->count = S_SIZE;
-       printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
-       return -1;
-}
-
-static struct sbuff *sb_open(void)
-{
-       struct sbuff *m = kmalloc(sizeof(*m), GFP_ATOMIC);
-
-       if (unlikely(!m)) {
-               local_bh_disable();
-               do {
-                       m = xchg(&emergency_ptr, NULL);
-               } while (!m);
-       }
-       m->count = 0;
-       return m;
-}
-
-static void sb_close(struct sbuff *m)
-{
-       m->buf[m->count] = 0;
-       printk("%s\n", m->buf);
-
-       if (likely(m != &emergency))
-               kfree(m);
-       else {
-               emergency_ptr = m;
-               local_bh_enable();
-       }
-}
-
index 2b47eaa..6c10762 100644 (file)
@@ -949,12 +949,12 @@ static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
  * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
  * @skb: socket buffer to add attribute to
  * @attrtype: attribute type
- * @jiffies: number of msecs in jiffies
+ * @njiffies: number of jiffies to convert to msecs
  */
 static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
-                               unsigned long jiffies)
+                               unsigned long njiffies)
 {
-       u64 tmp = jiffies_to_msecs(jiffies);
+       u64 tmp = jiffies_to_msecs(njiffies);
        return nla_put(skb, attrtype, sizeof(u64), &tmp);
 }
 
index 773cce3..29d6a94 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/list.h>
 #include <linux/list_nulls.h>
 #include <linux/atomic.h>
+#include <linux/workqueue.h>
 #include <linux/netfilter/nf_conntrack_tcp.h>
 #include <linux/seqlock.h>
 
@@ -73,6 +74,10 @@ struct ct_pcpu {
 struct netns_ct {
        atomic_t                count;
        unsigned int            expect_count;
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+       struct delayed_work ecache_dwork;
+       bool ecache_dwork_pending;
+#endif
 #ifdef CONFIG_SYSCTL
        struct ctl_table_header *sysctl_header;
        struct ctl_table_header *acct_sysctl_header;
@@ -82,7 +87,6 @@ struct netns_ct {
 #endif
        char                    *slabname;
        unsigned int            sysctl_log_invalid; /* Log invalid packets */
-       unsigned int            sysctl_events_retry_timeout;
        int                     sysctl_events;
        int                     sysctl_acct;
        int                     sysctl_auto_assign_helper;
index 079030c..e207096 100644 (file)
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
 struct netns_ieee802154_lowpan {
        struct netns_sysctl_lowpan sysctl;
        struct netns_frags      frags;
-       u16                     max_dsize;
+       int                     max_dsize;
 };
 
 #endif
index 19d3446..eade27a 100644 (file)
@@ -28,6 +28,7 @@ struct netns_sysctl_ipv6 {
        int ip6_rt_mtu_expires;
        int ip6_rt_min_advmss;
        int flowlabel_consistency;
+       int auto_flowlabels;
        int icmpv6_time;
        int anycast_src_echo_reply;
        int fwmark_reflect;
index 26a394c..eee608b 100644 (file)
@@ -13,8 +13,8 @@ struct netns_nftables {
        struct nft_af_info      *inet;
        struct nft_af_info      *arp;
        struct nft_af_info      *bridge;
+       unsigned int            base_seq;
        u8                      gencursor;
-       u8                      genctr;
 };
 
 #endif
index 02fe40f..c24060e 100644 (file)
@@ -15,11 +15,5 @@ struct netns_xt {
        struct ebt_table *frame_filter;
        struct ebt_table *frame_nat;
 #endif
-#if IS_ENABLED(CONFIG_IP_NF_TARGET_ULOG)
-       bool ulog_warn_deprecated;
-#endif
-#if IS_ENABLED(CONFIG_BRIDGE_EBT_ULOG)
-       bool ebt_ulog_warn_deprecated;
-#endif
 };
 #endif
index 72240e5..e21b9f9 100644 (file)
@@ -136,6 +136,7 @@ void rtnl_af_unregister(struct rtnl_af_ops *ops);
 
 struct net *rtnl_link_get_net(struct net *src_net, struct nlattr *tb[]);
 struct net_device *rtnl_create_link(struct net *net, char *ifname,
+                                   unsigned char name_assign_type,
                                    const struct rtnl_link_ops *ops,
                                    struct nlattr *tb[]);
 int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
index 624f985..a3cfb8e 100644 (file)
@@ -231,7 +231,7 @@ struct qdisc_skb_cb {
        unsigned int            pkt_len;
        u16                     slave_dev_queue_mapping;
        u16                     _pad;
-       unsigned char           data[20];
+       unsigned char           data[24];
 };
 
 static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
index 4b7cd69..f22538e 100644 (file)
@@ -118,6 +118,7 @@ typedef enum {
 #define SCTP_MAX_NUM_COMMANDS 14
 
 typedef union {
+       void *zero_all; /* Set to NULL to clear the entire union */
        __s32 i32;
        __u32 u32;
        __be32 be32;
@@ -154,7 +155,7 @@ typedef union {
 static inline sctp_arg_t       \
 SCTP_## name (type arg)                \
 { sctp_arg_t retval;\
-  memset(&retval, 0, sizeof(sctp_arg_t));\
+  retval.zero_all = NULL;\
   retval.elt = arg;\
   return retval;\
 }
@@ -191,7 +192,7 @@ static inline sctp_arg_t SCTP_NOFORCE(void)
 static inline sctp_arg_t SCTP_NULL(void)
 {
        sctp_arg_t retval;
-       memset(&retval, 0, sizeof(sctp_arg_t));
+       retval.zero_all = NULL;
        return retval;
 }
 
@@ -202,27 +203,49 @@ typedef struct {
 
 typedef struct {
        sctp_cmd_t cmds[SCTP_MAX_NUM_COMMANDS];
-       __u8 next_free_slot;
-       __u8 next_cmd;
+       sctp_cmd_t *last_used_slot;
+       sctp_cmd_t *next_cmd;
 } sctp_cmd_seq_t;
 
 
 /* Initialize a block of memory as a command sequence.
  * Return 0 if the initialization fails.
  */
-int sctp_init_cmd_seq(sctp_cmd_seq_t *seq);
+static inline int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
+{
+       /* cmds[] is filled backwards to simplify the overflow BUG() check */
+       seq->last_used_slot = seq->cmds + SCTP_MAX_NUM_COMMANDS;
+       seq->next_cmd = seq->last_used_slot;
+       return 1;               /* We always succeed.  */
+}
+
 
 /* Add a command to an sctp_cmd_seq_t.
  *
  * Use the SCTP_* constructors defined by SCTP_ARG_CONSTRUCTOR() above
  * to wrap data which goes in the obj argument.
  */
-void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj);
+static inline void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb,
+                                  sctp_arg_t obj)
+{
+       sctp_cmd_t *cmd = seq->last_used_slot - 1;
+
+       BUG_ON(cmd < seq->cmds);
+
+       cmd->verb = verb;
+       cmd->obj = obj;
+       seq->last_used_slot = cmd;
+}
 
 /* Return the next command structure in an sctp_cmd_seq.
  * Return NULL at the end of the sequence.
  */
-sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq);
+static inline sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq)
+{
+       if (seq->next_cmd <= seq->last_used_slot)
+               return NULL;
 
-#endif /* __net_sctp_command_h__ */
+       return --seq->next_cmd;
+}
 
+#endif /* __net_sctp_command_h__ */
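
A sketch of the now-inlined command sequence in use; SCTP_CMD_NOP and
SCTP_NULL() stand in for a real verb/argument pair:

	static void example_run_cmds(void)
	{
		sctp_cmd_seq_t seq;
		sctp_cmd_t *cmd;

		sctp_init_cmd_seq(&seq);
		sctp_add_cmd_sf(&seq, SCTP_CMD_NOP, SCTP_NULL());

		/* cmds[] is filled backwards, so commands come back out in
		 * insertion order */
		while ((cmd = sctp_next_cmd(&seq)) != NULL)
			; /* dispatch on cmd->verb / cmd->obj here */
	}
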
index 307728f..8c337cd 100644 (file)
@@ -311,7 +311,7 @@ typedef enum {
        SCTP_XMIT_OK,
        SCTP_XMIT_PMTU_FULL,
        SCTP_XMIT_RWND_FULL,
-       SCTP_XMIT_NAGLE_DELAY,
+       SCTP_XMIT_DELAY,
 } sctp_xmit_t;
 
 /* These are the commands for manipulating transports.  */
index 8e4de46..f6e7397 100644 (file)
@@ -109,6 +109,7 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk,
                    struct sctp_association *asoc);
 extern struct percpu_counter sctp_sockets_allocated;
 int sctp_asconf_mgmt(struct sctp_sock *, struct sctp_sockaddr_entry *);
+struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
 
 /*
  * sctp/primitive.c
@@ -388,27 +389,6 @@ static inline int sctp_list_single_entry(struct list_head *head)
        return (head->next != head) && (head->next == head->prev);
 }
 
-/* Generate a random jitter in the range of -50% ~ +50% of input RTO. */
-static inline __s32 sctp_jitter(__u32 rto)
-{
-       static __u32 sctp_rand;
-       __s32 ret;
-
-       /* Avoid divide by zero. */
-       if (!rto)
-               rto = 1;
-
-       sctp_rand += jiffies;
-       sctp_rand ^= (sctp_rand << 12);
-       sctp_rand ^= (sctp_rand >> 20);
-
-       /* Choose random number from 0 to rto, then move to -50% ~ +50%
-        * of rto.
-        */
-       ret = sctp_rand % rto - (rto >> 1);
-       return ret;
-}
-
 /* Break down data chunks at this point.  */
 static inline int sctp_frag_point(const struct sctp_association *asoc, int pmtu)
 {
@@ -574,6 +554,8 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
 static inline void sctp_v4_map_v6(union sctp_addr *addr)
 {
        addr->v6.sin6_family = AF_INET6;
+       addr->v6.sin6_flowinfo = 0;
+       addr->v6.sin6_scope_id = 0;
        addr->v6.sin6_port = addr->v4.sin_port;
        addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        addr->v6.sin6_addr.s6_addr32[0] = 0;
index f38588b..4ff3f67 100644 (file)
@@ -207,7 +207,9 @@ struct sctp_sock {
        struct sctp_paddrparams paddrparam;
        struct sctp_event_subscribe subscribe;
        struct sctp_assocparams assocparams;
+
        int user_frag;
+
        __u32 autoclose;
        __u8 nodelay;
        __u8 disable_fragments;
@@ -215,6 +217,8 @@ struct sctp_sock {
        __u8 frag_interleave;
        __u32 adaptation_ind;
        __u32 pd_point;
+       __u8 recvrcvinfo;
+       __u8 recvnxtinfo;
 
        atomic_t pd_mode;
        /* Receive to here while partial delivery is in effect. */
@@ -461,10 +465,6 @@ struct sctp_af {
                                         int saddr);
        void            (*from_sk)      (union sctp_addr *,
                                         struct sock *sk);
-       void            (*to_sk_saddr)  (union sctp_addr *,
-                                        struct sock *sk);
-       void            (*to_sk_daddr)  (union sctp_addr *,
-                                        struct sock *sk);
        void            (*from_addr_param) (union sctp_addr *,
                                            union sctp_addr_param *,
                                            __be16 port, int iif);
@@ -505,7 +505,9 @@ struct sctp_pf {
        int  (*supported_addrs)(const struct sctp_sock *, __be16 *);
        struct sock *(*create_accept_sk) (struct sock *sk,
                                          struct sctp_association *asoc);
-       void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
+       int (*addr_to_user)(struct sctp_sock *sk, union sctp_addr *addr);
+       void (*to_sk_saddr)(union sctp_addr *, struct sock *sk);
+       void (*to_sk_daddr)(union sctp_addr *, struct sock *sk);
        struct sctp_af *af;
 };
 
@@ -1919,7 +1921,8 @@ struct sctp_chunk *sctp_get_ecne_prepend(struct sctp_association *asoc);
 /* A convenience structure to parse out SCTP specific CMSGs. */
 typedef struct sctp_cmsgs {
        struct sctp_initmsg *init;
-       struct sctp_sndrcvinfo *info;
+       struct sctp_sndrcvinfo *srinfo;
+       struct sctp_sndinfo *sinfo;
 } sctp_cmsgs_t;
 
 /* Structure for tracking memory objects */
index daacb32..cccdcfd 100644 (file)
@@ -129,7 +129,12 @@ struct sctp_ulpevent *sctp_ulpevent_make_sender_dry_event(
        const struct sctp_association *asoc, gfp_t gfp);
 
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
-       struct msghdr *);
+                                  struct msghdr *);
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *);
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *, struct sock *sk);
+
 __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event);
 
 /* Is this event type enabled? */
@@ -155,10 +160,3 @@ static inline int sctp_ulpevent_is_enabled(const struct sctp_ulpevent *event,
 }
 
 #endif /* __sctp_ulpevent_h__ */
-
-
-
-
-
-
-
index 07b7fcd..b91c886 100644 (file)
@@ -181,7 +181,8 @@ struct sock_common {
        unsigned short          skc_family;
        volatile unsigned char  skc_state;
        unsigned char           skc_reuse:4;
-       unsigned char           skc_reuseport:4;
+       unsigned char           skc_reuseport:1;
+       unsigned char           skc_ipv6only:1;
        int                     skc_bound_dev_if;
        union {
                struct hlist_node       skc_bind_node;
@@ -272,6 +273,7 @@ struct cg_proto;
   *    @sk_rcvtimeo: %SO_RCVTIMEO setting
   *    @sk_sndtimeo: %SO_SNDTIMEO setting
   *    @sk_rxhash: flow hash received from netif layer
+  *    @sk_txhash: computed flow hash for use on transmit
   *    @sk_filter: socket filtering instructions
   *    @sk_protinfo: private area, net family specific, when not using slab
   *    @sk_timer: sock cleanup timer
@@ -317,6 +319,7 @@ struct sock {
 #define sk_state               __sk_common.skc_state
 #define sk_reuse               __sk_common.skc_reuse
 #define sk_reuseport           __sk_common.skc_reuseport
+#define sk_ipv6only            __sk_common.skc_ipv6only
 #define sk_bound_dev_if                __sk_common.skc_bound_dev_if
 #define sk_bind_node           __sk_common.skc_bind_node
 #define sk_prot                        __sk_common.skc_prot
@@ -345,6 +348,7 @@ struct sock {
 #ifdef CONFIG_RPS
        __u32                   sk_rxhash;
 #endif
+       __u32                   sk_txhash;
 #ifdef CONFIG_NET_RX_BUSY_POLL
        unsigned int            sk_napi_id;
        unsigned int            sk_ll_usec;
@@ -656,6 +660,20 @@ static inline void sk_add_bind_node(struct sock *sk,
 #define sk_for_each_bound(__sk, list) \
        hlist_for_each_entry(__sk, list, sk_bind_node)
 
+/**
+ * sk_nulls_for_each_entry_offset - iterate over a list at a given struct offset
+ * @tpos:      the type * to use as a loop cursor.
+ * @pos:       the &struct hlist_node to use as a loop cursor.
+ * @head:      the head for your list.
+ * @offset:    offset of hlist_node within the struct.
+ *
+ */
+#define sk_nulls_for_each_entry_offset(tpos, pos, head, offset)                       \
+       for (pos = (head)->first;                                              \
+            (!is_a_nulls(pos)) &&                                             \
+               ({ tpos = (typeof(*tpos) *)((void *)pos - offset); 1;});       \
+            pos = pos->next)
+
 static inline struct user_namespace *sk_user_ns(struct sock *sk)
 {
        /* Careful only use this in a context where these parameters
@@ -689,7 +707,6 @@ enum sock_flags {
        SOCK_TIMESTAMPING_RX_SOFTWARE,  /* %SOF_TIMESTAMPING_RX_SOFTWARE */
        SOCK_TIMESTAMPING_SOFTWARE,     /* %SOF_TIMESTAMPING_SOFTWARE */
        SOCK_TIMESTAMPING_RAW_HARDWARE, /* %SOF_TIMESTAMPING_RAW_HARDWARE */
-       SOCK_TIMESTAMPING_SYS_HARDWARE, /* %SOF_TIMESTAMPING_SYS_HARDWARE */
        SOCK_FASYNC, /* fasync() active */
        SOCK_RXQ_OVFL,
        SOCK_ZEROCOPY, /* buffers from userspace */
@@ -792,8 +809,7 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
  * Do not take this skb's truesize into account,
  * so that even a single big packet can get through.
  */
-static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff *skb,
-                                    unsigned int limit)
+static inline bool sk_rcvqueues_full(const struct sock *sk, unsigned int limit)
 {
        unsigned int qsize = sk->sk_backlog.len + atomic_read(&sk->sk_rmem_alloc);
 
@@ -804,7 +820,7 @@ static inline bool sk_rcvqueues_full(const struct sock *sk, const struct sk_buff
 static inline __must_check int sk_add_backlog(struct sock *sk, struct sk_buff *skb,
                                              unsigned int limit)
 {
-       if (sk_rcvqueues_full(sk, skb, limit))
+       if (sk_rcvqueues_full(sk, limit))
                return -ENOBUFS;
 
        __sk_add_backlog(sk, skb);
@@ -1730,8 +1746,8 @@ sk_dst_get(struct sock *sk)
 
        rcu_read_lock();
        dst = rcu_dereference(sk->sk_dst_cache);
-       if (dst)
-               dst_hold(dst);
+       if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+               dst = NULL;
        rcu_read_unlock();
        return dst;
 }
@@ -1768,9 +1784,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
 static inline void
 sk_dst_set(struct sock *sk, struct dst_entry *dst)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_set(sk, dst);
-       spin_unlock(&sk->sk_dst_lock);
+       struct dst_entry *old_dst;
+
+       sk_tx_queue_clear(sk);
+       old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
+       dst_release(old_dst);
 }
 
 static inline void
@@ -1782,9 +1800,7 @@ __sk_dst_reset(struct sock *sk)
 static inline void
 sk_dst_reset(struct sock *sk)
 {
-       spin_lock(&sk->sk_dst_lock);
-       __sk_dst_reset(sk);
-       spin_unlock(&sk->sk_dst_lock);
+       sk_dst_set(sk, NULL);
 }
 
 struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
@@ -1978,6 +1994,14 @@ static inline void sock_poll_wait(struct file *filp,
        }
 }
 
+static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
+{
+       if (sk->sk_txhash) {
+               skb->l4_hash = 1;
+               skb->hash = sk->sk_txhash;
+       }
+}
+
 /*
  *     Queue a received datagram if it will fit. Stream and sequenced
  *     protocols can't normally use this as they need to fit buffers in
@@ -1992,6 +2016,7 @@ static inline void skb_set_owner_w(struct sk_buff *skb, struct sock *sk)
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = sock_wfree;
+       skb_set_hash_from_sk(skb, sk);
        /*
         * We used to take a refcount on sk, but following operation
         * is enough to guarantee sk_free() wont free this sock until
@@ -2140,16 +2165,13 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
         * - software time stamp available and wanted
         *   (SOCK_TIMESTAMPING_SOFTWARE)
         * - hardware time stamps available and wanted
-        *   (SOCK_TIMESTAMPING_SYS_HARDWARE or
-        *   SOCK_TIMESTAMPING_RAW_HARDWARE)
+        *   SOCK_TIMESTAMPING_RAW_HARDWARE
         */
        if (sock_flag(sk, SOCK_RCVTSTAMP) ||
            sock_flag(sk, SOCK_TIMESTAMPING_RX_SOFTWARE) ||
            (kt.tv64 && sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE)) ||
            (hwtstamps->hwtstamp.tv64 &&
-            sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)) ||
-           (hwtstamps->syststamp.tv64 &&
-            sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE)))
+            sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE)))
                __sock_recv_timestamp(msg, sk, skb);
        else
                sk->sk_stamp = kt;
@@ -2167,8 +2189,7 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
 #define FLAGS_TS_OR_DROPS ((1UL << SOCK_RXQ_OVFL)                      | \
                           (1UL << SOCK_RCVTSTAMP)                      | \
                           (1UL << SOCK_TIMESTAMPING_SOFTWARE)          | \
-                          (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE)      | \
-                          (1UL << SOCK_TIMESTAMPING_SYS_HARDWARE))
+                          (1UL << SOCK_TIMESTAMPING_RAW_HARDWARE))
 
        if (sk->sk_flags & FLAGS_TS_OR_DROPS)
                __sock_recv_ts_and_drops(msg, sk, skb);
index 7286db8..0aeb2eb 100644 (file)
@@ -493,14 +493,8 @@ static inline u32 tcp_cookie_time(void)
 
 u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
-__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mss);
-#else
-static inline __u32 cookie_v4_init_sequence(struct sock *sk,
-                                           struct sk_buff *skb,
-                                           __u16 *mss)
-{
-       return 0;
-}
+__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
+                             __u16 *mss);
 #endif
 
 __u32 cookie_init_timestamp(struct request_sock *req);
@@ -516,13 +510,6 @@ u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
                              const struct tcphdr *th, u16 *mssp);
 __u32 cookie_v6_init_sequence(struct sock *sk, const struct sk_buff *skb,
                              __u16 *mss);
-#else
-static inline __u32 cookie_v6_init_sequence(struct sock *sk,
-                                           struct sk_buff *skb,
-                                           __u16 *mss)
-{
-       return 0;
-}
 #endif
 /* tcp_output.c */
 
@@ -941,7 +928,7 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
 /* Use define here intentionally to get WARN_ON location shown at the caller */
 #define tcp_verify_left_out(tp)        WARN_ON(tcp_left_out(tp) > tp->packets_out)
 
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh);
+void tcp_enter_cwr(struct sock *sk);
 __u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);
 
 /* The maximum number of MSS of available cwnd for which TSO defers
@@ -1098,7 +1085,7 @@ static inline int tcp_full_space(const struct sock *sk)
 
 static inline void tcp_openreq_init(struct request_sock *req,
                                    struct tcp_options_received *rx_opt,
-                                   struct sk_buff *skb)
+                                   struct sk_buff *skb, struct sock *sk)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
 
@@ -1106,7 +1093,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        req->cookie_ts = 0;
        tcp_rsk(req)->rcv_isn = TCP_SKB_CB(skb)->seq;
        tcp_rsk(req)->rcv_nxt = TCP_SKB_CB(skb)->seq + 1;
-       tcp_rsk(req)->snt_synack = 0;
+       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        req->mss = rx_opt->mss_clamp;
        req->ts_recent = rx_opt->saw_tstamp ? rx_opt->rcv_tsval : 0;
        ireq->tstamp_ok = rx_opt->tstamp_ok;
@@ -1117,6 +1104,7 @@ static inline void tcp_openreq_init(struct request_sock *req,
        ireq->ecn_ok = 0;
        ireq->ir_rmt_port = tcp_hdr(skb)->source;
        ireq->ir_num = ntohs(tcp_hdr(skb)->dest);
+       ireq->ir_mark = inet_request_mark(sk, skb);
 }
 
 extern void tcp_openreq_init_rwin(struct request_sock *req,
@@ -1585,6 +1573,11 @@ int tcp4_proc_init(void);
 void tcp4_proc_exit(void);
 #endif
 
+int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                    const struct tcp_request_sock_ops *af_ops,
+                    struct sock *sk, struct sk_buff *skb);
+
 /* TCP af-specific functions */
 struct tcp_sock_af_ops {
 #ifdef CONFIG_TCP_MD5SIG
@@ -1602,6 +1595,7 @@ struct tcp_sock_af_ops {
 };
 
 struct tcp_request_sock_ops {
+       u16 mss_clamp;
 #ifdef CONFIG_TCP_MD5SIG
        struct tcp_md5sig_key   *(*md5_lookup) (struct sock *sk,
                                                struct request_sock *req);
@@ -1611,8 +1605,39 @@ struct tcp_request_sock_ops {
                                                  const struct request_sock *req,
                                                  const struct sk_buff *skb);
 #endif
+       void (*init_req)(struct request_sock *req, struct sock *sk,
+                        struct sk_buff *skb);
+#ifdef CONFIG_SYN_COOKIES
+       __u32 (*cookie_init_seq)(struct sock *sk, const struct sk_buff *skb,
+                                __u16 *mss);
+#endif
+       struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
+                                      const struct request_sock *req,
+                                      bool *strict);
+       __u32 (*init_seq)(const struct sk_buff *skb);
+       int (*send_synack)(struct sock *sk, struct dst_entry *dst,
+                          struct flowi *fl, struct request_sock *req,
+                          u16 queue_mapping, struct tcp_fastopen_cookie *foc);
+       void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
+                              const unsigned long timeout);
 };
 
+#ifdef CONFIG_SYN_COOKIES
+static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+                                        struct sock *sk, struct sk_buff *skb,
+                                        __u16 *mss)
+{
+       return ops->cookie_init_seq(sk, skb, mss);
+}
+#else
+static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
+                                        struct sock *sk, struct sk_buff *skb,
+                                        __u16 *mss)
+{
+       return 0;
+}
+#endif
+
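With these hooks, SYN handling funnels through the shared tcp_conn_request() entry point and the address-family differences live in an ops table. A hedged sketch of how a family might populate it; every my_* name and stub body below is a placeholder, not the actual IPv4/IPv6 implementation:

#include <net/tcp.h>

static void my_init_req(struct request_sock *req, struct sock *sk,
			struct sk_buff *skb)
{
	/* stub: fill the family-specific fields of inet_rsk(req) */
}

static struct dst_entry *my_route_req(struct sock *sk, struct flowi *fl,
				      const struct request_sock *req,
				      bool *strict)
{
	return NULL;	/* stub: look up and return a route */
}

static __u32 my_init_seq(const struct sk_buff *skb)
{
	return 0;	/* stub: derive the initial sequence number */
}

static int my_send_synack(struct sock *sk, struct dst_entry *dst,
			  struct flowi *fl, struct request_sock *req,
			  u16 queue_mapping, struct tcp_fastopen_cookie *foc)
{
	return 0;	/* stub: build and transmit the SYN-ACK */
}

static void my_queue_hash_add(struct sock *sk, struct request_sock *req,
			      const unsigned long timeout)
{
	/* stub: add req to the SYN queue with a timeout */
}

static const struct tcp_request_sock_ops my_af_ops = {
	.mss_clamp	= 536,		/* family-specific default MSS */
	.init_req	= my_init_req,
	.route_req	= my_route_req,
	.init_seq	= my_init_seq,
	.send_synack	= my_send_synack,
	.queue_hash_add	= my_queue_hash_add,
};

/* Incoming SYNs for this family then all go through the common path:
 *
 *	tcp_conn_request(&my_rsk_ops, &my_af_ops, sk, skb);
 *
 * where my_rsk_ops is the family's struct request_sock_ops.
 */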
 int tcpv4_offload_init(void);
 
 void tcp_v4_init(void);
index 68a1fef..70f9413 100644 (file)
@@ -176,6 +176,35 @@ int udp_lib_get_port(struct sock *sk, unsigned short snum,
                     int (*)(const struct sock *, const struct sock *),
                     unsigned int hash2_nulladdr);
 
+static inline __be16 udp_flow_src_port(struct net *net, struct sk_buff *skb,
+                                      int min, int max, bool use_eth)
+{
+       u32 hash;
+
+       if (min >= max) {
+               /* Use default range */
+               inet_get_local_port_range(net, &min, &max);
+       }
+
+       hash = skb_get_hash(skb);
+       if (unlikely(!hash) && use_eth) {
+               /* Can't find a normal hash; the caller has indicated an
+                * Ethernet packet, so hash the Ethernet header instead.
+                */
+               hash = jhash(skb->data, 2 * ETH_ALEN,
+                            (__force u32) skb->protocol);
+       }
+
+       /* Since this is being sent on the wire, obfuscate the hash a bit
+        * to minimize the possibility that any useful information is
+        * leaked to an attacker. Only the upper 16 bits are relevant in
+        * the computation for the 16-bit port value.
+        */
+       hash ^= hash << 16;
+
+       return htons((((u64) hash * (max - min)) >> 32) + min);
+}
+
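The closing expression maps a 32-bit hash onto [min, max) by multiplying by the range width and keeping the top 32 bits, avoiding a division. A small self-contained demonstration of that arithmetic; the sample values are arbitrary, and the kernel additionally byte-swaps the result with htons():

#include <stdint.h>
#include <stdio.h>

static uint16_t map_to_port(uint32_t hash, int min, int max)
{
	hash ^= hash << 16;	/* fold the low half into the high half */

	/* hash / 2^32 is a fraction in [0, 1); scaling it by (max - min)
	 * spreads the hash uniformly over [min, max) with no division.
	 */
	return (uint16_t)((((uint64_t)hash * (max - min)) >> 32) + min);
}

int main(void)
{
	uint32_t samples[] = { 0x00000000u, 0x12345678u, 0xdeadbeefu, 0xffffffffu };

	for (unsigned int i = 0; i < 4; i++)
		printf("hash %#010x -> port %u\n",
		       samples[i], map_to_port(samples[i], 32768, 61000));

	return 0;
}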
 /* net/ipv4/udp.c */
 void udp_v4_early_demux(struct sk_buff *skb);
 int udp_get_port(struct sock *sk, unsigned short snum,
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
new file mode 100644 (file)
index 0000000..ffd69cb
--- /dev/null
@@ -0,0 +1,32 @@
+#ifndef __NET_UDP_TUNNEL_H
+#define __NET_UDP_TUNNEL_H
+
+struct udp_port_cfg {
+       u8                      family;
+
+       /* Used only for kernel-created sockets */
+       union {
+               struct in_addr          local_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+               struct in6_addr         local_ip6;
+#endif
+       };
+
+       union {
+               struct in_addr          peer_ip;
+#if IS_ENABLED(CONFIG_IPV6)
+               struct in6_addr         peer_ip6;
+#endif
+       };
+
+       __be16                  local_udp_port;
+       __be16                  peer_udp_port;
+       unsigned int            use_udp_checksums:1,
+                               use_udp6_tx_checksums:1,
+                               use_udp6_rx_checksums:1;
+};
+
+int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
+                   struct socket **sockp);
+
+#endif
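A hedged sketch of how a tunnel driver might use this new header to create a kernel-owned UDP socket. Only udp_port_cfg and udp_sock_create() come from the header above; the surrounding driver context, the choice of the VXLAN port number, and the error-handling style are assumptions:

#include <linux/in.h>
#include <net/udp_tunnel.h>

static struct socket *tunnel_sock;

static int tunnel_open(struct net *net)
{
	struct udp_port_cfg cfg = {
		.family		   = AF_INET,
		.local_ip.s_addr   = htonl(INADDR_ANY),
		.local_udp_port	   = htons(4789),	/* e.g. the VXLAN port */
		.use_udp_checksums = 1,
	};

	/* On success, tunnel_sock holds a bound, kernel-owned UDP socket. */
	return udp_sock_create(net, &cfg, &tunnel_sock);
}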
index 12196ce..d5f59f3 100644 (file)
@@ -45,8 +45,6 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
                   __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
                   __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
 
-__be16 vxlan_src_port(__u16 port_min, __u16 port_max, struct sk_buff *skb);
-
 /* IP header + UDP + VXLAN + Ethernet header */
 #define VXLAN_HEADROOM (20 + 8 + 8 + 14)
 /* IPv6 header + UDP + VXLAN + Ethernet header */
index 42ed789..e0ae710 100644 (file)
@@ -318,7 +318,7 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
 
 static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
 {
-       unsigned int xfer_len = blk_rq_bytes(scmd->request);
+       unsigned int xfer_len = scsi_out(scmd)->length;
        unsigned int prot_op = scsi_get_prot_op(scmd);
        unsigned int sector_size = scmd->device->sector_size;
 
index 5853c91..27ab310 100644 (file)
@@ -173,6 +173,7 @@ struct scsi_device {
        unsigned is_visible:1;  /* is the device visible in sysfs */
        unsigned wce_default_on:1;      /* Cache is ON by default */
        unsigned no_dif:1;      /* T10 PI (DIF) should be disabled */
+       unsigned broken_fua:1;          /* Don't set FUA bit */
 
        atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
index eedda2c..1df3f2f 100644 (file)
@@ -116,6 +116,8 @@ struct snd_card {
        int user_ctl_count;             /* count of all user controls */
        struct list_head controls;      /* all controls for this card */
        struct list_head ctl_files;     /* active control files */
+       struct mutex user_ctl_lock;     /* protects user controls against
+                                          concurrent access */
 
        struct snd_info_entry *proc_root;       /* root for soundcard specific files */
        struct snd_info_entry *proc_id; /* the card id */
index 0fd06fe..26b4f2e 100644 (file)
 #undef __field_ext
 #define __field_ext(type, item, filter_type)   type    item;
 
+#undef __field_struct
+#define __field_struct(type, item)     type    item;
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)    type    item;
+
 #undef __array
 #define __array(type, item, len)       type    item[len];
 
 #undef __field_ext
 #define __field_ext(type, item, filter_type)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -315,9 +327,21 @@ static struct trace_event_functions ftrace_event_type_funcs_##call = {     \
        if (ret)                                                        \
                return ret;
 
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)                    \
+       ret = trace_define_field(event_call, #type, #item,              \
+                                offsetof(typeof(field), item),         \
+                                sizeof(field.item),                    \
+                                0, filter_type);                       \
+       if (ret)                                                        \
+               return ret;
+
 #undef __field
 #define __field(type, item)    __field_ext(type, item, FILTER_OTHER)
 
+#undef __field_struct
+#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
+
 #undef __array
 #define __array(type, item, len)                                       \
        do {                                                            \
@@ -379,6 +403,12 @@ ftrace_define_fields_##call(struct ftrace_event_call *event_call)  \
 #undef __field_ext
 #define __field_ext(type, item, filter_type)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
+#undef __field_struct_ext
+#define __field_struct_ext(type, item, filter_type)
+
 #undef __array
 #define __array(type, item, len)
 
@@ -550,6 +580,9 @@ static inline notrace int ftrace_get_offsets_##call(                        \
 #undef __field
 #define __field(type, item)
 
+#undef __field_struct
+#define __field_struct(type, item)
+
 #undef __array
 #define __array(type, item, len)
 
index fed853f..9674145 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/tracepoint.h>
 #include <linux/unistd.h>
 #include <linux/ftrace_event.h>
+#include <linux/thread_info.h>
 
 #include <asm/ptrace.h>
 
@@ -32,4 +33,18 @@ struct syscall_metadata {
        struct ftrace_event_call *exit_event;
 };
 
+#if defined(CONFIG_TRACEPOINTS) && defined(CONFIG_HAVE_SYSCALL_TRACEPOINTS)
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+       if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+               set_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+       else
+               clear_tsk_thread_flag(p, TIF_SYSCALL_TRACEPOINT);
+}
+#else
+static inline void syscall_tracepoint_update(struct task_struct *p)
+{
+}
+#endif
+
 #endif /* _TRACE_SYSCALL_H */
index 6f9c38c..2f47824 100644 (file)
@@ -38,6 +38,7 @@ struct btrfs_ioctl_vol_args {
 #define BTRFS_SUBVOL_QGROUP_INHERIT    (1ULL << 2)
 #define BTRFS_FSID_SIZE 16
 #define BTRFS_UUID_SIZE 16
+#define BTRFS_UUID_UNPARSED_SIZE       37
 
 #define BTRFS_QGROUP_INHERIT_SET_LIMITS        (1ULL << 0)
 
index 813d11f..3e4323a 100644 (file)
@@ -92,11 +92,12 @@ struct can_ctrlmode {
 };
 
 #define CAN_CTRLMODE_LOOPBACK          0x01    /* Loopback mode */
-#define CAN_CTRLMODE_LISTENONLY                0x02    /* Listen-only mode */
+#define CAN_CTRLMODE_LISTENONLY                0x02    /* Listen-only mode */
 #define CAN_CTRLMODE_3_SAMPLES         0x04    /* Triple sampling mode */
 #define CAN_CTRLMODE_ONE_SHOT          0x08    /* One-Shot mode */
 #define CAN_CTRLMODE_BERR_REPORTING    0x10    /* Bus-error reporting */
 #define CAN_CTRLMODE_FD                        0x20    /* CAN FD mode */
+#define CAN_CTRLMODE_PRESUME_ACK       0x40    /* Ignore missing CAN ACKs */
 
 /*
  * CAN device statistics
index 6bb4338..e711f20 100644 (file)
@@ -148,7 +148,8 @@ struct cee_pfc {
  *
  * @selector: protocol identifier type
  * @protocol: protocol of type indicated
- * @priority: 3-bit unsigned integer indicating priority
+ * @priority: 3-bit unsigned integer indicating priority for IEEE;
+ *            8-bit 802.1p user priority bitmap for CEE
  *
  * ----
  *  Selector field values
index 40b5ca8..25084a0 100644 (file)
  *  - add FATTR_CTIME
  *  - add ctime and ctimensec to fuse_setattr_in
  *  - add FUSE_RENAME2 request
+ *  - add FUSE_NO_OPEN_SUPPORT flag
  */
 
 #ifndef _LINUX_FUSE_H
@@ -229,6 +230,7 @@ struct fuse_file_lock {
  * FUSE_READDIRPLUS_AUTO: adaptive readdirplus
  * FUSE_ASYNC_DIO: asynchronous direct I/O submission
  * FUSE_WRITEBACK_CACHE: use writeback cache for buffered writes
+ * FUSE_NO_OPEN_SUPPORT: kernel supports zero-message opens
  */
 #define FUSE_ASYNC_READ                (1 << 0)
 #define FUSE_POSIX_LOCKS       (1 << 1)
@@ -247,6 +249,7 @@ struct fuse_file_lock {
 #define FUSE_READDIRPLUS_AUTO  (1 << 14)
 #define FUSE_ASYNC_DIO         (1 << 15)
 #define FUSE_WRITEBACK_CACHE   (1 << 16)
+#define FUSE_NO_OPEN_SUPPORT   (1 << 17)
 
 /**
  * CUSE INIT request/reply flags
index b385348..ff95760 100644 (file)
@@ -204,11 +204,17 @@ enum {
        IFLA_INET6_CACHEINFO,   /* time values and max reasm size */
        IFLA_INET6_ICMP6STATS,  /* statistics (icmpv6)          */
        IFLA_INET6_TOKEN,       /* device token                 */
+       IFLA_INET6_ADDR_GEN_MODE, /* implicit address generator mode */
        __IFLA_INET6_MAX
 };
 
 #define IFLA_INET6_MAX (__IFLA_INET6_MAX - 1)
 
+enum in6_addr_gen_mode {
+       IN6_ADDR_GEN_MODE_EUI64,
+       IN6_ADDR_GEN_MODE_NONE,
+};
+
 enum {
        BRIDGE_MODE_UNSPEC,
        BRIDGE_MODE_HAIRPIN,
index bac27fa..da2d668 100644 (file)
@@ -108,7 +108,7 @@ struct tpacket_auxdata {
 
 /* Rx and Tx ring - header status */
 #define TP_STATUS_TS_SOFTWARE          (1 << 29)
-#define TP_STATUS_TS_SYS_HARDWARE      (1 << 30)
+#define TP_STATUS_TS_SYS_HARDWARE      (1 << 30) /* deprecated, never set */
 #define TP_STATUS_TS_RAW_HARDWARE      (1 << 31)
 
 /* Rx ring - feature request bits */
index 0d8e0f0..22b7a69 100644 (file)
@@ -233,6 +233,7 @@ struct in6_flowlabel_req {
 #if 0  /* not yet */
 #define IPV6_USE_MIN_MTU       63
 #endif
+#define IPV6_AUTOFLOWLABEL     64
 
 /*
  * Netfilter (1)
index 593b0e3..efa2666 100644 (file)
@@ -163,6 +163,7 @@ enum {
        DEVCONF_MLDV1_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_MLDV2_UNSOLICITED_REPORT_INTERVAL,
        DEVCONF_SUPPRESS_FRAG_NDISC,
+       DEVCONF_ACCEPT_RA_FROM_LOCAL,
        DEVCONF_MAX
 };
 
index fdfbd1c..5581854 100644 (file)
 #define INIT_NETDEV_GROUP      0
 
 
+/* interface name assignment types (sysfs name_assign_type attribute) */
+#define NET_NAME_UNKNOWN       0       /* unknown origin (not exposed to userspace) */
+#define NET_NAME_ENUM          1       /* enumerated by kernel */
+#define NET_NAME_PREDICTABLE   2       /* predictably named by the kernel */
+#define NET_NAME_USER          3       /* provided by user-space */
+#define NET_NAME_RENAMED       4       /* renamed by user-space */
 
 /* Media selection options. */
 enum {
index 2a88f64..801bdd1 100644 (file)
@@ -697,6 +697,8 @@ enum nft_counter_attributes {
  * @NFTA_LOG_PREFIX: prefix to prepend to log messages (NLA_STRING)
  * @NFTA_LOG_SNAPLEN: length of payload to include in netlink message (NLA_U32)
  * @NFTA_LOG_QTHRESHOLD: queue threshold (NLA_U32)
+ * @NFTA_LOG_LEVEL: log level (NLA_U32)
+ * @NFTA_LOG_FLAGS: logging flags (NLA_U32)
  */
 enum nft_log_attributes {
        NFTA_LOG_UNSPEC,
@@ -704,6 +706,8 @@ enum nft_log_attributes {
        NFTA_LOG_PREFIX,
        NFTA_LOG_SNAPLEN,
        NFTA_LOG_QTHRESHOLD,
+       NFTA_LOG_LEVEL,
+       NFTA_LOG_FLAGS,
        __NFTA_LOG_MAX
 };
 #define NFTA_LOG_MAX           (__NFTA_LOG_MAX - 1)
index 5dda450..1fad2c2 100644 (file)
@@ -6,12 +6,14 @@
 
 #define XT_BPF_MAX_NUM_INSTR   64
 
+struct bpf_prog;
+
 struct xt_bpf_info {
        __u16 bpf_program_num_elem;
        struct sock_filter bpf_program[XT_BPF_MAX_NUM_INSTR];
 
        /* only used in the kernel */
-       struct sk_filter *filter __attribute__((aligned(8)));
+       struct bpf_prog *filter __attribute__((aligned(8)));
 };
 
 #endif /*_XT_BPF_H */
index 348717c..0fbad8e 100644 (file)
@@ -14,6 +14,5 @@ header-y += ebt_nflog.h
 header-y += ebt_pkttype.h
 header-y += ebt_redirect.h
 header-y += ebt_stp.h
-header-y += ebt_ulog.h
 header-y += ebt_vlan.h
 header-y += ebtables.h
diff --git a/include/uapi/linux/netfilter_bridge/ebt_ulog.h b/include/uapi/linux/netfilter_bridge/ebt_ulog.h
deleted file mode 100644 (file)
index 89a6bec..0000000
+++ /dev/null
@@ -1,38 +0,0 @@
-#ifndef _EBT_ULOG_H
-#define _EBT_ULOG_H
-
-#include <linux/types.h>
-
-#define EBT_ULOG_DEFAULT_NLGROUP 0
-#define EBT_ULOG_DEFAULT_QTHRESHOLD 1
-#define EBT_ULOG_MAXNLGROUPS 32 /* hardcoded netlink max */
-#define EBT_ULOG_PREFIX_LEN 32
-#define EBT_ULOG_MAX_QLEN 50
-#define EBT_ULOG_WATCHER "ulog"
-#define EBT_ULOG_VERSION 1
-
-struct ebt_ulog_info {
-       __u32 nlgroup;
-       unsigned int cprange;
-       unsigned int qthreshold;
-       char prefix[EBT_ULOG_PREFIX_LEN];
-};
-
-typedef struct ebt_ulog_packet_msg {
-       int version;
-       char indev[IFNAMSIZ];
-       char outdev[IFNAMSIZ];
-       char physindev[IFNAMSIZ];
-       char physoutdev[IFNAMSIZ];
-       char prefix[EBT_ULOG_PREFIX_LEN];
-       struct timeval stamp;
-       unsigned long mark;
-       unsigned int hook;
-       size_t data_len;
-       /* The complete packet, including Ethernet header and perhaps
-        * the VLAN header is appended */
-       unsigned char data[0] __attribute__
-                             ((aligned (__alignof__(struct ebt_ulog_info))));
-} ebt_ulog_packet_msg_t;
-
-#endif /* _EBT_ULOG_H */
index fb00843..ecb291d 100644 (file)
@@ -5,7 +5,6 @@ header-y += ipt_ECN.h
 header-y += ipt_LOG.h
 header-y += ipt_REJECT.h
 header-y += ipt_TTL.h
-header-y += ipt_ULOG.h
 header-y += ipt_ah.h
 header-y += ipt_ecn.h
 header-y += ipt_ttl.h
diff --git a/include/uapi/linux/netfilter_ipv4/ipt_ULOG.h b/include/uapi/linux/netfilter_ipv4/ipt_ULOG.h
deleted file mode 100644 (file)
index 417aad2..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/* Header file for IP tables userspace logging, Version 1.8
- *
- * (C) 2000-2002 by Harald Welte <laforge@gnumonks.org>
- * 
- * Distributed under the terms of GNU GPL */
-
-#ifndef _IPT_ULOG_H
-#define _IPT_ULOG_H
-
-#ifndef NETLINK_NFLOG
-#define NETLINK_NFLOG  5
-#endif
-
-#define ULOG_DEFAULT_NLGROUP   1
-#define ULOG_DEFAULT_QTHRESHOLD        1
-
-#define ULOG_MAC_LEN   80
-#define ULOG_PREFIX_LEN        32
-
-#define ULOG_MAX_QLEN  50
-/* Why 50? Well... there is a limit imposed by the slab cache 131000
- * bytes. So the multipart netlink-message has to be < 131000 bytes.
- * Assuming a standard ethernet-mtu of 1500, we could define this up
- * to 80... but even 50 seems to be big enough. */
-
-/* private data structure for each rule with a ULOG target */
-struct ipt_ulog_info {
-       unsigned int nl_group;
-       size_t copy_range;
-       size_t qthreshold;
-       char prefix[ULOG_PREFIX_LEN];
-};
-
-/* Format of the ULOG packets passed through netlink */
-typedef struct ulog_packet_msg {
-       unsigned long mark;
-       long timestamp_sec;
-       long timestamp_usec;
-       unsigned int hook;
-       char indev_name[IFNAMSIZ];
-       char outdev_name[IFNAMSIZ];
-       size_t data_len;
-       char prefix[ULOG_PREFIX_LEN];
-       unsigned char mac_len;
-       unsigned char mac[ULOG_MAC_LEN];
-       unsigned char payload[0];
-} ulog_packet_msg_t;
-
-#endif /*_IPT_ULOG_H*/
index 0b979ee..a794d1d 100644 (file)
@@ -118,6 +118,9 @@ struct ovs_vport_stats {
 /* Allow last Netlink attribute to be unaligned */
 #define OVS_DP_F_UNALIGNED     (1 << 0)
 
+/* Allow datapath to associate multiple Netlink PIDs to each vport */
+#define OVS_DP_F_VPORT_PIDS    (1 << 1)
+
 /* Fixed logical ports. */
 #define OVSP_LOCAL      ((__u32)0)
 
@@ -203,9 +206,10 @@ enum ovs_vport_type {
  * this is the name of the network device.  Maximum length %IFNAMSIZ-1 bytes
  * plus a null terminator.
  * @OVS_VPORT_ATTR_OPTIONS: Vport-specific configuration information.
- * @OVS_VPORT_ATTR_UPCALL_PID: The Netlink socket in userspace that
- * OVS_PACKET_CMD_MISS upcalls will be directed to for packets received on
- * this port.  A value of zero indicates that upcalls should not be sent.
+ * @OVS_VPORT_ATTR_UPCALL_PID: The array of Netlink socket pids in userspace
+ * among which OVS_PACKET_CMD_MISS upcalls will be distributed for packets
+ * received on this port.  If this is a single-element array of value 0,
+ * upcalls should not be sent.
  * @OVS_VPORT_ATTR_STATS: A &struct ovs_vport_stats giving statistics for
  * packets sent or received through the vport.
  *
@@ -228,7 +232,8 @@ enum ovs_vport_attr {
        OVS_VPORT_ATTR_TYPE,    /* u32 OVS_VPORT_TYPE_* constant. */
        OVS_VPORT_ATTR_NAME,    /* string name, up to IFNAMSIZ bytes long */
        OVS_VPORT_ATTR_OPTIONS, /* nested attributes, varies by vport type */
-       OVS_VPORT_ATTR_UPCALL_PID, /* u32 Netlink PID to receive upcalls */
+       OVS_VPORT_ATTR_UPCALL_PID, /* array of u32 Netlink socket PIDs for */
+                               /* receiving upcalls */
        OVS_VPORT_ATTR_STATS,   /* struct ovs_vport_stats */
        __OVS_VPORT_ATTR_MAX
 };
index 5312fae..9269de2 100644 (file)
@@ -705,6 +705,7 @@ enum perf_event_type {
         *      u32                             min;
         *      u64                             ino;
         *      u64                             ino_generation;
+        *      u32                             prot, flags;
         *      char                            filename[];
         *      struct sample_id                sample_id;
         * };
index 266022a..ce70fe6 100644 (file)
@@ -95,6 +95,9 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_GET_ASSOC_ID_LIST 29      /* Read only */
 #define SCTP_AUTO_ASCONF       30
 #define SCTP_PEER_ADDR_THLDS   31
+#define SCTP_RECVRCVINFO       32
+#define SCTP_RECVNXTINFO       33
+#define SCTP_DEFAULT_SNDINFO   34
 
 /* Internal Socket Options. Some of the sctp library functions are
  * implemented using these socket options.
@@ -110,8 +113,14 @@ typedef __s32 sctp_assoc_t;
 #define SCTP_SOCKOPT_CONNECTX3 111     /* CONNECTX requests (updated) */
 #define SCTP_GET_ASSOC_STATS   112     /* Read only */
 
-/*
- * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
+/* These are bit fields for msghdr->msg_flags.  See section 5.1.  */
+/* On user space Linux, these live in <bits/socket.h> as an enum.  */
+enum sctp_msg_flags {
+       MSG_NOTIFICATION = 0x8000,
+#define MSG_NOTIFICATION MSG_NOTIFICATION
+};
+
+/* 5.3.1 SCTP Initiation Structure (SCTP_INIT)
  *
  *   This cmsghdr structure provides information for initializing new
  *   SCTP associations with sendmsg().  The SCTP_INITMSG socket option
@@ -121,7 +130,6 @@ typedef __s32 sctp_assoc_t;
  *   cmsg_level    cmsg_type      cmsg_data[]
  *   ------------  ------------   ----------------------
  *   IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
- *
  */
 struct sctp_initmsg {
        __u16 sinit_num_ostreams;
@@ -130,8 +138,7 @@ struct sctp_initmsg {
        __u16 sinit_max_init_timeo;
 };
 
-/*
- * 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
+/* 5.3.2 SCTP Header Information Structure (SCTP_SNDRCV)
  *
  *   This cmsghdr structure specifies SCTP options for sendmsg() and
  *   describes SCTP header information about a received message through
@@ -140,7 +147,6 @@ struct sctp_initmsg {
  *   cmsg_level    cmsg_type      cmsg_data[]
  *   ------------  ------------   ----------------------
  *   IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
- *
  */
 struct sctp_sndrcvinfo {
        __u16 sinfo_stream;
@@ -154,19 +160,74 @@ struct sctp_sndrcvinfo {
        sctp_assoc_t sinfo_assoc_id;
 };
 
+/* 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
+ *
+ *   This cmsghdr structure specifies SCTP options for sendmsg().
+ *
+ *   cmsg_level    cmsg_type      cmsg_data[]
+ *   ------------  ------------   -------------------
+ *   IPPROTO_SCTP  SCTP_SNDINFO   struct sctp_sndinfo
+ */
+struct sctp_sndinfo {
+       __u16 snd_sid;
+       __u16 snd_flags;
+       __u32 snd_ppid;
+       __u32 snd_context;
+       sctp_assoc_t snd_assoc_id;
+};
+
+/* 5.3.5 SCTP Receive Information Structure (SCTP_RCVINFO)
+ *
+ *   This cmsghdr structure describes SCTP receive information
+ *   about a received message through recvmsg().
+ *
+ *   cmsg_level    cmsg_type      cmsg_data[]
+ *   ------------  ------------   -------------------
+ *   IPPROTO_SCTP  SCTP_RCVINFO   struct sctp_rcvinfo
+ */
+struct sctp_rcvinfo {
+       __u16 rcv_sid;
+       __u16 rcv_ssn;
+       __u16 rcv_flags;
+       __u32 rcv_ppid;
+       __u32 rcv_tsn;
+       __u32 rcv_cumtsn;
+       __u32 rcv_context;
+       sctp_assoc_t rcv_assoc_id;
+};
+
+/* 5.3.6 SCTP Next Receive Information Structure (SCTP_NXTINFO)
+ *
+ *   This cmsghdr structure describes SCTP receive information
+ *   of the next message that will be delivered through recvmsg()
+ *   if this information is already available when delivering
+ *   the current message.
+ *
+ *   cmsg_level    cmsg_type      cmsg_data[]
+ *   ------------  ------------   -------------------
+ *   IPPROTO_SCTP  SCTP_NXTINFO   struct sctp_nxtinfo
+ */
+struct sctp_nxtinfo {
+       __u16 nxt_sid;
+       __u16 nxt_flags;
+       __u32 nxt_ppid;
+       __u32 nxt_length;
+       sctp_assoc_t nxt_assoc_id;
+};
+
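A userspace sketch of consuming the new SCTP_RCVINFO ancillary data, assuming a toolchain whose <netinet/sctp.h> already exposes these structures (otherwise <linux/sctp.h>); the buffer sizes and the omitted error handling are illustrative only:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <netinet/sctp.h>

static void read_one_message(int sd)
{
	char data[1024];
	char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg = {
		.msg_iov = &iov,
		.msg_iovlen = 1,
		.msg_control = cbuf,
		.msg_controllen = sizeof(cbuf),
	};
	struct cmsghdr *cmsg;
	int on = 1;

	/* Ask the kernel to attach SCTP_RCVINFO to every message. */
	setsockopt(sd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));

	if (recvmsg(sd, &msg, 0) < 0)
		return;

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == IPPROTO_SCTP &&
		    cmsg->cmsg_type == SCTP_RCVINFO) {
			struct sctp_rcvinfo ri;

			memcpy(&ri, CMSG_DATA(cmsg), sizeof(ri));
			printf("stream %u ssn %u tsn %u\n",
			       ri.rcv_sid, ri.rcv_ssn, ri.rcv_tsn);
		}
	}
}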
 /*
  *  sinfo_flags: 16 bits (unsigned integer)
  *
  *   This field may contain any of the following flags and is composed of
  *   a bitwise OR of these values.
  */
-
 enum sctp_sinfo_flags {
-       SCTP_UNORDERED = 1,  /* Send/receive message unordered. */
-       SCTP_ADDR_OVER = 2,  /* Override the primary destination. */
-       SCTP_ABORT=4,        /* Send an ABORT message to the peer. */
-       SCTP_SACK_IMMEDIATELY = 8,      /* SACK should be sent without delay */
-       SCTP_EOF=MSG_FIN,    /* Initiate graceful shutdown process. */
+       SCTP_UNORDERED          = (1 << 0), /* Send/receive message unordered. */
+       SCTP_ADDR_OVER          = (1 << 1), /* Override the primary destination. */
+       SCTP_ABORT              = (1 << 2), /* Send an ABORT message to the peer. */
+       SCTP_SACK_IMMEDIATELY   = (1 << 3), /* SACK should be sent without delay. */
+       SCTP_NOTIFICATION       = MSG_NOTIFICATION, /* Next message is a notification, not a user message. */
+       SCTP_EOF                = MSG_FIN,  /* Initiate graceful shutdown process. */
 };
 
 typedef union {
@@ -177,10 +238,16 @@ typedef union {
 
 /* These are cmsg_types.  */
 typedef enum sctp_cmsg_type {
-       SCTP_INIT,              /* 5.2.1 SCTP Initiation Structure */
+       SCTP_INIT,              /* 5.2.1 SCTP Initiation Structure */
 #define SCTP_INIT      SCTP_INIT
-       SCTP_SNDRCV,            /* 5.2.2 SCTP Header Information Structure */
+       SCTP_SNDRCV,            /* 5.2.2 SCTP Header Information Structure */
 #define SCTP_SNDRCV    SCTP_SNDRCV
+       SCTP_SNDINFO,           /* 5.3.4 SCTP Send Information Structure */
+#define SCTP_SNDINFO   SCTP_SNDINFO
+       SCTP_RCVINFO,           /* 5.3.5 SCTP Receive Information Structure */
+#define SCTP_RCVINFO   SCTP_RCVINFO
+       SCTP_NXTINFO,           /* 5.3.6 SCTP Next Receive Information Structure */
+#define SCTP_NXTINFO   SCTP_NXTINFO
 } sctp_cmsg_t;
 
 /*
@@ -808,13 +875,6 @@ struct sctp_assoc_stats {
        __u64           sas_ictrlchunks; /* Control chunks received */
 };
 
-/* These are bit fields for msghdr->msg_flags.  See section 5.1.  */
-/* On user space Linux, these live in <bits/socket.h> as an enum.  */
-enum sctp_msg_flags {
-       MSG_NOTIFICATION = 0x8000,
-#define MSG_NOTIFICATION MSG_NOTIFICATION
-};
-
 /*
  * 8.1 sctp_bindx()
  *
index 6d67213..43aaba1 100644 (file)
@@ -568,6 +568,7 @@ enum {
        NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN=22,
        NET_IPV6_PROXY_NDP=23,
        NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
+       NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
        __NET_IPV6_MAX
 };
 
index 41a76ac..876d0a1 100644 (file)
 
 #define TIPC_MIN_LINK_WIN 16
 #define TIPC_DEF_LINK_WIN 50
-#define TIPC_MAX_LINK_WIN 150
+#define TIPC_MAX_LINK_WIN 8191
 
 
 struct tipc_node_info {
index 2a4b4a7..24b68c5 100644 (file)
@@ -33,6 +33,13 @@ struct usb_endpoint_descriptor_no_audio {
        __u8  bInterval;
 } __attribute__((packed));
 
+/* Legacy format, deprecated as of 3.14. */
+struct usb_functionfs_descs_head {
+       __le32 magic;
+       __le32 length;
+       __le32 fs_count;
+       __le32 hs_count;
+} __attribute__((packed, deprecated));
 
 /*
  * Descriptors format:
index 21eed48..1964026 100644 (file)
@@ -39,7 +39,7 @@
 struct snd_compressed_buffer {
        __u32 fragment_size;
        __u32 fragments;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_params: compressed stream params
@@ -51,7 +51,7 @@ struct snd_compr_params {
        struct snd_compressed_buffer buffer;
        struct snd_codec codec;
        __u8 no_wake_mode;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_tstamp: timestamp descriptor
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
        __u32 pcm_frames;
        __u32 pcm_io_frames;
        __u32 sampling_rate;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_avail: avail descriptor
@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
 struct snd_compr_avail {
        __u64 avail;
        struct snd_compr_tstamp tstamp;
-} __attribute__((packed));
+} __attribute__((packed, aligned(4)));
 
 enum snd_compr_direction {
        SND_COMPRESS_PLAYBACK = 0,
@@ -107,7 +107,7 @@ struct snd_compr_caps {
        __u32 max_fragments;
        __u32 codecs[MAX_NUM_CODECS];
        __u32 reserved[11];
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_compr_codec_caps: query capability of codec
@@ -119,7 +119,7 @@ struct snd_compr_codec_caps {
        __u32 codec;
        __u32 num_descriptors;
        struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS];
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
@@ -140,7 +140,7 @@ enum {
 struct snd_compr_metadata {
         __u32 key;
         __u32 value[8];
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * compress path ioctl definitions
index 165e705..d9bd9ca 100644 (file)
@@ -268,7 +268,7 @@ struct snd_enc_vorbis {
        __u32 max_bit_rate;
        __u32 min_bit_rate;
        __u32 downmix;
-};
+} __attribute__((packed, aligned(4)));
 
 
 /**
@@ -284,7 +284,7 @@ struct snd_enc_real {
        __u32 quant_bits;
        __u32 start_region;
        __u32 num_regions;
-};
+} __attribute__((packed, aligned(4)));
 
 /**
  * struct snd_enc_flac
@@ -308,12 +308,12 @@ struct snd_enc_real {
 struct snd_enc_flac {
        __u32 num;
        __u32 gain;
-};
+} __attribute__((packed, aligned(4)));
 
 struct snd_enc_generic {
        __u32 bw;       /* encoder bandwidth */
        __s32 reserved[15];
-};
+} __attribute__((packed, aligned(4)));
 
 union snd_codec_options {
        struct snd_enc_wma wma;
@@ -321,7 +321,7 @@ union snd_codec_options {
        struct snd_enc_real real;
        struct snd_enc_flac flac;
        struct snd_enc_generic generic;
-};
+} __attribute__((packed, aligned(4)));
 
 /** struct snd_codec_desc - description of codec capabilities
  * @max_ch: Maximum number of audio channels
@@ -358,7 +358,7 @@ struct snd_codec_desc {
        __u32 formats;
        __u32 min_buffer;
        __u32 reserved[15];
-};
+} __attribute__((packed, aligned(4)));
 
 /** struct snd_codec
  * @id: Identifies the supported audio encoder/decoder.
@@ -399,6 +399,6 @@ struct snd_codec {
        __u32 align;
        union snd_codec_options options;
        __u32 reserved[3];
-};
+} __attribute__((packed, aligned(4)));
 
 #endif
index a5af2a2..5c1aba1 100644 (file)
@@ -170,6 +170,7 @@ gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, phys_addr_t addr,
        unmap->dev_bus_addr = 0;
 }
 
+int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status);
 int arch_gnttab_map_shared(xen_pfn_t *frames, unsigned long nr_gframes,
                           unsigned long max_nr_gframes,
                           void **__shared);
index 35536d9..76768ee 100644 (file)
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
 
 endif
 
+config ARCH_SUPPORTS_ATOMIC_RMW
+       bool
+
 config MUTEX_SPIN_ON_OWNER
        def_bool y
-       depends on SMP && !DEBUG_MUTEXES
+       depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
+
+config RWSEM_SPIN_ON_OWNER
+       def_bool y
+       depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
 
 config ARCH_USE_QUEUE_RWLOCK
        bool
index f2a8b62..e7360b7 100644 (file)
@@ -87,6 +87,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
+obj-$(CONFIG_NET) += bpf/
 
 obj-$(CONFIG_PERF_EVENTS) += events/
 
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
new file mode 100644 (file)
index 0000000..6a71145
--- /dev/null
@@ -0,0 +1 @@
+obj-y := core.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
new file mode 100644 (file)
index 0000000..7f0dbcb
--- /dev/null
@@ -0,0 +1,534 @@
+/*
+ * Linux Socket Filter - Kernel level socket filtering
+ *
+ * Based on the design of the Berkeley Packet Filter. The new
+ * internal format has been designed by PLUMgrid:
+ *
+ *     Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
+ *
+ * Authors:
+ *
+ *     Jay Schulist <jschlst@samba.org>
+ *     Alexei Starovoitov <ast@plumgrid.com>
+ *     Daniel Borkmann <dborkman@redhat.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * Andi Kleen - Fix a few bad bugs and races.
+ * Kris Katterjohn - Added many additional checks in bpf_check_classic()
+ */
+#include <linux/filter.h>
+#include <linux/skbuff.h>
+#include <asm/unaligned.h>
+
+/* Registers */
+#define BPF_R0 regs[BPF_REG_0]
+#define BPF_R1 regs[BPF_REG_1]
+#define BPF_R2 regs[BPF_REG_2]
+#define BPF_R3 regs[BPF_REG_3]
+#define BPF_R4 regs[BPF_REG_4]
+#define BPF_R5 regs[BPF_REG_5]
+#define BPF_R6 regs[BPF_REG_6]
+#define BPF_R7 regs[BPF_REG_7]
+#define BPF_R8 regs[BPF_REG_8]
+#define BPF_R9 regs[BPF_REG_9]
+#define BPF_R10        regs[BPF_REG_10]
+
+/* Named registers */
+#define DST    regs[insn->dst_reg]
+#define SRC    regs[insn->src_reg]
+#define FP     regs[BPF_REG_FP]
+#define ARG1   regs[BPF_REG_ARG1]
+#define CTX    regs[BPF_REG_CTX]
+#define IMM    insn->imm
+
+/* No hurry in this branch
+ *
+ * Exported for the bpf jit load helper.
+ */
+void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
+{
+       u8 *ptr = NULL;
+
+       if (k >= SKF_NET_OFF)
+               ptr = skb_network_header(skb) + k - SKF_NET_OFF;
+       else if (k >= SKF_LL_OFF)
+               ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
+       if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
+               return ptr;
+
+       return NULL;
+}
+
+/* Base function for offset calculation. Needs to go into .text section,
+ * therefore keeping it non-static as well; will also be used by JITs
+ * anyway later on, so do not let the compiler omit it.
+ */
+noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
+{
+       return 0;
+}
+
+/**
+ *     __bpf_prog_run - run eBPF program on a given context
+ *     @ctx: is the data we are operating on
+ *     @insn: is the array of eBPF instructions
+ *
+ * Decode and execute eBPF instructions.
+ */
+static unsigned int __bpf_prog_run(void *ctx, const struct bpf_insn *insn)
+{
+       u64 stack[MAX_BPF_STACK / sizeof(u64)];
+       u64 regs[MAX_BPF_REG], tmp;
+       static const void *jumptable[256] = {
+               [0 ... 255] = &&default_label,
+               /* Now overwrite non-defaults ... */
+               /* 32 bit ALU operations */
+               [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
+               [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
+               [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
+               [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
+               [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
+               [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
+               [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
+               [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
+               [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
+               [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
+               [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
+               [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
+               [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
+               [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
+               [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
+               [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
+               [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
+               [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
+               [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
+               [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
+               [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
+               [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
+               [BPF_ALU | BPF_NEG] = &&ALU_NEG,
+               [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
+               [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
+               /* 64 bit ALU operations */
+               [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
+               [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
+               [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
+               [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
+               [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
+               [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
+               [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
+               [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
+               [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
+               [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
+               [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
+               [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
+               [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
+               [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
+               [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
+               [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
+               [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
+               [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
+               [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
+               [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
+               [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
+               [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
+               [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
+               [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
+               [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
+               /* Call instruction */
+               [BPF_JMP | BPF_CALL] = &&JMP_CALL,
+               /* Jumps */
+               [BPF_JMP | BPF_JA] = &&JMP_JA,
+               [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
+               [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
+               [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
+               [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
+               [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
+               [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
+               [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
+               [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
+               [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
+               [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
+               [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
+               [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
+               [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
+               [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
+               /* Program return */
+               [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
+               /* Store instructions */
+               [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
+               [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
+               [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
+               [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
+               [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
+               [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
+               [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
+               [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
+               [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
+               [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
+               /* Load instructions */
+               [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
+               [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
+               [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
+               [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
+               [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
+               [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
+               [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
+               [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
+               [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
+               [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
+       };
+       void *ptr;
+       int off;
+
+#define CONT    ({ insn++; goto select_insn; })
+#define CONT_JMP ({ insn++; goto select_insn; })
+
+       FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
+       ARG1 = (u64) (unsigned long) ctx;
+
+       /* Registers used in classic BPF programs need to be reset first. */
+       regs[BPF_REG_A] = 0;
+       regs[BPF_REG_X] = 0;
+
+select_insn:
+       goto *jumptable[insn->code];
+
+       /* ALU */
+#define ALU(OPCODE, OP)                        \
+       ALU64_##OPCODE##_X:             \
+               DST = DST OP SRC;       \
+               CONT;                   \
+       ALU_##OPCODE##_X:               \
+               DST = (u32) DST OP (u32) SRC;   \
+               CONT;                   \
+       ALU64_##OPCODE##_K:             \
+               DST = DST OP IMM;               \
+               CONT;                   \
+       ALU_##OPCODE##_K:               \
+               DST = (u32) DST OP (u32) IMM;   \
+               CONT;
+
+       ALU(ADD,  +)
+       ALU(SUB,  -)
+       ALU(AND,  &)
+       ALU(OR,   |)
+       ALU(LSH, <<)
+       ALU(RSH, >>)
+       ALU(XOR,  ^)
+       ALU(MUL,  *)
+#undef ALU
+       ALU_NEG:
+               DST = (u32) -DST;
+               CONT;
+       ALU64_NEG:
+               DST = -DST;
+               CONT;
+       ALU_MOV_X:
+               DST = (u32) SRC;
+               CONT;
+       ALU_MOV_K:
+               DST = (u32) IMM;
+               CONT;
+       ALU64_MOV_X:
+               DST = SRC;
+               CONT;
+       ALU64_MOV_K:
+               DST = IMM;
+               CONT;
+       ALU64_ARSH_X:
+               (*(s64 *) &DST) >>= SRC;
+               CONT;
+       ALU64_ARSH_K:
+               (*(s64 *) &DST) >>= IMM;
+               CONT;
+       ALU64_MOD_X:
+               if (unlikely(SRC == 0))
+                       return 0;
+               tmp = DST;
+               DST = do_div(tmp, SRC);
+               CONT;
+       ALU_MOD_X:
+               if (unlikely(SRC == 0))
+                       return 0;
+               tmp = (u32) DST;
+               DST = do_div(tmp, (u32) SRC);
+               CONT;
+       ALU64_MOD_K:
+               tmp = DST;
+               DST = do_div(tmp, IMM);
+               CONT;
+       ALU_MOD_K:
+               tmp = (u32) DST;
+               DST = do_div(tmp, (u32) IMM);
+               CONT;
+       ALU64_DIV_X:
+               if (unlikely(SRC == 0))
+                       return 0;
+               do_div(DST, SRC);
+               CONT;
+       ALU_DIV_X:
+               if (unlikely(SRC == 0))
+                       return 0;
+               tmp = (u32) DST;
+               do_div(tmp, (u32) SRC);
+               DST = (u32) tmp;
+               CONT;
+       ALU64_DIV_K:
+               do_div(DST, IMM);
+               CONT;
+       ALU_DIV_K:
+               tmp = (u32) DST;
+               do_div(tmp, (u32) IMM);
+               DST = (u32) tmp;
+               CONT;
+       ALU_END_TO_BE:
+               switch (IMM) {
+               case 16:
+                       DST = (__force u16) cpu_to_be16(DST);
+                       break;
+               case 32:
+                       DST = (__force u32) cpu_to_be32(DST);
+                       break;
+               case 64:
+                       DST = (__force u64) cpu_to_be64(DST);
+                       break;
+               }
+               CONT;
+       ALU_END_TO_LE:
+               switch (IMM) {
+               case 16:
+                       DST = (__force u16) cpu_to_le16(DST);
+                       break;
+               case 32:
+                       DST = (__force u32) cpu_to_le32(DST);
+                       break;
+               case 64:
+                       DST = (__force u64) cpu_to_le64(DST);
+                       break;
+               }
+               CONT;
+
+       /* CALL */
+       JMP_CALL:
+               /* Function call scratches BPF_R1-BPF_R5 registers,
+                * preserves BPF_R6-BPF_R9, and stores return value
+                * into BPF_R0.
+                */
+               BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
+                                                      BPF_R4, BPF_R5);
+               CONT;
+
+       /* JMP */
+       JMP_JA:
+               insn += insn->off;
+               CONT;
+       JMP_JEQ_X:
+               if (DST == SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JEQ_K:
+               if (DST == IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JNE_X:
+               if (DST != SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JNE_K:
+               if (DST != IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JGT_X:
+               if (DST > SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JGT_K:
+               if (DST > IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JGE_X:
+               if (DST >= SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JGE_K:
+               if (DST >= IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSGT_X:
+               if (((s64) DST) > ((s64) SRC)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSGT_K:
+               if (((s64) DST) > ((s64) IMM)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSGE_X:
+               if (((s64) DST) >= ((s64) SRC)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSGE_K:
+               if (((s64) DST) >= ((s64) IMM)) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSET_X:
+               if (DST & SRC) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_JSET_K:
+               if (DST & IMM) {
+                       insn += insn->off;
+                       CONT_JMP;
+               }
+               CONT;
+       JMP_EXIT:
+               return BPF_R0;
+
+       /* STX and ST and LDX */
+#define LDST(SIZEOP, SIZE)                                             \
+       STX_MEM_##SIZEOP:                                               \
+               *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
+               CONT;                                                   \
+       ST_MEM_##SIZEOP:                                                \
+               *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
+               CONT;                                                   \
+       LDX_MEM_##SIZEOP:                                               \
+               DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
+               CONT;
+
+       LDST(B,   u8)
+       LDST(H,  u16)
+       LDST(W,  u32)
+       LDST(DW, u64)
+#undef LDST
+       STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
+               atomic_add((u32) SRC, (atomic_t *)(unsigned long)
+                          (DST + insn->off));
+               CONT;
+       STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
+               atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
+                            (DST + insn->off));
+               CONT;
+       LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
+               off = IMM;
+load_word:
+               /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns only
+                * appear in programs where ctx == skb. All programs
+                * keep 'ctx' in regs[BPF_REG_CTX] == BPF_R6;
+                * bpf_convert_filter() saves it in BPF_R6, and the
+                * internal BPF verifier checks that BPF_R6 == ctx.
+                *
+                * BPF_ABS and BPF_IND are wrappers of function calls,
+                * so they scratch BPF_R1-BPF_R5 registers, preserve
+                * BPF_R6-BPF_R9, and store return value into BPF_R0.
+                *
+                * Implicit input:
+                *   ctx == skb == BPF_R6 == CTX
+                *
+                * Explicit input:
+                *   SRC == any register
+                *   IMM == 32-bit immediate
+                *
+                * Output:
+                *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
+                */
+
+               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
+               if (likely(ptr != NULL)) {
+                       BPF_R0 = get_unaligned_be32(ptr);
+                       CONT;
+               }
+
+               return 0;
+       LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
+               off = IMM;
+load_half:
+               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
+               if (likely(ptr != NULL)) {
+                       BPF_R0 = get_unaligned_be16(ptr);
+                       CONT;
+               }
+
+               return 0;
+       LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
+               off = IMM;
+load_byte:
+               ptr = bpf_load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
+               if (likely(ptr != NULL)) {
+                       BPF_R0 = *(u8 *)ptr;
+                       CONT;
+               }
+
+               return 0;
+       LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
+               off = IMM + SRC;
+               goto load_word;
+       LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
+               off = IMM + SRC;
+               goto load_half;
+       LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
+               off = IMM + SRC;
+               goto load_byte;
+
+       default_label:
+               /* If we ever reach this, we have a bug somewhere. */
+               WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
+               return 0;
+}
+
+void __weak bpf_int_jit_compile(struct bpf_prog *prog)
+{
+}
+
+/**
+ *     bpf_prog_select_runtime - select execution runtime for BPF program
+ *     @fp: bpf_prog populated with internal BPF program
+ *
+ * Try to JIT the internal BPF program; if no JIT is available, fall back
+ * to the interpreter. The BPF program will be executed via the
+ * BPF_PROG_RUN() macro.
+ */
+void bpf_prog_select_runtime(struct bpf_prog *fp)
+{
+       fp->bpf_func = (void *) __bpf_prog_run;
+
+       /* Probe if internal BPF can be JITed */
+       bpf_int_jit_compile(fp);
+}
+EXPORT_SYMBOL_GPL(bpf_prog_select_runtime);
+
+/* free internal BPF program */
+void bpf_prog_free(struct bpf_prog *fp)
+{
+       bpf_jit_free(fp);
+}
+EXPORT_SYMBOL_GPL(bpf_prog_free);
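For readers unfamiliar with the dispatch style used in __bpf_prog_run() above, here is a standalone userspace sketch of the same labels-as-values technique (a GCC extension): each opcode indexes a jump table and a CONT macro advances to the next instruction, so there is no central switch(). The two-opcode mini-VM and all names here are invented for illustration.

#include <stdint.h>
#include <stdio.h>

enum { OP_ADD, OP_EXIT };

struct insn { uint8_t code; int32_t imm; };

static int64_t run(const struct insn *insn)
{
	/* One label per opcode, mirroring the interpreter above. */
	static const void *jumptable[] = {
		[OP_ADD]  = &&do_add,
		[OP_EXIT] = &&do_exit,
	};
	int64_t acc = 0;
#define CONT do { insn++; goto *jumptable[insn->code]; } while (0)

	goto *jumptable[insn->code];
do_add:
	acc += insn->imm;
	CONT;
do_exit:
	return acc;
#undef CONT
}

int main(void)
{
	struct insn prog[] = { { OP_ADD, 40 }, { OP_ADD, 2 }, { OP_EXIT, 0 } };

	printf("%lld\n", (long long)run(prog)); /* prints 42 */
	return 0;
}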
index 7868fc3..70776ae 100644 (file)
@@ -1648,10 +1648,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                         int flags, const char *unused_dev_name,
                         void *data)
 {
+       struct super_block *pinned_sb = NULL;
+       struct cgroup_subsys *ss;
        struct cgroup_root *root;
        struct cgroup_sb_opts opts;
        struct dentry *dentry;
        int ret;
+       int i;
        bool new_sb;
 
        /*
@@ -1677,6 +1680,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                goto out_unlock;
        }
 
+       /*
+        * Destruction of cgroup root is asynchronous, so subsystems may
+        * still be dying after the previous unmount.  Let's drain the
+        * dying subsystems.  We just need to ensure that the ones
+        * unmounted previously finish dying and don't care about new ones
+        * starting.  Testing ref liveness is good enough.
+        */
+       for_each_subsys(ss, i) {
+               if (!(opts.subsys_mask & (1 << i)) ||
+                   ss->root == &cgrp_dfl_root)
+                       continue;
+
+               if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
+                       mutex_unlock(&cgroup_mutex);
+                       msleep(10);
+                       ret = restart_syscall();
+                       goto out_free;
+               }
+               cgroup_put(&ss->root->cgrp);
+       }
+
        for_each_root(root) {
                bool name_match = false;
 
@@ -1717,15 +1741,23 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
                }
 
                /*
-                * A root's lifetime is governed by its root cgroup.
-                * tryget_live failure indicate that the root is being
-                * destroyed.  Wait for destruction to complete so that the
-                * subsystems are free.  We can use wait_queue for the wait
-                * but this path is super cold.  Let's just sleep for a bit
-                * and retry.
+                * We want to reuse @root whose lifetime is governed by its
+                * ->cgrp.  Let's check whether @root is alive and keep it
+                * that way.  As cgroup_kill_sb() can happen anytime, we
+                * want to block it by pinning the sb so that @root doesn't
+                * get killed before mount is complete.
+                *
+                * With the sb pinned, tryget_live can reliably indicate
+                * whether @root can be reused.  If it's being killed,
+                * drain it.  We can use wait_queue for the wait but this
+                * path is super cold.  Let's just sleep a bit and retry.
                 */
-               if (!percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
+               pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
+               if (IS_ERR(pinned_sb) ||
+                   !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
                        mutex_unlock(&cgroup_mutex);
+                       if (!IS_ERR_OR_NULL(pinned_sb))
+                               deactivate_super(pinned_sb);
                        msleep(10);
                        ret = restart_syscall();
                        goto out_free;
@@ -1770,6 +1802,16 @@ out_free:
                                CGROUP_SUPER_MAGIC, &new_sb);
        if (IS_ERR(dentry) || !new_sb)
                cgroup_put(&root->cgrp);
+
+       /*
+        * If @pinned_sb, we're reusing an existing root and holding an
+        * extra ref on its sb.  Mount is complete.  Put the extra ref.
+        */
+       if (pinned_sb) {
+               WARN_ON(new_sb);
+               deactivate_super(pinned_sb);
+       }
+
        return dentry;
 }
 
@@ -3328,7 +3370,7 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
 
        rcu_read_lock();
        css_for_each_child(child, css) {
-               if (css->flags & CSS_ONLINE) {
+               if (child->flags & CSS_ONLINE) {
                        ret = true;
                        break;
                }
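The retry paths added to cgroup_mount() above rely on restart_syscall() plus a short msleep() to politely spin until the dying root finishes its teardown. For reference, restart_syscall() is roughly the following (per include/linux/sched.h); it simply flags the task so the syscall exit path re-issues mount(2):

static inline int restart_syscall(void)
{
	set_tsk_thread_flag(current, TIF_SIGPENDING);
	return -ERESTARTNOINTR;
}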
index 019d450..5664985 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/sched.h>
 #include <linux/hardirq.h>
 #include <linux/export.h>
+#include <linux/kprobes.h>
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/context_tracking.h>
@@ -104,6 +105,7 @@ void context_tracking_user_enter(void)
        }
        local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_user_enter);
 
 #ifdef CONFIG_PREEMPT
 /**
@@ -181,6 +183,7 @@ void context_tracking_user_exit(void)
        }
        local_irq_restore(flags);
 }
+NOKPROBE_SYMBOL(context_tracking_user_exit);
 
 /**
  * __context_tracking_task_switch - context switch the syscall callbacks
index f6b33c6..116a416 100644 (file)
@@ -1181,7 +1181,13 @@ done:
 
 int current_cpuset_is_being_rebound(void)
 {
-       return task_cs(current) == cpuset_being_rebound;
+       int ret;
+
+       rcu_read_lock();
+       ret = task_cs(current) == cpuset_being_rebound;
+       rcu_read_unlock();
+
+       return ret;
 }
 
 static int update_relax_domain_level(struct cpuset *cs, s64 val)
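The current_cpuset_is_being_rebound() fix above is the generic RCU read-side pattern: task_cs() dereferences an RCU-protected pointer, so even a mere comparison of its result must sit inside rcu_read_lock()/rcu_read_unlock(). A minimal kernel-context sketch of the pattern (the struct and variable are illustrative, not from the patch):

#include <linux/rcupdate.h>

struct foo { int val; };
static struct foo __rcu *global_foo;

static int read_foo_val(void)
{
	struct foo *f;
	int ret = 0;

	/* Pin the object for the duration of the dereference. */
	rcu_read_lock();
	f = rcu_dereference(global_foo);
	if (f)
		ret = f->val;
	rcu_read_unlock();

	return ret;
}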
@@ -1617,7 +1623,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
         * resources, wait for the previously scheduled operations before
         * proceeding, so that we don't end up repeatedly removing tasks added
         * after execution capability is restored.
+        *
+        * cpuset_hotplug_work calls back into cgroup core via
+        * cgroup_transfer_tasks() and waiting for it from a cgroupfs
+        * operation like this one can lead to a deadlock through kernfs
+        * active_ref protection.  Let's break the protection.  Losing the
+        * protection is okay as we check whether @cs is online after
+        * grabbing cpuset_mutex anyway.  This only happens on the legacy
+        * hierarchies.
         */
+       css_get(&cs->css);
+       kernfs_break_active_protection(of->kn);
        flush_work(&cpuset_hotplug_work);
 
        mutex_lock(&cpuset_mutex);
@@ -1645,6 +1661,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
        free_trial_cpuset(trialcs);
 out_unlock:
        mutex_unlock(&cpuset_mutex);
+       kernfs_unbreak_active_protection(of->kn);
+       css_put(&cs->css);
        return retval ?: nbytes;
 }
 
index 5fa58e4..6b17ac1 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/mm_types.h>
 #include <linux/cgroup.h>
 #include <linux/module.h>
+#include <linux/mman.h>
 
 #include "internal.h"
 
@@ -2319,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
        next_parent = rcu_dereference(next_ctx->parent_ctx);
 
        /* If neither context have a parent context; they cannot be clones. */
-       if (!parent && !next_parent)
+       if (!parent || !next_parent)
                goto unlock;
 
        if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
@@ -5128,6 +5129,7 @@ struct perf_mmap_event {
        int                     maj, min;
        u64                     ino;
        u64                     ino_generation;
+       u32                     prot, flags;
 
        struct {
                struct perf_event_header        header;
@@ -5169,6 +5171,8 @@ static void perf_event_mmap_output(struct perf_event *event,
                mmap_event->event_id.header.size += sizeof(mmap_event->min);
                mmap_event->event_id.header.size += sizeof(mmap_event->ino);
                mmap_event->event_id.header.size += sizeof(mmap_event->ino_generation);
+               mmap_event->event_id.header.size += sizeof(mmap_event->prot);
+               mmap_event->event_id.header.size += sizeof(mmap_event->flags);
        }
 
        perf_event_header__init_id(&mmap_event->event_id.header, &sample, event);
@@ -5187,6 +5191,8 @@ static void perf_event_mmap_output(struct perf_event *event,
                perf_output_put(&handle, mmap_event->min);
                perf_output_put(&handle, mmap_event->ino);
                perf_output_put(&handle, mmap_event->ino_generation);
+               perf_output_put(&handle, mmap_event->prot);
+               perf_output_put(&handle, mmap_event->flags);
        }
 
        __output_copy(&handle, mmap_event->file_name,
@@ -5205,6 +5211,7 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
        struct file *file = vma->vm_file;
        int maj = 0, min = 0;
        u64 ino = 0, gen = 0;
+       u32 prot = 0, flags = 0;
        unsigned int size;
        char tmp[16];
        char *buf = NULL;
@@ -5235,6 +5242,28 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
                gen = inode->i_generation;
                maj = MAJOR(dev);
                min = MINOR(dev);
+
+               if (vma->vm_flags & VM_READ)
+                       prot |= PROT_READ;
+               if (vma->vm_flags & VM_WRITE)
+                       prot |= PROT_WRITE;
+               if (vma->vm_flags & VM_EXEC)
+                       prot |= PROT_EXEC;
+
+               if (vma->vm_flags & VM_MAYSHARE)
+                       flags = MAP_SHARED;
+               else
+                       flags = MAP_PRIVATE;
+
+               if (vma->vm_flags & VM_DENYWRITE)
+                       flags |= MAP_DENYWRITE;
+               if (vma->vm_flags & VM_MAYEXEC)
+                       flags |= MAP_EXECUTABLE;
+               if (vma->vm_flags & VM_LOCKED)
+                       flags |= MAP_LOCKED;
+               if (vma->vm_flags & VM_HUGETLB)
+                       flags |= MAP_HUGETLB;
+
                goto got_name;
        } else {
                name = (char *)arch_vma_name(vma);
@@ -5275,6 +5304,8 @@ got_name:
        mmap_event->min = min;
        mmap_event->ino = ino;
        mmap_event->ino_generation = gen;
+       mmap_event->prot = prot;
+       mmap_event->flags = flags;
 
        if (!(vma->vm_flags & VM_EXEC))
                mmap_event->event_id.header.misc |= PERF_RECORD_MISC_MMAP_DATA;
@@ -5315,6 +5346,8 @@ void perf_event_mmap(struct vm_area_struct *vma)
                /* .min (attr_mmap2 only) */
                /* .ino (attr_mmap2 only) */
                /* .ino_generation (attr_mmap2 only) */
+               /* .prot (attr_mmap2 only) */
+               /* .flags (attr_mmap2 only) */
        };
 
        perf_event_mmap_event(&mmap_event);
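With the prot/flags fields added (and the attr->mmap2 check removed further below), userspace sees the following PERF_RECORD_MMAP2 layout, per the record documentation in the uapi perf_event.h; the struct name here is illustrative:

#include <linux/perf_event.h>
#include <linux/types.h>

struct perf_record_mmap2 {
	struct perf_event_header header;
	__u32 pid, tid;
	__u64 addr, len, pgoff;
	__u32 maj, min;
	__u64 ino, ino_generation;
	__u32 prot, flags;	/* the two new fields */
	char  filename[];
	/* followed by struct sample_id when sample_id_all is set */
};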
@@ -6897,10 +6930,6 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
        if (ret)
                return -EFAULT;
 
-       /* disabled for now */
-       if (attr->mmap2)
-               return -EINVAL;
-
        if (attr->__reserved_1)
                return -EINVAL;
 
@@ -7429,7 +7458,19 @@ __perf_event_exit_task(struct perf_event *child_event,
                         struct perf_event_context *child_ctx,
                         struct task_struct *child)
 {
-       perf_remove_from_context(child_event, true);
+       /*
+        * Do not destroy the 'original' grouping; because of the context
+        * switch optimization the original events could've ended up in a
+        * random child task.
+        *
+        * If we were to destroy the original group, all group related
+        * operations would cease to function properly after this random
+        * child dies.
+        *
+        * Do destroy all inherited groups; we don't care about those,
+        * and being thorough is better.
+        */
+       perf_remove_from_context(child_event, !!child_event->parent);
 
        /*
         * It can happen that the parent exits first, and has events
@@ -7445,7 +7486,7 @@ __perf_event_exit_task(struct perf_event *child_event,
 static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 {
        struct perf_event *child_event, *next;
-       struct perf_event_context *child_ctx;
+       struct perf_event_context *child_ctx, *parent_ctx;
        unsigned long flags;
 
        if (likely(!child->perf_event_ctxp[ctxn])) {
@@ -7470,6 +7511,15 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        raw_spin_lock(&child_ctx->lock);
        task_ctx_sched_out(child_ctx);
        child->perf_event_ctxp[ctxn] = NULL;
+
+       /*
+        * In order to avoid freeing: child_ctx->parent_ctx->task
+        * under perf_event_context::lock, grab another reference.
+        */
+       parent_ctx = child_ctx->parent_ctx;
+       if (parent_ctx)
+               get_ctx(parent_ctx);
+
        /*
         * If this context is a clone; unclone it so it can't get
         * swapped to another process while we're removing all
@@ -7479,6 +7529,13 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
        update_context_time(child_ctx);
        raw_spin_unlock_irqrestore(&child_ctx->lock, flags);
 
+       /*
+        * Now that we no longer hold perf_event_context::lock, drop
+        * our extra child_ctx->parent_ctx reference.
+        */
+       if (parent_ctx)
+               put_ctx(parent_ctx);
+
        /*
         * Report the task dead after unscheduling the events so that we
         * won't get any samples after PERF_RECORD_EXIT. We can however still
index c445e39..6f3254e 100644 (file)
@@ -846,7 +846,7 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u
 {
        int err;
 
-       if (!consumer_del(uprobe, uc))  /* WARN? */
+       if (WARN_ON(!consumer_del(uprobe, uc)))
                return;
 
        err = register_for_each_vma(uprobe, NULL);
@@ -927,7 +927,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
        int ret = -ENOENT;
 
        uprobe = find_uprobe(inode, offset);
-       if (!uprobe)
+       if (WARN_ON(!uprobe))
                return ret;
 
        down_write(&uprobe->register_rwsem);
@@ -952,7 +952,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
        struct uprobe *uprobe;
 
        uprobe = find_uprobe(inode, offset);
-       if (!uprobe)
+       if (WARN_ON(!uprobe))
                return;
 
        down_write(&uprobe->register_rwsem);
index d2799d1..6a13c46 100644 (file)
@@ -1487,7 +1487,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 
        total_forks++;
        spin_unlock(&current->sighand->siglock);
+       syscall_tracepoint_update(p);
        write_unlock_irq(&tasklist_lock);
+
        proc_fork_connector(p);
        cgroup_post_fork(p);
        if (clone_flags & CLONE_THREAD)
index 7339e42..1487a12 100644 (file)
@@ -455,9 +455,9 @@ EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
  */
 void irq_free_hwirqs(unsigned int from, int cnt)
 {
-       int i;
+       int i, j;
 
-       for (i = from; cnt > 0; i++, cnt--) {
+       for (i = from, j = cnt; j > 0; i++, j--) {
                irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
                arch_teardown_hwirq(i);
        }
index 6748688..369f41a 100644 (file)
@@ -1617,6 +1617,7 @@ static int __init crash_save_vmcoreinfo_init(void)
 #ifdef CONFIG_MEMORY_FAILURE
        VMCOREINFO_NUMBER(PG_hwpoison);
 #endif
+       VMCOREINFO_NUMBER(PG_head_mask);
        VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
 
        arch_crash_save_vmcoreinfo();
index 3214289..734e9a7 100644 (file)
@@ -2037,19 +2037,23 @@ static int __init populate_kprobe_blacklist(unsigned long *start,
 {
        unsigned long *iter;
        struct kprobe_blacklist_entry *ent;
-       unsigned long offset = 0, size = 0;
+       unsigned long entry, offset = 0, size = 0;
 
        for (iter = start; iter < end; iter++) {
-               if (!kallsyms_lookup_size_offset(*iter, &size, &offset)) {
-                       pr_err("Failed to find blacklist %p\n", (void *)*iter);
+               entry = arch_deref_entry_point((void *)*iter);
+
+               if (!kernel_text_address(entry) ||
+                   !kallsyms_lookup_size_offset(entry, &size, &offset)) {
+                       pr_err("Failed to find blacklist at %p\n",
+                               (void *)entry);
                        continue;
                }
 
                ent = kmalloc(sizeof(*ent), GFP_KERNEL);
                if (!ent)
                        return -ENOMEM;
-               ent->start_addr = *iter;
-               ent->end_addr = *iter + size;
+               ent->start_addr = entry;
+               ent->end_addr = entry + size;
                INIT_LIST_HEAD(&ent->list);
                list_add_tail(&ent->list, &kprobe_blacklist);
        }
index 838dc9e..be9ee15 100644 (file)
  * called from interrupt context and we have preemption disabled while
  * spinning.
  */
-static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node);
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
+
+/*
+ * We use the value 0 to represent "no CPU", thus the encoded value
+ * will be the CPU number incremented by 1.
+ */
+static inline int encode_cpu(int cpu_nr)
+{
+       return cpu_nr + 1;
+}
+
+static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
+{
+       int cpu_nr = encoded_cpu_val - 1;
+
+       return per_cpu_ptr(&osq_node, cpu_nr);
+}
 
 /*
  * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
  * Can return NULL in case we were the last queued and we updated @lock instead.
  */
-static inline struct optimistic_spin_queue *
-osq_wait_next(struct optimistic_spin_queue **lock,
-             struct optimistic_spin_queue *node,
-             struct optimistic_spin_queue *prev)
+static inline struct optimistic_spin_node *
+osq_wait_next(struct optimistic_spin_queue *lock,
+             struct optimistic_spin_node *node,
+             struct optimistic_spin_node *prev)
 {
-       struct optimistic_spin_queue *next = NULL;
+       struct optimistic_spin_node *next = NULL;
+       int curr = encode_cpu(smp_processor_id());
+       int old;
+
+       /*
+        * If there is a prev node in the queue, the 'old' value will be
+        * the prev node's CPU #; else it's set to OSQ_UNLOCKED_VAL since
+        * we're currently last in the queue and it will become empty.
+        */
+       old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
 
        for (;;) {
-               if (*lock == node && cmpxchg(lock, node, prev) == node) {
+               if (atomic_read(&lock->tail) == curr &&
+                   atomic_cmpxchg(&lock->tail, curr, old) == curr) {
                        /*
                         * We were the last queued, we moved @lock back. @prev
                         * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
        return next;
 }
 
-bool osq_lock(struct optimistic_spin_queue **lock)
+bool osq_lock(struct optimistic_spin_queue *lock)
 {
-       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-       struct optimistic_spin_queue *prev, *next;
+       struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
+       struct optimistic_spin_node *prev, *next;
+       int curr = encode_cpu(smp_processor_id());
+       int old;
 
        node->locked = 0;
        node->next = NULL;
+       node->cpu = curr;
 
-       node->prev = prev = xchg(lock, node);
-       if (likely(prev == NULL))
+       old = atomic_xchg(&lock->tail, curr);
+       if (old == OSQ_UNLOCKED_VAL)
                return true;
 
+       prev = decode_cpu(old);
+       node->prev = prev;
        ACCESS_ONCE(prev->next) = node;
 
        /*
@@ -149,20 +180,21 @@ unqueue:
        return false;
 }
 
-void osq_unlock(struct optimistic_spin_queue **lock)
+void osq_unlock(struct optimistic_spin_queue *lock)
 {
-       struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node);
-       struct optimistic_spin_queue *next;
+       struct optimistic_spin_node *node, *next;
+       int curr = encode_cpu(smp_processor_id());
 
        /*
         * Fast path for the uncontended case.
         */
-       if (likely(cmpxchg(lock, node, NULL) == node))
+       if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
                return;
 
        /*
         * Second most likely case.
         */
+       node = this_cpu_ptr(&osq_node);
        next = xchg(&node->next, NULL);
        if (next) {
                ACCESS_ONCE(next->locked) = 1;
index a2dbac4..74356dc 100644 (file)
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
  * mutex_lock()/rwsem_down_{read,write}() etc.
  */
 
-struct optimistic_spin_queue {
-       struct optimistic_spin_queue *next, *prev;
+struct optimistic_spin_node {
+       struct optimistic_spin_node *next, *prev;
        int locked; /* 1 if lock acquired */
+       int cpu; /* encoded CPU # value */
 };
 
-extern bool osq_lock(struct optimistic_spin_queue **lock);
-extern void osq_unlock(struct optimistic_spin_queue **lock);
+extern bool osq_lock(struct optimistic_spin_queue *lock);
+extern void osq_unlock(struct optimistic_spin_queue *lock);
 
 #endif /* __LINUX_MCS_SPINLOCK_H */
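The code above references osq_lock_init() and OSQ_UNLOCKED_VAL, which this series introduces in include/linux/osq_lock.h. A sketch of those definitions, consistent with the encode_cpu()/decode_cpu() helpers: the lock shrinks from a pointer pair to a single atomic_t holding an encoded CPU number.

struct optimistic_spin_queue {
	/*
	 * Stores an encoded value of the CPU # of the tail node in the
	 * queue. If the queue is empty, OSQ_UNLOCKED_VAL is stored.
	 */
	atomic_t tail;
};

#define OSQ_UNLOCKED_VAL (0)

static inline void osq_lock_init(struct optimistic_spin_queue *lock)
{
	atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
}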
index bc73d33..acca2c1 100644 (file)
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
        INIT_LIST_HEAD(&lock->wait_list);
        mutex_clear_owner(lock);
 #ifdef CONFIG_MUTEX_SPIN_ON_OWNER
-       lock->osq = NULL;
+       osq_lock_init(&lock->osq);
 #endif
 
        debug_mutex_init(lock, name, key);
index 14193d5..ab29b6a 100644 (file)
@@ -31,3 +31,8 @@ static inline int debug_rt_mutex_detect_deadlock(struct rt_mutex_waiter *waiter,
 {
        return (waiter != NULL);
 }
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+       debug_rt_mutex_print_deadlock(w);
+}
index a620d4d..fc60594 100644 (file)
@@ -83,6 +83,47 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
                owner = *p;
        } while (cmpxchg(p, owner, owner | RT_MUTEX_HAS_WAITERS) != owner);
 }
+
+/*
+ * Safe fastpath aware unlock:
+ * 1) Clear the waiters bit
+ * 2) Drop lock->wait_lock
+ * 3) Try to unlock the lock with cmpxchg
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       struct task_struct *owner = rt_mutex_owner(lock);
+
+       clear_rt_mutex_waiters(lock);
+       raw_spin_unlock(&lock->wait_lock);
+       /*
+        * If a new waiter comes in between the unlock and the cmpxchg
+        * we have two situations:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        * cmpxchg(p, owner, 0) == owner
+        *                                      mark_rt_mutex_waiters(lock);
+        *                                      acquire(lock);
+        * or:
+        *
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      mark_rt_mutex_waiters(lock);
+        *
+        * cmpxchg(p, owner, 0) != owner
+        *                                      enqueue_waiter();
+        *                                      unlock(wait_lock);
+        * lock(wait_lock);
+        * wake waiter();
+        * unlock(wait_lock);
+        *                                      lock(wait_lock);
+        *                                      acquire(lock);
+        */
+       return rt_mutex_cmpxchg(lock, owner, NULL);
+}
+
 #else
 # define rt_mutex_cmpxchg(l,c,n)       (0)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
@@ -90,6 +131,17 @@ static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
        lock->owner = (struct task_struct *)
                        ((unsigned long)lock->owner | RT_MUTEX_HAS_WAITERS);
 }
+
+/*
+ * Simple slow path only version: lock->owner is protected by lock->wait_lock.
+ */
+static inline bool unlock_rt_mutex_safe(struct rt_mutex *lock)
+       __releases(lock->wait_lock)
+{
+       lock->owner = NULL;
+       raw_spin_unlock(&lock->wait_lock);
+       return true;
+}
 #endif
 
 static inline int
@@ -260,27 +312,36 @@ static void rt_mutex_adjust_prio(struct task_struct *task)
  */
 int max_lock_depth = 1024;
 
+static inline struct rt_mutex *task_blocked_on_lock(struct task_struct *p)
+{
+       return p->pi_blocked_on ? p->pi_blocked_on->lock : NULL;
+}
+
 /*
  * Adjust the priority chain. Also used for deadlock detection.
  * Decreases task's usage by one - may thus free the task.
  *
- * @task: the task owning the mutex (owner) for which a chain walk is probably
- *       needed
+ * @task:      the task owning the mutex (owner) for which a chain walk is
+ *             probably needed
  * @deadlock_detect: do we have to carry out deadlock detection?
- * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
- *            things for a task that has just got its priority adjusted, and
- *            is waiting on a mutex)
+ * @orig_lock: the mutex (can be NULL if we are walking the chain to recheck
+ *             things for a task that has just got its priority adjusted, and
+ *             is waiting on a mutex)
+ * @next_lock: the mutex on which the owner of @orig_lock was blocked before
+ *             we dropped its pi_lock. Is never dereferenced, only used for
+ *             comparison to detect lock chain changes.
  * @orig_waiter: rt_mutex_waiter struct for the task that has just donated
- *              its priority to the mutex owner (can be NULL in the case
- *              depicted above or if the top waiter is gone away and we are
- *              actually deboosting the owner)
- * @top_task: the current top waiter
+ *             its priority to the mutex owner (can be NULL in the case
+ *             depicted above or if the top waiter is gone away and we are
+ *             actually deboosting the owner)
+ * @top_task:  the current top waiter
  *
  * Returns 0 or -EDEADLK.
  */
 static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                                      int deadlock_detect,
                                      struct rt_mutex *orig_lock,
+                                     struct rt_mutex *next_lock,
                                      struct rt_mutex_waiter *orig_waiter,
                                      struct task_struct *top_task)
 {
@@ -314,7 +375,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                }
                put_task_struct(task);
 
-               return deadlock_detect ? -EDEADLK : 0;
+               return -EDEADLK;
        }
  retry:
        /*
@@ -338,6 +399,18 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (orig_waiter && !rt_mutex_owner(orig_lock))
                goto out_unlock_pi;
 
+       /*
+        * We dropped all locks after taking a refcount on @task, so
+        * the task might have moved on in the lock chain or even left
+        * the chain completely and blocks now on an unrelated lock or
+        * on @orig_lock.
+        *
+        * We stored the lock on which @task was blocked in @next_lock,
+        * so we can detect the chain change.
+        */
+       if (next_lock != waiter->lock)
+               goto out_unlock_pi;
+
        /*
         * Drop out, when the task has no waiters. Note,
         * top_waiter can be NULL, when we are in the deboosting
@@ -377,7 +450,7 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
        if (lock == orig_lock || rt_mutex_owner(lock) == top_task) {
                debug_rt_mutex_deadlock(deadlock_detect, orig_waiter, lock);
                raw_spin_unlock(&lock->wait_lock);
-               ret = deadlock_detect ? -EDEADLK : 0;
+               ret = -EDEADLK;
                goto out_unlock_pi;
        }
 
@@ -422,11 +495,26 @@ static int rt_mutex_adjust_prio_chain(struct task_struct *task,
                __rt_mutex_adjust_prio(task);
        }
 
+       /*
+        * Check whether the task which owns the current lock is pi
+        * blocked itself. If yes we store a pointer to the lock for
+        * the lock chain change detection above. After we dropped
+        * task->pi_lock next_lock cannot be dereferenced anymore.
+        */
+       next_lock = task_blocked_on_lock(task);
+
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
        top_waiter = rt_mutex_top_waiter(lock);
        raw_spin_unlock(&lock->wait_lock);
 
+       /*
+        * We reached the end of the lock chain. Stop right here. No
+        * point in going back just to figure that out.
+        */
+       if (!next_lock)
+               goto out_put_task;
+
        if (!detect_deadlock && waiter != top_waiter)
                goto out_put_task;
 
@@ -536,8 +624,9 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 {
        struct task_struct *owner = rt_mutex_owner(lock);
        struct rt_mutex_waiter *top_waiter = waiter;
-       unsigned long flags;
+       struct rt_mutex *next_lock;
        int chain_walk = 0, res;
+       unsigned long flags;
 
        /*
         * Early deadlock detection. We really don't want the task to
@@ -548,7 +637,7 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
         * which is wrong, as the other waiter is not in a deadlock
         * situation.
         */
-       if (detect_deadlock && owner == task)
+       if (owner == task)
                return -EDEADLK;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -569,20 +658,28 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
        if (!owner)
                return 0;
 
+       raw_spin_lock_irqsave(&owner->pi_lock, flags);
        if (waiter == rt_mutex_top_waiter(lock)) {
-               raw_spin_lock_irqsave(&owner->pi_lock, flags);
                rt_mutex_dequeue_pi(owner, top_waiter);
                rt_mutex_enqueue_pi(owner, waiter);
 
                __rt_mutex_adjust_prio(owner);
                if (owner->pi_blocked_on)
                        chain_walk = 1;
-               raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
-       }
-       else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock))
+       } else if (debug_rt_mutex_detect_deadlock(waiter, detect_deadlock)) {
                chain_walk = 1;
+       }
 
-       if (!chain_walk)
+       /* Store the lock on which owner is blocked or NULL */
+       next_lock = task_blocked_on_lock(owner);
+
+       raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
+       /*
+        * Even if full deadlock detection is on, if the owner is not
+        * blocked itself, we can avoid finding this out in the chain
+        * walk.
+        */
+       if (!chain_walk || !next_lock)
                return 0;
 
        /*
@@ -594,8 +691,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock, waiter,
-                                        task);
+       res = rt_mutex_adjust_prio_chain(owner, detect_deadlock, lock,
+                                        next_lock, waiter, task);
 
        raw_spin_lock(&lock->wait_lock);
 
@@ -605,7 +702,8 @@ static int task_blocks_on_rt_mutex(struct rt_mutex *lock,
 /*
  * Wake up the next waiter on the lock.
  *
- * Remove the top waiter from the current tasks waiter list and wake it up.
+ * Remove the top waiter from the current task's pi waiter list and
+ * wake it up.
  *
  * Called with lock->wait_lock held.
  */
@@ -626,10 +724,23 @@ static void wakeup_next_waiter(struct rt_mutex *lock)
         */
        rt_mutex_dequeue_pi(current, waiter);
 
-       rt_mutex_set_owner(lock, NULL);
+       /*
+        * As we are waking up the top waiter, and the waiter stays
+        * queued on the lock until it gets the lock, this lock
+        * obviously has waiters. Just set the bit here and this has
+        * the added benefit of forcing all new tasks into the
+        * slow path making sure no task of lower priority than
+        * the top waiter can steal this lock.
+        */
+       lock->owner = (void *) RT_MUTEX_HAS_WAITERS;
 
        raw_spin_unlock_irqrestore(&current->pi_lock, flags);
 
+       /*
+        * It's safe to dereference waiter as it cannot go away as
+        * long as we hold lock->wait_lock. The waiter task needs to
+        * acquire it in order to dequeue the waiter.
+        */
        wake_up_process(waiter->task);
 }
 
@@ -644,8 +755,8 @@ static void remove_waiter(struct rt_mutex *lock,
 {
        int first = (waiter == rt_mutex_top_waiter(lock));
        struct task_struct *owner = rt_mutex_owner(lock);
+       struct rt_mutex *next_lock = NULL;
        unsigned long flags;
-       int chain_walk = 0;
 
        raw_spin_lock_irqsave(&current->pi_lock, flags);
        rt_mutex_dequeue(lock, waiter);
@@ -669,13 +780,13 @@ static void remove_waiter(struct rt_mutex *lock,
                }
                __rt_mutex_adjust_prio(owner);
 
-               if (owner->pi_blocked_on)
-                       chain_walk = 1;
+               /* Store the lock on which owner is blocked or NULL */
+               next_lock = task_blocked_on_lock(owner);
 
                raw_spin_unlock_irqrestore(&owner->pi_lock, flags);
        }
 
-       if (!chain_walk)
+       if (!next_lock)
                return;
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
@@ -683,7 +794,7 @@ static void remove_waiter(struct rt_mutex *lock,
 
        raw_spin_unlock(&lock->wait_lock);
 
-       rt_mutex_adjust_prio_chain(owner, 0, lock, NULL, current);
+       rt_mutex_adjust_prio_chain(owner, 0, lock, next_lock, NULL, current);
 
        raw_spin_lock(&lock->wait_lock);
 }
@@ -696,6 +807,7 @@ static void remove_waiter(struct rt_mutex *lock,
 void rt_mutex_adjust_pi(struct task_struct *task)
 {
        struct rt_mutex_waiter *waiter;
+       struct rt_mutex *next_lock;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&task->pi_lock, flags);
@@ -706,12 +818,13 @@ void rt_mutex_adjust_pi(struct task_struct *task)
                raw_spin_unlock_irqrestore(&task->pi_lock, flags);
                return;
        }
-
+       next_lock = waiter->lock;
        raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
        /* gets dropped in rt_mutex_adjust_prio_chain()! */
        get_task_struct(task);
-       rt_mutex_adjust_prio_chain(task, 0, NULL, NULL, task);
+
+       rt_mutex_adjust_prio_chain(task, 0, NULL, next_lock, NULL, task);
 }
 
 /**
@@ -763,6 +876,26 @@ __rt_mutex_slowlock(struct rt_mutex *lock, int state,
        return ret;
 }
 
+static void rt_mutex_handle_deadlock(int res, int detect_deadlock,
+                                    struct rt_mutex_waiter *w)
+{
+       /*
+        * If the result is not -EDEADLOCK or the caller requested
+        * deadlock detection, nothing to do here.
+        */
+       if (res != -EDEADLOCK || detect_deadlock)
+               return;
+
+       /*
+        * Yell loudly and stop the task right here.
+        */
+       rt_mutex_print_deadlock(w);
+       while (1) {
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule();
+       }
+}
+
 /*
  * Slow path lock function:
  */
@@ -802,8 +935,10 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
 
        set_current_state(TASK_RUNNING);
 
-       if (unlikely(ret))
+       if (unlikely(ret)) {
                remove_waiter(lock, &waiter);
+               rt_mutex_handle_deadlock(ret, detect_deadlock, &waiter);
+       }
 
        /*
         * try_to_take_rt_mutex() sets the waiter bit
@@ -859,12 +994,49 @@ rt_mutex_slowunlock(struct rt_mutex *lock)
 
        rt_mutex_deadlock_account_unlock(current);
 
-       if (!rt_mutex_has_waiters(lock)) {
-               lock->owner = NULL;
-               raw_spin_unlock(&lock->wait_lock);
-               return;
+       /*
+        * We must be careful here if the fast path is enabled. If we
+        * have no waiters queued we cannot set owner to NULL here
+        * because of:
+        *
+        * foo->lock->owner = NULL;
+        *                      rtmutex_lock(foo->lock);   <- fast path
+        *                      free = atomic_dec_and_test(foo->refcnt);
+        *                      rtmutex_unlock(foo->lock); <- fast path
+        *                      if (free)
+        *                              kfree(foo);
+        * raw_spin_unlock(foo->lock->wait_lock);
+        *
+        * So for the fastpath enabled kernel:
+        *
+        * Nothing can set the waiters bit as long as we hold
+        * lock->wait_lock. So we do the following sequence:
+        *
+        *      owner = rt_mutex_owner(lock);
+        *      clear_rt_mutex_waiters(lock);
+        *      raw_spin_unlock(&lock->wait_lock);
+        *      if (cmpxchg(&lock->owner, owner, 0) == owner)
+        *              return;
+        *      goto retry;
+        *
+        * The fastpath disabled variant is simple as all access to
+        * lock->owner is serialized by lock->wait_lock:
+        *
+        *      lock->owner = NULL;
+        *      raw_spin_unlock(&lock->wait_lock);
+        */
+       while (!rt_mutex_has_waiters(lock)) {
+               /* Drops lock->wait_lock ! */
+               if (unlock_rt_mutex_safe(lock) == true)
+                       return;
+               /* Relock the rtmutex and try again */
+               raw_spin_lock(&lock->wait_lock);
        }
 
+       /*
+        * The wakeup next waiter path does not suffer from the above
+        * race. See the comments there.
+        */
        wakeup_next_waiter(lock);
 
        raw_spin_unlock(&lock->wait_lock);
@@ -1112,7 +1284,8 @@ int rt_mutex_start_proxy_lock(struct rt_mutex *lock,
                return 1;
        }
 
-       ret = task_blocks_on_rt_mutex(lock, waiter, task, detect_deadlock);
+       /* We enforce deadlock detection for futexes */
+       ret = task_blocks_on_rt_mutex(lock, waiter, task, 1);
 
        if (ret && !rt_mutex_owner(lock)) {
                /*
index a1a1dd0..f6a1f3c 100644 (file)
@@ -24,3 +24,8 @@
 #define debug_rt_mutex_print_deadlock(w)               do { } while (0)
 #define debug_rt_mutex_detect_deadlock(w,d)            (d)
 #define debug_rt_mutex_reset_waiter(w)                 do { } while (0)
+
+static inline void rt_mutex_print_deadlock(struct rt_mutex_waiter *w)
+{
+       WARN(1, "rtmutex deadlock detected\n");
+}
index 9be8a91..2c93571 100644 (file)
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
        unsigned long flags;
 
        if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
-               ret = (sem->activity != 0);
+               ret = (sem->count != 0);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
        }
        return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        debug_check_no_locks_freed((void *)sem, sizeof(*sem));
        lockdep_init_map(&sem->dep_map, name, key, 0);
 #endif
-       sem->activity = 0;
+       sem->count = 0;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
 }
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
                waiter = list_entry(next, struct rwsem_waiter, list);
        } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
 
-       sem->activity += woken;
+       sem->count += woken;
 
  out:
        return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
-               sem->activity++;
+               sem->count++;
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
                goto out;
        }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity >= 0 && list_empty(&sem->wait_list)) {
+       if (sem->count >= 0 && list_empty(&sem->wait_list)) {
                /* granted */
-               sem->activity++;
+               sem->count++;
                ret = 1;
        }
 
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                 * itself to sleep, waiting until the system or someone else
                 * at the head of the wait list wakes it up.
                 */
-               if (sem->activity == 0)
+               if (sem->count == 0)
                        break;
                set_task_state(tsk, TASK_UNINTERRUPTIBLE);
                raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
                raw_spin_lock_irqsave(&sem->wait_lock, flags);
        }
        /* got the lock */
-       sem->activity = -1;
+       sem->count = -1;
        list_del(&waiter.list);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (sem->activity == 0) {
+       if (sem->count == 0) {
                /* got the lock */
-               sem->activity = -1;
+               sem->count = -1;
                ret = 1;
        }
 
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       if (--sem->activity == 0 && !list_empty(&sem->wait_list))
+       if (--sem->count == 0 && !list_empty(&sem->wait_list))
                sem = __rwsem_wake_one_writer(sem);
 
        raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       sem->activity = 0;
+       sem->count = 0;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 1);
 
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
 
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
 
-       sem->activity = 1;
+       sem->count = 1;
        if (!list_empty(&sem->wait_list))
                sem = __rwsem_do_wake(sem, 0);
 
index dacc321..a2391ac 100644 (file)
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
        sem->count = RWSEM_UNLOCKED_VALUE;
        raw_spin_lock_init(&sem->wait_lock);
        INIT_LIST_HEAD(&sem->wait_list);
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
        sem->owner = NULL;
-       sem->osq = NULL;
+       osq_lock_init(&sem->osq);
 #endif
 }
 
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
        return false;
 }
 
-#ifdef CONFIG_SMP
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 /*
  * Try to acquire write lock before the writer has been put on wait queue.
  */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
 static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
 {
        struct task_struct *owner;
-       bool on_cpu = true;
+       bool on_cpu = false;
 
        if (need_resched())
-               return 0;
+               return false;
 
        rcu_read_lock();
        owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
        rcu_read_unlock();
 
        /*
-        * If sem->owner is not set, the rwsem owner may have
-        * just acquired it and not set the owner yet or the rwsem
-        * has been released.
+        * If sem->owner is not set but we have only just entered the
+        * slowpath, then reader(s) may still hold the lock. To be safe,
+        * avoid spinning in these situations.
         */
        return on_cpu;
 }
index 42f806d..e2d3bc7 100644 (file)
@@ -12,7 +12,7 @@
 
 #include <linux/atomic.h>
 
-#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM)
+#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
 static inline void rwsem_set_owner(struct rw_semaphore *sem)
 {
        sem->owner = current;
index 49e0a20..fcc2611 100644 (file)
@@ -35,6 +35,7 @@
 
 static int nocompress;
 static int noresume;
+static int nohibernate;
 static int resume_wait;
 static unsigned int resume_delay;
 static char resume_file[256] = CONFIG_PM_STD_PARTITION;
@@ -62,6 +63,11 @@ bool freezer_test_done;
 
 static const struct platform_hibernation_ops *hibernation_ops;
 
+bool hibernation_available(void)
+{
+       return (nohibernate == 0);
+}
+
 /**
  * hibernation_set_ops - Set the global hibernate operations.
  * @ops: Hibernation operations to use in subsequent hibernation transitions.
@@ -642,6 +648,11 @@ int hibernate(void)
 {
        int error;
 
+       if (!hibernation_available()) {
+               pr_debug("PM: Hibernation not available.\n");
+               return -EPERM;
+       }
+
        lock_system_sleep();
        /* The snapshot device should not be opened while we're running */
        if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
@@ -734,7 +745,7 @@ static int software_resume(void)
        /*
         * If the user said "noresume".. bail out early.
         */
-       if (noresume)
+       if (noresume || !hibernation_available())
                return 0;
 
        /*
@@ -900,6 +911,9 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr,
        int i;
        char *start = buf;
 
+       if (!hibernation_available())
+               return sprintf(buf, "[disabled]\n");
+
        for (i = HIBERNATION_FIRST; i <= HIBERNATION_MAX; i++) {
                if (!hibernation_modes[i])
                        continue;
@@ -934,6 +948,9 @@ static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr,
        char *p;
        int mode = HIBERNATION_INVALID;
 
+       if (!hibernation_available())
+               return -EPERM;
+
        p = memchr(buf, '\n', n);
        len = p ? p - buf : n;
 
@@ -1101,6 +1118,10 @@ static int __init hibernate_setup(char *str)
                noresume = 1;
        else if (!strncmp(str, "nocompress", 10))
                nocompress = 1;
+       else if (!strncmp(str, "no", 2)) {
+               noresume = 1;
+               nohibernate = 1;
+       }
        return 1;
 }
 
@@ -1125,9 +1146,23 @@ static int __init resumedelay_setup(char *str)
        return 1;
 }
 
+static int __init nohibernate_setup(char *str)
+{
+       noresume = 1;
+       nohibernate = 1;
+       return 1;
+}
+
+static int __init kaslr_nohibernate_setup(char *str)
+{
+       return nohibernate_setup(str);
+}
+
 __setup("noresume", noresume_setup);
 __setup("resume_offset=", resume_offset_setup);
 __setup("resume=", resume_setup);
 __setup("hibernate=", hibernate_setup);
 __setup("resumewait", resumewait_setup);
 __setup("resumedelay=", resumedelay_setup);
+__setup("nohibernate", nohibernate_setup);
+__setup("kaslr", kaslr_nohibernate_setup);
index 573410d..8e90f33 100644 (file)
@@ -300,13 +300,11 @@ static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
                        s += sprintf(s,"%s ", pm_states[i].label);
 
 #endif
-#ifdef CONFIG_HIBERNATION
-       s += sprintf(s, "%s\n", "disk");
-#else
+       if (hibernation_available())
+               s += sprintf(s, "disk ");
        if (s != buf)
                /* convert the last space to a newline */
                *(s-1) = '\n';
-#endif
        return (s - buf);
 }
 
index 0ca8d83..4ee194e 100644 (file)
@@ -186,6 +186,7 @@ void thaw_processes(void)
 
        printk("Restarting tasks ... ");
 
+       __usermodehelper_set_disable_depth(UMH_FREEZING);
        thaw_workqueues();
 
        read_lock(&tasklist_lock);
index 4dd8822..ed35a47 100644 (file)
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
                error = suspend_ops->begin(state);
                if (error)
                        goto Close;
-       } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) {
+       } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
                error = freeze_ops->begin();
                if (error)
                        goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
  Close:
        if (need_suspend_ops(state) && suspend_ops->end)
                suspend_ops->end();
-       else if (state == PM_SUSPEND_FREEZE && freeze_ops->end)
+       else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
                freeze_ops->end();
 
        return error;
index 98d3575..526e891 100644 (file)
@@ -49,6 +49,9 @@ static int snapshot_open(struct inode *inode, struct file *filp)
        struct snapshot_data *data;
        int error;
 
+       if (!hibernation_available())
+               return -EPERM;
+
        lock_system_sleep();
 
        if (!atomic_add_unless(&snapshot_device_available, -1, 0)) {
index ea2d5f6..13e839d 100644 (file)
@@ -1416,9 +1416,10 @@ static int have_callable_console(void)
 /*
  * Can we actually use the console at this time on this cpu?
  *
- * Console drivers may assume that per-cpu resources have been allocated. So
- * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't
- * call them until this CPU is officially up.
+ * Console drivers may assume that per-cpu resources have
+ * been allocated. So unless they're explicitly marked as
+ * being able to cope (CON_ANYTIME) don't call them until
+ * this CPU is officially up.
  */
 static inline int can_use_console(unsigned int cpu)
 {
@@ -1431,10 +1432,8 @@ static inline int can_use_console(unsigned int cpu)
  * console_lock held, and 'console_locked' set) if it
  * is successful, false otherwise.
  */
-static int console_trylock_for_printk(void)
+static int console_trylock_for_printk(unsigned int cpu)
 {
-       unsigned int cpu = smp_processor_id();
-
        if (!console_trylock())
                return 0;
        /*
@@ -1609,8 +1608,7 @@ asmlinkage int vprintk_emit(int facility, int level,
                 */
                if (!oops_in_progress && !lockdep_recursing(current)) {
                        recursion_bug = 1;
-                       local_irq_restore(flags);
-                       return 0;
+                       goto out_restore_irqs;
                }
                zap_locks();
        }
@@ -1718,27 +1716,21 @@ asmlinkage int vprintk_emit(int facility, int level,
 
        logbuf_cpu = UINT_MAX;
        raw_spin_unlock(&logbuf_lock);
-       lockdep_on();
-       local_irq_restore(flags);
 
        /* If called from the scheduler, we can not call up(). */
-       if (in_sched)
-               return printed_len;
-
-       /*
-        * Disable preemption to avoid being preempted while holding
-        * console_sem which would prevent anyone from printing to console
-        */
-       preempt_disable();
-       /*
-        * Try to acquire and then immediately release the console semaphore.
-        * The release will print out buffers and wake up /dev/kmsg and syslog()
-        * users.
-        */
-       if (console_trylock_for_printk())
-               console_unlock();
-       preempt_enable();
+       if (!in_sched) {
+               /*
+                * Try to acquire and then immediately release the console
+                * semaphore.  The release will print out buffers and wake up
+                * /dev/kmsg and syslog() users.
+                */
+               if (console_trylock_for_printk(this_cpu))
+                       console_unlock();
+       }
 
+       lockdep_on();
+out_restore_irqs:
+       local_irq_restore(flags);
        return printed_len;
 }
 EXPORT_SYMBOL(vprintk_emit);
index f1ba773..625d0b0 100644 (file)
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
        rdp->passed_quiesce = 1;
 }
 
+static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
+
+static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
+       .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
+       .dynticks = ATOMIC_INIT(1),
+#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
+       .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
+       .dynticks_idle = ATOMIC_INIT(1),
+#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
+};
+
+/*
+ * Let the RCU core know that this CPU has gone through the scheduler,
+ * which is a quiescent state.  This is called when the need for a
+ * quiescent state is urgent, so we burn an atomic operation and full
+ * memory barriers to let the RCU core know about it, regardless of what
+ * this CPU might (or might not) do in the near future.
+ *
+ * We inform the RCU core by emulating a zero-duration dyntick-idle
+ * period, which we in turn do by incrementing the ->dynticks counter
+ * by two.
+ */
+static void rcu_momentary_dyntick_idle(void)
+{
+       unsigned long flags;
+       struct rcu_data *rdp;
+       struct rcu_dynticks *rdtp;
+       int resched_mask;
+       struct rcu_state *rsp;
+
+       local_irq_save(flags);
+
+       /*
+        * Yes, we can lose flag-setting operations.  This is OK, because
+        * the flag will be set again after some delay.
+        */
+       resched_mask = raw_cpu_read(rcu_sched_qs_mask);
+       raw_cpu_write(rcu_sched_qs_mask, 0);
+
+       /* Find the flavor that needs a quiescent state. */
+       for_each_rcu_flavor(rsp) {
+               rdp = raw_cpu_ptr(rsp->rda);
+               if (!(resched_mask & rsp->flavor_mask))
+                       continue;
+               smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
+               if (ACCESS_ONCE(rdp->mynode->completed) !=
+                   ACCESS_ONCE(rdp->cond_resched_completed))
+                       continue;
+
+               /*
+                * Pretend to be momentarily idle for the quiescent state.
+                * This allows the grace-period kthread to record the
+                * quiescent state, with no need for this CPU to do anything
+                * further.
+                */
+               rdtp = this_cpu_ptr(&rcu_dynticks);
+               smp_mb__before_atomic(); /* Earlier stuff before QS. */
+               atomic_add(2, &rdtp->dynticks);  /* QS. */
+               smp_mb__after_atomic(); /* Later stuff after QS. */
+               break;
+       }
+       local_irq_restore(flags);
+}
+
 /*
  * Note a context switch.  This is a quiescent state for RCU-sched,
  * and requires special handling for preemptible RCU.
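
The increment-by-two in rcu_momentary_dyntick_idle() works because ->dynticks encodes state in its parity (odd while the CPU is non-idle) and progress in its value. A self-contained sketch of that protocol with C11 atomics, using simplified names rather than the real rcu_dynticks layout:

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_uint dynticks = 1;        /* odd: CPU currently non-idle */

    static void momentary_idle(void)
    {
            /* +2 preserves parity but changes the value: a zero-width QS. */
            atomic_fetch_add(&dynticks, 2);
    }

    /* Grace-period side: take a snapshot, wait, then re-check. */
    static bool saw_quiescent_state(unsigned int snap)
    {
            unsigned int curr = atomic_load(&dynticks);

            /* Even now => idle; value moved => was (momentarily) idle. */
            return (curr & 1) == 0 || curr != snap;
    }
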
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
        trace_rcu_utilization(TPS("Start context switch"));
        rcu_sched_qs(cpu);
        rcu_preempt_note_context_switch(cpu);
+       if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+               rcu_momentary_dyntick_idle();
        trace_rcu_utilization(TPS("End context switch"));
 }
 EXPORT_SYMBOL_GPL(rcu_note_context_switch);
 
-static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
-       .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
-       .dynticks = ATOMIC_INIT(1),
-#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
-       .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
-       .dynticks_idle = ATOMIC_INIT(1),
-#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
-};
-
 static long blimit = 10;       /* Maximum callbacks per rcu_do_batch. */
 static long qhimark = 10000;   /* If this many pending, ignore blimit. */
 static long qlowmark = 100;    /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
 module_param(jiffies_till_first_fqs, ulong, 0644);
 module_param(jiffies_till_next_fqs, ulong, 0644);
 
+/*
+ * How long the grace period must be before we start recruiting
+ * quiescent-state help from rcu_note_context_switch().
+ */
+static ulong jiffies_till_sched_qs = HZ / 20;
+module_param(jiffies_till_sched_qs, ulong, 0644);
+
 static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                                  struct rcu_data *rdp);
 static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
                                    bool *isidle, unsigned long *maxj)
 {
        unsigned int curr;
+       int *rcrmp;
        unsigned int snap;
 
        curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        }
 
        /*
-        * There is a possibility that a CPU in adaptive-ticks state
-        * might run in the kernel with the scheduling-clock tick disabled
-        * for an extended time period.  Invoke rcu_kick_nohz_cpu() to
-        * force the CPU to restart the scheduling-clock tick in this
-        * CPU is in this state.
-        */
-       rcu_kick_nohz_cpu(rdp->cpu);
-
-       /*
-        * Alternatively, the CPU might be running in the kernel
-        * for an extended period of time without a quiescent state.
-        * Attempt to force the CPU through the scheduler to gain the
-        * needed quiescent state, but only if the grace period has gone
-        * on for an uncommonly long time.  If there are many stuck CPUs,
-        * we will beat on the first one until it gets unstuck, then move
-        * to the next.  Only do this for the primary flavor of RCU.
+        * A CPU running for an extended time within the kernel can
+        * delay RCU grace periods.  When the CPU is in NO_HZ_FULL mode,
+        * even context-switching back and forth between a pair of
+        * in-kernel CPU-bound tasks cannot advance grace periods.
+        * So if the grace period is old enough, make the CPU pay attention.
+        * Note that the unsynchronized assignments to the per-CPU
+        * rcu_sched_qs_mask variable are safe.  Yes, setting of
+        * bits can be lost, but they will be set again on the next
+        * force-quiescent-state pass.  So lost bit sets do not result
+        * in incorrect behavior, merely in a grace period lasting
+        * a few jiffies longer than it might otherwise.  Because
+        * there are at most four threads involved, and because the
+        * updates occur only once every few jiffies, the probability of
+        * lossage (and thus of slight grace-period extension) is
+        * quite low.
+        *
+        * Note that if the jiffies_till_sched_qs boot/sysfs parameter
+        * is set too high, we override with half of the RCU CPU stall
+        * warning delay.
         */
-       if (rdp->rsp == rcu_state_p &&
+       rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
+       if (ULONG_CMP_GE(jiffies,
+                        rdp->rsp->gp_start + jiffies_till_sched_qs) ||
            ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-               rdp->rsp->jiffies_resched += 5;
-               resched_cpu(rdp->cpu);
+               if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+                       ACCESS_ONCE(rdp->cond_resched_completed) =
+                               ACCESS_ONCE(rdp->mynode->completed);
+                       smp_mb(); /* ->cond_resched_completed before *rcrmp. */
+                       ACCESS_ONCE(*rcrmp) =
+                               ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+                       rdp->rsp->jiffies_resched += 5; /* Enable beating. */
+               } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
+                       /* Time to beat on that CPU again! */
+                       resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
+                       rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
+               }
        }
 
        return 0;
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
                               "rcu_node_fqs_1",
                               "rcu_node_fqs_2",
                               "rcu_node_fqs_3" };  /* Match MAX_RCU_LVLS */
+       static u8 fl_mask = 0x1;
        int cpustride = 1;
        int i;
        int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
        for (i = 1; i < rcu_num_lvls; i++)
                rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
        rcu_init_levelspread(rsp);
+       rsp->flavor_mask = fl_mask;
+       fl_mask <<= 1;
 
        /* Initialize the elements themselves, starting from the leaves. */
 
index bf2c1e6..0f69a79 100644 (file)
@@ -307,6 +307,9 @@ struct rcu_data {
        /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
        unsigned long dynticks_fqs;     /* Kicked due to dynticks idle. */
        unsigned long offline_fqs;      /* Kicked due to being offline. */
+       unsigned long cond_resched_completed;
+                                       /* Grace period that needs help */
+                                       /*  from cond_resched(). */
 
        /* 5) __rcu_pending() statistics. */
        unsigned long n_rcu_pending;    /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
        struct rcu_node *level[RCU_NUM_LVLS];   /* Hierarchy levels. */
        u32 levelcnt[MAX_RCU_LVLS + 1];         /* # nodes in each level. */
        u8 levelspread[RCU_NUM_LVLS];           /* kids/node in each level. */
+       u8 flavor_mask;                         /* bit in flavor mask. */
        struct rcu_data __percpu *rda;          /* pointer to per-CPU rcu_data. */
        void (*call)(struct rcu_head *head,     /* call_rcu() flavor. */
                     void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
 static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
 static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
 static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
-static void rcu_kick_nohz_cpu(int cpu);
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
 static bool init_nocb_callback_list(struct rcu_data *rdp);
 static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
 static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
index cbc2c45..02ac0fb 100644 (file)
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
  * if an adaptive-ticks CPU is failing to respond to the current grace
  * period and has not been idle from an RCU perspective, kick it.
  */
-static void rcu_kick_nohz_cpu(int cpu)
+static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
 {
 #ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_cpu(cpu))
index a2aeb4d..bc78835 100644 (file)
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
 EXPORT_SYMBOL_GPL(wait_rcu_gp);
 
 #ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
-static inline void debug_init_rcu_head(struct rcu_head *head)
+void init_rcu_head(struct rcu_head *head)
 {
        debug_object_init(head, &rcuhead_debug_descr);
 }
 
-static inline void debug_rcu_head_free(struct rcu_head *head)
+void destroy_rcu_head(struct rcu_head *head)
 {
        debug_object_free(head, &rcuhead_debug_descr);
 }
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
 early_initcall(check_cpu_stall_init);
 
 #endif /* #ifdef CONFIG_RCU_STALL_COMMON */
-
-/*
- * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
- */
-
-DEFINE_PER_CPU(int, rcu_cond_resched_count);
-
-/*
- * Report a set of RCU quiescent states, for use by cond_resched()
- * and friends.  Out of line due to being called infrequently.
- */
-void rcu_resched(void)
-{
-       preempt_disable();
-       __this_cpu_write(rcu_cond_resched_count, 0);
-       rcu_note_context_switch(smp_processor_id());
-       preempt_enable();
-}
index 3bdf01b..bc1638b 100644 (file)
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
 
 int __sched _cond_resched(void)
 {
-       rcu_cond_resched();
        if (should_resched()) {
                __cond_resched();
                return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
  */
 int __cond_resched_lock(spinlock_t *lock)
 {
-       bool need_rcu_resched = rcu_should_resched();
        int resched = should_resched();
        int ret = 0;
 
        lockdep_assert_held(lock);
 
-       if (spin_needbreak(lock) || resched || need_rcu_resched) {
+       if (spin_needbreak(lock) || resched) {
                spin_unlock(lock);
                if (resched)
                        __cond_resched();
-               else if (unlikely(need_rcu_resched))
-                       rcu_resched();
                else
                        cpu_relax();
                ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
 {
        BUG_ON(!in_softirq());
 
-       rcu_cond_resched();  /* BH disabled OK, just recording QSes. */
        if (should_resched()) {
                local_bh_enable();
                __cond_resched();
index 695f977..627b3c3 100644 (file)
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
 
                avg_atom = p->se.sum_exec_runtime;
                if (nr_switches)
-                       do_div(avg_atom, nr_switches);
+                       avg_atom = div64_ul(avg_atom, nr_switches);
                else
                        avg_atom = -1LL;
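
The do_div() to div64_ul() swap matters because do_div() divides a u64 in place by a 32-bit divisor: passing the 64-bit nr_switches silently truncates it, giving a wrong average, or a divide-by-zero when its low 32 bits happen to be zero. div64_ul() keeps the full-width divisor. A sketch with hypothetical inputs:

    #include <linux/math64.h>

    u64 avg = total_runtime_ns;             /* hypothetical inputs */
    unsigned long n = nr_switches;

    /*
     * do_div(avg, n) would use only the low 32 bits of n: wrong
     * result, and a crash if those 32 bits are zero.
     */
    avg = div64_ul(avg, n);
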
 
index 301bbc2..2f3fa2c 100644 (file)
@@ -54,7 +54,7 @@
 struct seccomp_filter {
        atomic_t usage;
        struct seccomp_filter *prev;
-       struct sk_filter *prog;
+       struct bpf_prog *prog;
 };
 
 /* Limit any path through the tree to 256KB worth of instructions. */
@@ -87,7 +87,7 @@ static void populate_seccomp_data(struct seccomp_data *sd)
  *     @filter: filter to verify
  *     @flen: length of filter
  *
- * Takes a previously checked filter (by sk_chk_filter) and
+ * Takes a previously checked filter (by bpf_check_classic) and
  * redirects all filter code that loads struct sk_buff data
  * and related data through seccomp_bpf_load.  It also
  * enforces length and alignment checking of those loads.
@@ -187,7 +187,7 @@ static u32 seccomp_run_filters(int syscall)
         * value always takes priority (ignoring the DATA).
         */
        for (f = current->seccomp.filter; f; f = f->prev) {
-               u32 cur_ret = SK_RUN_FILTER(f->prog, (void *)&sd);
+               u32 cur_ret = BPF_PROG_RUN(f->prog, (void *)&sd);
 
                if ((cur_ret & SECCOMP_RET_ACTION) < (ret & SECCOMP_RET_ACTION))
                        ret = cur_ret;
@@ -239,7 +239,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
                goto free_prog;
 
        /* Check and rewrite the fprog via the skb checker */
-       ret = sk_chk_filter(fp, fprog->len);
+       ret = bpf_check_classic(fp, fprog->len);
        if (ret)
                goto free_prog;
 
@@ -248,8 +248,8 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        if (ret)
                goto free_prog;
 
-       /* Convert 'sock_filter' insns to 'sock_filter_int' insns */
-       ret = sk_convert_filter(fp, fprog->len, NULL, &new_len);
+       /* Convert 'sock_filter' insns to 'bpf_insn' insns */
+       ret = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
        if (ret)
                goto free_prog;
 
@@ -260,12 +260,12 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        if (!filter)
                goto free_prog;
 
-       filter->prog = kzalloc(sk_filter_size(new_len),
+       filter->prog = kzalloc(bpf_prog_size(new_len),
                               GFP_KERNEL|__GFP_NOWARN);
        if (!filter->prog)
                goto free_filter;
 
-       ret = sk_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
+       ret = bpf_convert_filter(fp, fprog->len, filter->prog->insnsi, &new_len);
        if (ret)
                goto free_filter_prog;
        kfree(fp);
@@ -273,7 +273,7 @@ static long seccomp_attach_filter(struct sock_fprog *fprog)
        atomic_set(&filter->usage, 1);
        filter->prog->len = new_len;
 
-       sk_filter_select_runtime(filter->prog);
+       bpf_prog_select_runtime(filter->prog);
 
        /*
         * If there is an existing filter, make it the prev and don't drop its
@@ -337,7 +337,7 @@ void put_seccomp_filter(struct task_struct *tsk)
        while (orig && atomic_dec_and_test(&orig->usage)) {
                struct seccomp_filter *freeme = orig;
                orig = orig->prev;
-               sk_filter_free(freeme->prog);
+               bpf_prog_free(freeme->prog);
                kfree(freeme);
        }
 }
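
The conversion to the renamed internal BPF API keeps the usual two-pass idiom: a first bpf_convert_filter() call with a NULL image only computes the converted length, the program is sized with bpf_prog_size(), and a second call emits the instructions. Its shape, reduced to the essentials:

    int new_len = 0, err;
    struct bpf_prog *prog;

    /* Pass 1: NULL image, just measure the converted program. */
    err = bpf_convert_filter(fp, fprog->len, NULL, &new_len);
    if (err)
            return err;

    prog = kzalloc(bpf_prog_size(new_len), GFP_KERNEL | __GFP_NOWARN);
    if (!prog)
            return -ENOMEM;

    /* Pass 2: same input, now emit into the allocated image. */
    err = bpf_convert_filter(fp, fprog->len, prog->insnsi, &new_len);
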
index 306f818..80c33f8 100644 (file)
@@ -29,6 +29,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
 
 static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
 
+static void flush_smp_call_function_queue(bool warn_cpu_offline);
+
 static int
 hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 {
@@ -51,12 +53,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
 #ifdef CONFIG_HOTPLUG_CPU
        case CPU_UP_CANCELED:
        case CPU_UP_CANCELED_FROZEN:
+               /* Fall-through to the CPU_DEAD[_FROZEN] case. */
 
        case CPU_DEAD:
        case CPU_DEAD_FROZEN:
                free_cpumask_var(cfd->cpumask);
                free_percpu(cfd->csd);
                break;
+
+       case CPU_DYING:
+       case CPU_DYING_FROZEN:
+               /*
+                * The IPIs for the smp-call-function callbacks queued by other
+                * CPUs might arrive late, either due to hardware latencies or
+                * because this CPU disabled interrupts (inside stop-machine)
+                * before the IPIs were sent. So flush out any pending callbacks
+                * explicitly (without waiting for the IPIs to arrive), to
+                * ensure that the outgoing CPU doesn't go offline with work
+                * still pending.
+                */
+               flush_smp_call_function_queue(false);
+               break;
 #endif
        };
 
@@ -177,23 +194,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
        return 0;
 }
 
-/*
- * Invoked by arch to handle an IPI for call function single. Must be
- * called from the arch with interrupts disabled.
+/**
+ * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
+ *
+ * Invoked by arch to handle an IPI for call function single.
+ * Must be called with interrupts disabled.
  */
 void generic_smp_call_function_single_interrupt(void)
 {
+       flush_smp_call_function_queue(true);
+}
+
+/**
+ * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
+ *
+ * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
+ *                   offline CPU. Skip this check if set to 'false'.
+ *
+ * Flush any pending smp-call-function callbacks queued on this CPU. This is
+ * invoked by the generic IPI handler, as well as by a CPU about to go offline,
+ * to ensure that all pending IPI callbacks are run before it goes completely
+ * offline.
+ *
+ * Loop through the call_single_queue and run all the queued callbacks.
+ * Must be called with interrupts disabled.
+ */
+static void flush_smp_call_function_queue(bool warn_cpu_offline)
+{
+       struct llist_head *head;
        struct llist_node *entry;
        struct call_single_data *csd, *csd_next;
        static bool warned;
 
-       entry = llist_del_all(&__get_cpu_var(call_single_queue));
+       WARN_ON(!irqs_disabled());
+
+       head = &__get_cpu_var(call_single_queue);
+       entry = llist_del_all(head);
        entry = llist_reverse_order(entry);
 
-       /*
-        * Shouldn't receive this interrupt on a cpu that is not yet online.
-        */
-       if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
+       /* There shouldn't be any pending callbacks on an offline CPU. */
+       if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
+                    !warned && !llist_empty(head))) {
                warned = true;
                WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
 
index ba9ed45..75b22e2 100644 (file)
@@ -136,7 +136,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
 /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
 static int maxolduid = 65535;
 static int minolduid;
-static int min_percpu_pagelist_fract = 8;
 
 static int ngroups_max = NGROUPS_MAX;
 static const int cap_last_cap = CAP_LAST_CAP;
@@ -152,10 +151,6 @@ static unsigned long hung_task_timeout_max = (LONG_MAX/HZ);
 #ifdef CONFIG_SPARC
 #endif
 
-#ifdef CONFIG_SPARC64
-extern int sysctl_tsb_ratio;
-#endif
-
 #ifdef __hppa__
 extern int pwrsw_enabled;
 #endif
@@ -865,6 +860,17 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#ifdef CONFIG_SMP
+       {
+               .procname       = "softlockup_all_cpu_backtrace",
+               .data           = &sysctl_softlockup_all_cpu_backtrace,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &one,
+       },
+#endif /* CONFIG_SMP */
        {
                .procname       = "nmi_watchdog",
                .data           = &watchdog_user_enabled,
@@ -1321,7 +1327,7 @@ static struct ctl_table vm_table[] = {
                .maxlen         = sizeof(percpu_pagelist_fraction),
                .mode           = 0644,
                .proc_handler   = percpu_pagelist_fraction_sysctl_handler,
-               .extra1         = &min_percpu_pagelist_fract,
+               .extra1         = &zero,
        },
 #ifdef CONFIG_MMU
        {
index 653cbbd..e4ba9a5 100644 (file)
@@ -522,6 +522,7 @@ static const struct bin_table bin_net_ipv6_conf_var_table[] = {
        { CTL_INT,      NET_IPV6_ACCEPT_RA_RT_INFO_MAX_PLEN,    "accept_ra_rt_info_max_plen" },
        { CTL_INT,      NET_IPV6_PROXY_NDP,                     "proxy_ndp" },
        { CTL_INT,      NET_IPV6_ACCEPT_SOURCE_ROUTE,           "accept_source_route" },
+       { CTL_INT,      NET_IPV6_ACCEPT_RA_FROM_LOCAL,          "accept_ra_from_local" },
        {}
 };
 
index 88c9c65..fe75444 100644 (file)
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
                                struct itimerspec *new_setting,
                                struct itimerspec *old_setting)
 {
+       ktime_t exp;
+
        if (!rtcdev)
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (old_setting)
                alarm_timer_get(timr, old_setting);
 
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
 
        /* start the timer */
        timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
-       alarm_start(&timr->it.alarm.alarmtimer,
-                       timespec_to_ktime(new_setting->it_value));
+       exp = timespec_to_ktime(new_setting->it_value);
+       /* Convert (if necessary) to absolute time */
+       if (flags != TIMER_ABSTIME) {
+               ktime_t now;
+
+               now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
+               exp = ktime_add(now, exp);
+       }
+
+       alarm_start(&timr->it.alarm.alarmtimer, exp);
        return 0;
 }
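
alarm_start() expects an absolute expiry on the alarm base's own clock, so the fix above rebases a relative request with plain exp = now + delta before arming. The flags != TIMER_ABSTIME test is equivalent to the bitmask form here, because all other flag bits were rejected earlier in the function:

    ktime_t exp = timespec_to_ktime(new_setting->it_value);

    if (!(flags & TIMER_ABSTIME)) {
            /* Relative request: rebase onto this alarm base's clock. */
            ktime_t now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();

            exp = ktime_add(now, exp);
    }
    alarm_start(&timr->it.alarm.alarmtimer, exp);
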
 
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
        if (!alarmtimer_get_rtcdev())
                return -ENOTSUPP;
 
+       if (flags & ~TIMER_ABSTIME)
+               return -EINVAL;
+
        if (!capable(CAP_WAKE_ALARM))
                return -EPERM;
 
index 5b372e3..ac9d1da 100644 (file)
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
                func = ftrace_ops_list_func;
        }
 
+       update_function_graph_func();
+
        /* If there's no change, then do nothing more here */
        if (ftrace_trace_function == func)
                return;
 
-       update_function_graph_func();
-
        /*
         * If we are using the list function, it doesn't care
         * about the function_trace_ops.
index 7c56c3d..ff70271 100644 (file)
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
        struct ring_buffer_per_cpu *cpu_buffer;
        struct rb_irq_work *work;
 
-       if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
-           (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
-               return POLLIN | POLLRDNORM;
-
        if (cpu == RING_BUFFER_ALL_CPUS)
                work = &buffer->irq_work;
        else {
index 384ede3..291397e 100644 (file)
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        struct print_entry *entry;
        unsigned long irq_flags;
        int alloc;
+       int pc;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
                entry->buf[size] = '\0';
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return size;
 }
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
        struct bputs_entry *entry;
        unsigned long irq_flags;
        int size = sizeof(struct bputs_entry);
+       int pc;
+
+       if (!(trace_flags & TRACE_ITER_PRINTK))
+               return 0;
+
+       pc = preempt_count();
 
        if (unlikely(tracing_selftest_running || tracing_disabled))
                return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        local_save_flags(irq_flags);
        buffer = global_trace.trace_buffer.buffer;
        event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
-                                         irq_flags, preempt_count());
+                                         irq_flags, pc);
        if (!event)
                return 0;
 
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
        entry->str                      = str;
 
        __buffer_unlock_commit(buffer, event);
+       ftrace_trace_stack(buffer, irq_flags, 4, pc);
 
        return 1;
 }
@@ -809,7 +823,7 @@ static struct {
        { trace_clock_local,    "local",        1 },
        { trace_clock_global,   "global",       1 },
        { trace_clock_counter,  "counter",      0 },
-       { trace_clock_jiffies,  "uptime",       1 },
+       { trace_clock_jiffies,  "uptime",       0 },
        { trace_clock,          "perf",         1 },
        ARCH_TRACE_CLOCKS
 };
@@ -1396,7 +1410,6 @@ void tracing_start(void)
 
        arch_spin_unlock(&global_trace.max_lock);
 
-       ftrace_start();
  out:
        raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
 }
@@ -1443,7 +1456,6 @@ void tracing_stop(void)
        struct ring_buffer *buffer;
        unsigned long flags;
 
-       ftrace_stop();
        raw_spin_lock_irqsave(&global_trace.start_lock, flags);
        if (global_trace.stop_count++)
                goto out;
index 26dc348..57b67b1 100644 (file)
@@ -59,13 +59,14 @@ u64 notrace trace_clock(void)
 
 /*
  * trace_jiffy_clock(): Simply use jiffies as a clock counter.
+ * Note that this use of jiffies_64 is not completely safe on
+ * 32-bit systems. But the window is tiny, and the worst
+ * effect is merely an obviously bogus
+ * timestamp on a trace event - i.e. not life threatening.
  */
 u64 notrace trace_clock_jiffies(void)
 {
-       u64 jiffy = jiffies - INITIAL_JIFFIES;
-
-       /* Return nsecs */
-       return (u64)jiffies_to_usecs(jiffy) * 1000ULL;
+       return jiffies_64_to_clock_t(jiffies_64 - INITIAL_JIFFIES);
 }
 
 /*
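
The switch to jiffies_64_to_clock_t() means the "uptime" trace clock now counts USER_HZ ticks instead of nanoseconds, which is why its in_ns flag flips to 0 in the trace.c hunk earlier. The scaling it performs, sketched for the common case where HZ divides evenly:

    /* Assumes HZ % USER_HZ == 0, e.g. HZ=1000 and USER_HZ=100. */
    static inline u64 jiffies_delta_to_clock_t(u64 delta)
    {
            return delta / (HZ / USER_HZ);  /* 1000 jiffies -> 100 ticks */
    }
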
index f99e0b3..2de5362 100644 (file)
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
 
        list_del(&file->list);
        remove_subsystem(file->system);
+       free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
 }
 
index 04fdb5d..3c9b97e 100644 (file)
@@ -893,6 +893,9 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
        int ret;
 
        if (file) {
+               if (tu->tp.flags & TP_FLAG_PROFILE)
+                       return -EINTR;
+
                link = kmalloc(sizeof(*link), GFP_KERNEL);
                if (!link)
                        return -ENOMEM;
@@ -901,29 +904,40 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
                list_add_tail_rcu(&link->list, &tu->tp.files);
 
                tu->tp.flags |= TP_FLAG_TRACE;
-       } else
-               tu->tp.flags |= TP_FLAG_PROFILE;
+       } else {
+               if (tu->tp.flags & TP_FLAG_TRACE)
+                       return -EINTR;
 
-       ret = uprobe_buffer_enable();
-       if (ret < 0)
-               return ret;
+               tu->tp.flags |= TP_FLAG_PROFILE;
+       }
 
        WARN_ON(!uprobe_filter_is_empty(&tu->filter));
 
        if (enabled)
                return 0;
 
+       ret = uprobe_buffer_enable();
+       if (ret)
+               goto err_flags;
+
        tu->consumer.filter = filter;
        ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
-       if (ret) {
-               if (file) {
-                       list_del(&link->list);
-                       kfree(link);
-                       tu->tp.flags &= ~TP_FLAG_TRACE;
-               } else
-                       tu->tp.flags &= ~TP_FLAG_PROFILE;
-       }
+       if (ret)
+               goto err_buffer;
 
+       return 0;
+
+ err_buffer:
+       uprobe_buffer_disable();
+
+ err_flags:
+       if (file) {
+               list_del(&link->list);
+               kfree(link);
+               tu->tp.flags &= ~TP_FLAG_TRACE;
+       } else {
+               tu->tp.flags &= ~TP_FLAG_PROFILE;
+       }
        return ret;
 }
 
@@ -1201,12 +1215,6 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
 
        current->utask->vaddr = (unsigned long) &udd;
 
-#ifdef CONFIG_PERF_EVENTS
-       if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
-           !uprobe_perf_filter(&tu->consumer, 0, current->mm))
-               return UPROBE_HANDLER_REMOVE;
-#endif
-
        if (WARN_ON_ONCE(!uprobe_cpu_buffer))
                return 0;
 
index 33cbd8c..3490407 100644 (file)
@@ -492,33 +492,29 @@ static int sys_tracepoint_refcount;
 
 void syscall_regfunc(void)
 {
-       unsigned long flags;
-       struct task_struct *g, *t;
+       struct task_struct *p, *t;
 
        if (!sys_tracepoint_refcount) {
-               read_lock_irqsave(&tasklist_lock, flags);
-               do_each_thread(g, t) {
-                       /* Skip kernel threads. */
-                       if (t->mm)
-                               set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
-               } while_each_thread(g, t);
-               read_unlock_irqrestore(&tasklist_lock, flags);
+               read_lock(&tasklist_lock);
+               for_each_process_thread(p, t) {
+                       set_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
+               }
+               read_unlock(&tasklist_lock);
        }
        sys_tracepoint_refcount++;
 }
 
 void syscall_unregfunc(void)
 {
-       unsigned long flags;
-       struct task_struct *g, *t;
+       struct task_struct *p, *t;
 
        sys_tracepoint_refcount--;
        if (!sys_tracepoint_refcount) {
-               read_lock_irqsave(&tasklist_lock, flags);
-               do_each_thread(g, t) {
+               read_lock(&tasklist_lock);
+               for_each_process_thread(p, t) {
                        clear_tsk_thread_flag(t, TIF_SYSCALL_TRACEPOINT);
-               } while_each_thread(g, t);
-               read_unlock_irqrestore(&tasklist_lock, flags);
+               }
+               read_unlock(&tasklist_lock);
        }
 }
 #endif
index 516203e..c3319bd 100644 (file)
 
 int watchdog_user_enabled = 1;
 int __read_mostly watchdog_thresh = 10;
+#ifdef CONFIG_SMP
+int __read_mostly sysctl_softlockup_all_cpu_backtrace;
+#else
+#define sysctl_softlockup_all_cpu_backtrace 0
+#endif
+
 static int __read_mostly watchdog_running;
 static u64 __read_mostly sample_period;
 
@@ -47,6 +53,7 @@ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
 static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
+static unsigned long soft_lockup_nmi_warn;
 
 /* boot commands */
 /*
@@ -95,6 +102,15 @@ static int __init nosoftlockup_setup(char *str)
 }
 __setup("nosoftlockup", nosoftlockup_setup);
 /*  */
+#ifdef CONFIG_SMP
+static int __init softlockup_all_cpu_backtrace_setup(char *str)
+{
+       sysctl_softlockup_all_cpu_backtrace =
+               !!simple_strtol(str, NULL, 0);
+       return 1;
+}
+__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
+#endif
 
 /*
  * Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -271,6 +287,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
        unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
        struct pt_regs *regs = get_irq_regs();
        int duration;
+       int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
 
        /* kick the hardlockup detector */
        watchdog_interrupt_count();
@@ -317,6 +334,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                if (__this_cpu_read(soft_watchdog_warn) == true)
                        return HRTIMER_RESTART;
 
+               if (softlockup_all_cpu_backtrace) {
+                       /* Prevent multiple soft-lockup reports if one cpu is already
+                        * engaged in dumping cpu back traces
+                        */
+                       if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
+                               /* Someone else will report us. Let's give up */
+                               __this_cpu_write(soft_watchdog_warn, true);
+                               return HRTIMER_RESTART;
+                       }
+               }
+
                printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
                        smp_processor_id(), duration,
                        current->comm, task_pid_nr(current));
@@ -327,6 +355,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
                else
                        dump_stack();
 
+               if (softlockup_all_cpu_backtrace) {
+                       /* Avoid generating two back traces for current
+                        * given that one is already made above
+                        */
+                       trigger_allbutself_cpu_backtrace();
+
+                       clear_bit(0, &soft_lockup_nmi_warn);
+                       /* Barrier to sync with other cpus */
+                       smp_mb__after_atomic();
+               }
+
                if (softlockup_panic)
                        panic("softlockup: hung tasks");
                __this_cpu_write(soft_watchdog_warn, true);
@@ -527,10 +566,8 @@ static void update_timers_all_cpus(void)
        int cpu;
 
        get_online_cpus();
-       preempt_disable();
        for_each_online_cpu(cpu)
                update_timers(cpu);
-       preempt_enable();
        put_online_cpus();
 }
 
index 6203d29..35974ac 100644 (file)
@@ -3284,6 +3284,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
                }
        }
 
+       dev_set_uevent_suppress(&wq_dev->dev, false);
        kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
        return 0;
 }
@@ -4879,7 +4880,7 @@ static void __init wq_numa_init(void)
        BUG_ON(!tbl);
 
        for_each_node(node)
-               BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
+               BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
                                node_online(node) ? node : NUMA_NO_NODE));
 
        for_each_possible_cpu(cpu) {
index 7cfcc1b..f11a2e8 100644 (file)
@@ -930,7 +930,7 @@ config LOCKDEP
        bool
        depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC
+       select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
        select KALLSYMS
        select KALLSYMS_ALL
 
@@ -1408,7 +1408,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
        depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
        depends on !X86_64
        select STACKTRACE
-       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC
+       select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
        help
          Provide stacktrace filter for fault-injection capabilities
 
@@ -1550,6 +1550,14 @@ config TEST_STRING_HELPERS
 config TEST_KSTRTOX
        tristate "Test kstrto*() family of functions at runtime"
 
+config TEST_RHASHTABLE
+       bool "Perform selftest on resizable hash table"
+       default n
+       help
+         Enable this option to test the rhashtable functions at boot.
+
+         If unsure, say N.
+
 endmenu # runtime tests
 
 config PROVIDE_OHCI1394_DMA_INIT
index ba967a1..fd248e4 100644 (file)
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
         bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
         gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
         bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
-        percpu-refcount.o percpu_ida.o hash.o
+        percpu-refcount.o percpu_ida.o hash.o rhashtable.o
 obj-y += string_helpers.o
 obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
 obj-y += kstrtox.o
index c101230..b6513a9 100644 (file)
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
 
        i %= num_online_cpus();
 
-       if (!cpumask_of_node(numa_node)) {
+       if (numa_node == -1 || !cpumask_of_node(numa_node)) {
                /* Use all online cpu's for non numa aware system */
                cpumask_copy(mask, cpu_online_mask);
        } else {
index 21a7b21..9a907d4 100644 (file)
@@ -50,34 +50,10 @@ MODULE_AUTHOR("Matt Domsch <Matt_Domsch@dell.com>");
 MODULE_DESCRIPTION("Various CRC32 calculations");
 MODULE_LICENSE("GPL");
 
-#define GF2_DIM                32
-
-static u32 gf2_matrix_times(u32 *mat, u32 vec)
-{
-       u32 sum = 0;
-
-       while (vec) {
-               if (vec & 1)
-                       sum ^= *mat;
-               vec >>= 1;
-               mat++;
-       }
-
-       return sum;
-}
-
-static void gf2_matrix_square(u32 *square, u32 *mat)
-{
-       int i;
-
-       for (i = 0; i < GF2_DIM; i++)
-               square[i] = gf2_matrix_times(mat, mat[i]);
-}
-
 #if CRC_LE_BITS > 8 || CRC_BE_BITS > 8
 
 /* implements slicing-by-4 or slicing-by-8 algorithm */
-static inline u32
+static inline u32 __pure
 crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 {
 # ifdef __LITTLE_ENDIAN
@@ -155,51 +131,6 @@ crc32_body(u32 crc, unsigned char const *buf, size_t len, const u32 (*tab)[256])
 }
 #endif
 
-/* For conditions of distribution and use, see copyright notice in zlib.h */
-static u32 crc32_generic_combine(u32 crc1, u32 crc2, size_t len2,
-                                u32 polynomial)
-{
-       u32 even[GF2_DIM]; /* Even-power-of-two zeros operator */
-       u32 odd[GF2_DIM];  /* Odd-power-of-two zeros operator  */
-       u32 row;
-       int i;
-
-       if (len2 <= 0)
-               return crc1;
-
-       /* Put operator for one zero bit in odd */
-       odd[0] = polynomial;
-       row = 1;
-       for (i = 1; i < GF2_DIM; i++) {
-               odd[i] = row;
-               row <<= 1;
-       }
-
-       gf2_matrix_square(even, odd); /* Put operator for two zero bits in even */
-       gf2_matrix_square(odd, even); /* Put operator for four zero bits in odd */
-
-       /* Apply len2 zeros to crc1 (first square will put the operator for one
-        * zero byte, eight zero bits, in even).
-        */
-       do {
-               /* Apply zeros operator for this bit of len2 */
-               gf2_matrix_square(even, odd);
-               if (len2 & 1)
-                       crc1 = gf2_matrix_times(even, crc1);
-               len2 >>= 1;
-               /* If no more bits set, then done */
-               if (len2 == 0)
-                       break;
-               /* Another iteration of the loop with odd and even swapped */
-               gf2_matrix_square(odd, even);
-               if (len2 & 1)
-                       crc1 = gf2_matrix_times(odd, crc1);
-               len2 >>= 1;
-       } while (len2 != 0);
-
-       crc1 ^= crc2;
-       return crc1;
-}
 
 /**
  * crc32_le_generic() - Calculate bitwise little-endian Ethernet AUTODIN II
@@ -271,19 +202,81 @@ u32 __pure __crc32c_le(u32 crc, unsigned char const *p, size_t len)
                        (const u32 (*)[256])crc32ctable_le, CRC32C_POLY_LE);
 }
 #endif
-u32 __pure crc32_le_combine(u32 crc1, u32 crc2, size_t len2)
+EXPORT_SYMBOL(crc32_le);
+EXPORT_SYMBOL(__crc32c_le);
+
+/*
+ * This multiplies the polynomials x and y modulo the given modulus.
+ * This follows the "little-endian" CRC convention that the lsbit
+ * represents the highest power of x, and the msbit represents x^0.
+ */
+static u32 __attribute_const__ gf2_multiply(u32 x, u32 y, u32 modulus)
 {
-       return crc32_generic_combine(crc1, crc2, len2, CRCPOLY_LE);
+       u32 product = x & 1 ? y : 0;
+       int i;
+
+       for (i = 0; i < 31; i++) {
+               product = (product >> 1) ^ (product & 1 ? modulus : 0);
+               x >>= 1;
+               product ^= x & 1 ? y : 0;
+       }
+
+       return product;
 }
 
-u32 __pure __crc32c_le_combine(u32 crc1, u32 crc2, size_t len2)
+/**
+ * crc32_generic_shift - Append len 0 bytes to crc, in logarithmic time
+ * @crc: The original little-endian CRC (i.e. lsbit is x^31 coefficient)
+ * @len: The number of bytes. @crc is multiplied by x^(8*@len)
+ * @polynomial: The modulus used to reduce the result to 32 bits.
+ *
+ * It's possible to parallelize CRC computations by computing a CRC
+ * over separate ranges of a buffer, then summing them.
+ * This shifts the given CRC by 8*len bits (i.e. produces the same effect
+ * as appending len bytes of zero to the data), in time proportional
+ * to log(len).
+ */
+static u32 __attribute_const__ crc32_generic_shift(u32 crc, size_t len,
+                                                  u32 polynomial)
 {
-       return crc32_generic_combine(crc1, crc2, len2, CRC32C_POLY_LE);
+       u32 power = polynomial; /* CRC of x^32 */
+       int i;
+
+       /* Shift up to 32 bits in the simple linear way */
+       for (i = 0; i < 8 * (int)(len & 3); i++)
+               crc = (crc >> 1) ^ (crc & 1 ? polynomial : 0);
+
+       len >>= 2;
+       if (!len)
+               return crc;
+
+       for (;;) {
+               /* "power" is x^(2^i), modulo the polynomial */
+               if (len & 1)
+                       crc = gf2_multiply(crc, power, polynomial);
+
+               len >>= 1;
+               if (!len)
+                       break;
+
+               /* Square power, advancing to x^(2^(i+1)) */
+               power = gf2_multiply(power, power, polynomial);
+       }
+
+       return crc;
 }
-EXPORT_SYMBOL(crc32_le);
-EXPORT_SYMBOL(crc32_le_combine);
-EXPORT_SYMBOL(__crc32c_le);
-EXPORT_SYMBOL(__crc32c_le_combine);
+
+u32 __attribute_const__ crc32_le_shift(u32 crc, size_t len)
+{
+       return crc32_generic_shift(crc, len, CRCPOLY_LE);
+}
+
+u32 __attribute_const__ __crc32c_le_shift(u32 crc, size_t len)
+{
+       return crc32_generic_shift(crc, len, CRC32C_POLY_LE);
+}
+EXPORT_SYMBOL(crc32_le_shift);
+EXPORT_SYMBOL(__crc32c_le_shift);
 
 /**
  * crc32_be_generic() - Calculate bitwise big-endian Ethernet AUTODIN II CRC32
@@ -351,7 +344,7 @@ EXPORT_SYMBOL(crc32_be);
 #ifdef CONFIG_CRC32_SELFTEST
 
 /* 4096 random bytes */
-static u8 __attribute__((__aligned__(8))) test_buf[] =
+static u8 const __aligned(8) test_buf[] __initconst =
 {
        0x5b, 0x85, 0x21, 0xcb, 0x09, 0x68, 0x7d, 0x30,
        0xc7, 0x69, 0xd7, 0x30, 0x92, 0xde, 0x59, 0xe4,
@@ -875,7 +868,7 @@ static struct crc_test {
        u32 crc_le;     /* expected crc32_le result */
        u32 crc_be;     /* expected crc32_be result */
        u32 crc32c_le;  /* expected crc32c_le result */
-} test[] =
+} const test[] __initconst =
 {
        {0x674bf11d, 0x00000038, 0x00000542, 0x0af6d466, 0xd8b6e4c1, 0xf6e93d6c},
        {0x35c672c6, 0x0000003a, 0x000001aa, 0xc6d3dfba, 0x28aaf3ad, 0x0fe92aca},
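
crc32_le_shift() is what makes combining per-range CRCs cheap: shifting crc(A) by len(B) zero bytes and XOR-ing in crc(B) yields the CRC of the concatenation in O(log len) instead of re-hashing the data. Assuming the usual header wrapper built on these exports (not shown in this diff), usage looks like:

    u32 crc_a  = crc32_le(seed, buf_a, len_a);
    u32 crc_b  = crc32_le(0,    buf_b, len_b);

    /* CRC of buf_a||buf_b, without touching the bytes again. */
    u32 crc_ab = crc32_le_shift(crc_a, len_b) ^ crc_b;
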
index 7288e38..c9afbe2 100644 (file)
@@ -614,13 +614,15 @@ int __dynamic_netdev_dbg(struct _ddebug *descriptor,
                char buf[PREFIX_SIZE];
 
                res = dev_printk_emit(7, dev->dev.parent,
-                                     "%s%s %s %s: %pV",
+                                     "%s%s %s %s%s: %pV",
                                      dynamic_emit_prefix(descriptor, buf),
                                      dev_driver_string(dev->dev.parent),
                                      dev_name(dev->dev.parent),
-                                     netdev_name(dev), &vaf);
+                                     netdev_name(dev), netdev_reg_state(dev),
+                                     &vaf);
        } else if (dev) {
-               res = printk(KERN_DEBUG "%s: %pV", netdev_name(dev), &vaf);
+               res = printk(KERN_DEBUG "%s%s: %pV", netdev_name(dev),
+                            netdev_reg_state(dev), &vaf);
        } else {
                res = printk(KERN_DEBUG "(NULL net_device): %pV", &vaf);
        }
index 454baa8..7a7c2da 100644 (file)
@@ -51,3 +51,58 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
        return 0;
 }
 EXPORT_SYMBOL(memcpy_toiovec);
+
+/*
+ *     Copy kernel to iovec. Returns -EFAULT on error.
+ */
+
+int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
+                     int offset, int len)
+{
+       int copy;
+       for (; len > 0; ++iov) {
+               /* Skip over the finished iovecs */
+               if (unlikely(offset >= iov->iov_len)) {
+                       offset -= iov->iov_len;
+                       continue;
+               }
+               copy = min_t(unsigned int, iov->iov_len - offset, len);
+               if (copy_to_user(iov->iov_base + offset, kdata, copy))
+                       return -EFAULT;
+               offset = 0;
+               kdata += copy;
+               len -= copy;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_toiovecend);
+
+/*
+ *     Copy iovec to kernel. Returns -EFAULT on error.
+ */
+
+int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
+                       int offset, int len)
+{
+       /* Skip over the finished iovecs */
+       while (offset >= iov->iov_len) {
+               offset -= iov->iov_len;
+               iov++;
+       }
+
+       while (len > 0) {
+               u8 __user *base = iov->iov_base + offset;
+               int copy = min_t(unsigned int, len, iov->iov_len - offset);
+
+               offset = 0;
+               if (copy_from_user(kdata, base, copy))
+                       return -EFAULT;
+               len -= copy;
+               kdata += copy;
+               iov++;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL(memcpy_fromiovecend);
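
memcpy_toiovecend() walks the iovec without mutating it (unlike memcpy_toiovec() above, which advances iov_base/iov_len), skipping offset bytes of the user buffers first. A hypothetical call scattering a kernel buffer into two user slots (ubuf0/ubuf1/kdata are assumed buffers):

    struct iovec iov[2] = {
            { .iov_base = ubuf0, .iov_len = 64 },
            { .iov_base = ubuf1, .iov_len = 64 },
    };

    /* Copy 100 kernel bytes to user space, skipping the first 10
     * bytes of the iovec; iov[] itself is left unchanged. */
    if (memcpy_toiovecend(iov, kdata, 10, 100))
            return -EFAULT;
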
index df6839e..7a85967 100644 (file)
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
                        len = *ip++;
                        for (; len == 255; length += 255)
                                len = *ip++;
+                       if (unlikely(length > (size_t)(length + len)))
+                               goto _output_error;
                        length += len;
                }
 
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
                if (length == ML_MASK) {
                        for (; *ip == 255; length += 255)
                                ip++;
+                       if (unlikely(length > (size_t)(length + *ip)))
+                               goto _output_error;
                        length += *ip++;
                }
 
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
 
        /* write overflow error detected */
 _output_error:
-       return (int) (-(((char *)ip) - source));
+       return -1;
 }
 
 static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
                        int s = 255;
                        while ((ip < iend) && (s == 255)) {
                                s = *ip++;
+                               if (unlikely(length > (size_t)(length + s)))
+                                       goto _output_error;
                                length += s;
                        }
                }
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
                if (length == ML_MASK) {
                        while (ip < iend) {
                                int s = *ip++;
+                               if (unlikely(length > (size_t)(length + s)))
+                                       goto _output_error;
                                length += s;
                                if (s == 255)
                                        continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
 
        /* write overflow error detected */
 _output_error:
-       return (int) (-(((char *) ip) - source));
+       return -1;
 }
 
 int lz4_decompress(const unsigned char *src, size_t *src_len,
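
Each of the new checks guards the run-length accumulator against unsigned wraparound: if length + len overflows, the sum compares smaller than length, and the decoder must bail out rather than under-size the subsequent copy. The test in isolation:

    size_t length = 0;                      /* accumulated so far */
    unsigned char len;

    len = *ip++;
    if (length + len < length)              /* sum wrapped past SIZE_MAX */
            goto _output_error;
    length += len;
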
index 569985d..8563081 100644 (file)
 #include <linux/lzo.h>
 #include "lzodefs.h"
 
-#define HAVE_IP(x)      ((size_t)(ip_end - ip) >= (size_t)(x))
-#define HAVE_OP(x)      ((size_t)(op_end - op) >= (size_t)(x))
-#define NEED_IP(x)      if (!HAVE_IP(x)) goto input_overrun
-#define NEED_OP(x)      if (!HAVE_OP(x)) goto output_overrun
-#define TEST_LB(m_pos)  if ((m_pos) < out) goto lookbehind_overrun
+#define HAVE_IP(t, x)                                  \
+       (((size_t)(ip_end - ip) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define HAVE_OP(t, x)                                  \
+       (((size_t)(op_end - op) >= (size_t)(t + x)) &&  \
+        (((t + x) >= t) && ((t + x) >= x)))
+
+#define NEED_IP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_IP(t, x))                     \
+                       goto input_overrun;             \
+       } while (0)
+
+#define NEED_OP(t, x)                                  \
+       do {                                            \
+               if (!HAVE_OP(t, x))                     \
+                       goto output_overrun;            \
+       } while (0)
+
+#define TEST_LB(m_pos)                                 \
+       do {                                            \
+               if ((m_pos) < out)                      \
+                       goto lookbehind_overrun;        \
+       } while (0)
 
 int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                          unsigned char *out, size_t *out_len)
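
The rewritten macros make the bounds checks overflow-safe: HAVE_IP(t, x) verifies not only that t + x bytes remain but also that t + x did not wrap, since t is derived from untrusted input. As a standalone predicate:

    /* true iff [p, end) holds at least t + x bytes, overflow-safely */
    static inline int have_bytes(const unsigned char *p,
                                 const unsigned char *end,
                                 size_t t, size_t x)
    {
            return (size_t)(end - p) >= t + x &&
                   t + x >= t && t + x >= x;        /* reject wraparound */
    }
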
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
                                        while (unlikely(*ip == 0)) {
                                                t += 255;
                                                ip++;
-                                               NEED_IP(1);
+                                               NEED_IP(1, 0);
                                        }
                                        t += 15 + *ip++;
                                }
                                t += 3;
 copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-                               if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) {
+                               if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
                                        const unsigned char *ie = ip + t;
                                        unsigned char *oe = op + t;
                                        do {
@@ -81,8 +101,8 @@ copy_literal_run:
                                } else
 #endif
                                {
-                                       NEED_OP(t);
-                                       NEED_IP(t + 3);
+                                       NEED_OP(t, 0);
+                                       NEED_IP(t, 3);
                                        do {
                                                *op++ = *ip++;
                                        } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
                                m_pos -= t >> 2;
                                m_pos -= *ip++ << 2;
                                TEST_LB(m_pos);
-                               NEED_OP(2);
+                               NEED_OP(2, 0);
                                op[0] = m_pos[0];
                                op[1] = m_pos[1];
                                op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 31 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        m_pos = op - 1;
                        next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
                                while (unlikely(*ip == 0)) {
                                        t += 255;
                                        ip++;
-                                       NEED_IP(1);
+                                       NEED_IP(1, 0);
                                }
                                t += 7 + *ip++;
-                               NEED_IP(2);
+                               NEED_IP(2, 0);
                        }
                        next = get_unaligned_le16(ip);
                        ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
                if (op - m_pos >= 8) {
                        unsigned char *oe = op + t;
-                       if (likely(HAVE_OP(t + 15))) {
+                       if (likely(HAVE_OP(t, 15))) {
                                do {
                                        COPY8(op, m_pos);
                                        op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
                                        m_pos += 8;
                                } while (op < oe);
                                op = oe;
-                               if (HAVE_IP(6)) {
+                               if (HAVE_IP(6, 0)) {
                                        state = next;
                                        COPY4(op, ip);
                                        op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
                                        continue;
                                }
                        } else {
-                               NEED_OP(t);
+                               NEED_OP(t, 0);
                                do {
                                        *op++ = *m_pos++;
                                } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
 #endif
                {
                        unsigned char *oe = op + t;
-                       NEED_OP(t);
+                       NEED_OP(t, 0);
                        op[0] = m_pos[0];
                        op[1] = m_pos[1];
                        op += 2;
@@ -194,15 +214,15 @@ match_next:
                state = next;
                t = next;
 #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               if (likely(HAVE_IP(6) && HAVE_OP(4))) {
+               if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
                        COPY4(op, ip);
                        op += t;
                        ip += t;
                } else
 #endif
                {
-                       NEED_IP(t + 3);
-                       NEED_OP(t);
+                       NEED_IP(t, 3);
+                       NEED_OP(t, 0);
                        while (t > 0) {
                                *op++ = *ip++;
                                t--;
index 2e3c52c..148fc6e 100644 (file)
@@ -3,24 +3,24 @@
 #include <linux/ctype.h>
 #include <linux/kernel.h>
 
-int mac_pton(const char *s, u8 *mac)
+bool mac_pton(const char *s, u8 *mac)
 {
        int i;
 
        /* XX:XX:XX:XX:XX:XX */
        if (strlen(s) < 3 * ETH_ALEN - 1)
-               return 0;
+               return false;
 
        /* Don't dirty result unless string is valid MAC. */
        for (i = 0; i < ETH_ALEN; i++) {
                if (!isxdigit(s[i * 3]) || !isxdigit(s[i * 3 + 1]))
-                       return 0;
+                       return false;
                if (i != ETH_ALEN - 1 && s[i * 3 + 2] != ':')
-                       return 0;
+                       return false;
        }
        for (i = 0; i < ETH_ALEN; i++) {
                mac[i] = (hex_to_bin(s[i * 3]) << 4) | hex_to_bin(s[i * 3 + 1]);
        }
-       return 1;
+       return true;
 }
 EXPORT_SYMBOL(mac_pton);
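
With the bool return type, callers read naturally, and since mac_pton() only writes to mac once the whole string has validated, a failed call leaves the output untouched:

    u8 mac[ETH_ALEN];

    if (!mac_pton("00:11:22:33:44:55", mac))
            return -EINVAL;                 /* mac[] not modified on failure */
    /* mac == { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 } */
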
index fa5da61..c9b6bf3 100644 (file)
 
 #ifdef CONFIG_RANDOM32_SELFTEST
 static void __init prandom_state_selftest(void);
+#else
+static inline void prandom_state_selftest(void)
+{
+}
 #endif
 
 static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
@@ -53,8 +57,7 @@ static DEFINE_PER_CPU(struct rnd_state, net_rand_state);
  */
 u32 prandom_u32_state(struct rnd_state *state)
 {
-#define TAUSWORTHE(s,a,b,c,d) ((s&c)<<d) ^ (((s <<a) ^ s)>>b)
-
+#define TAUSWORTHE(s, a, b, c, d) ((s & c) << d) ^ (((s << a) ^ s) >> b)
        state->s1 = TAUSWORTHE(state->s1,  6U, 13U, 4294967294U, 18U);
        state->s2 = TAUSWORTHE(state->s2,  2U, 27U, 4294967288U,  2U);
        state->s3 = TAUSWORTHE(state->s3, 13U, 21U, 4294967280U,  7U);
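
For reference, the reformatted macro is one step of the four-component
Tausworthe generator. A self-contained user-space rendering (illustrative
only; the s4 line and the final xor of the four words are taken from the
surrounding file, not from this hunk):

#include <stdint.h>

struct rnd_state { uint32_t s1, s2, s3, s4; };

static uint32_t prandom_u32_state(struct rnd_state *st)
{
#define TAUSWORTHE(s, a, b, c, d) (((s & c) << d) ^ (((s << a) ^ s) >> b))
	st->s1 = TAUSWORTHE(st->s1,  6U, 13U, 4294967294U, 18U);
	st->s2 = TAUSWORTHE(st->s2,  2U, 27U, 4294967288U,  2U);
	st->s3 = TAUSWORTHE(st->s3, 13U, 21U, 4294967280U,  7U);
	st->s4 = TAUSWORTHE(st->s4,  3U, 12U, 4294967168U, 13U);
	return st->s1 ^ st->s2 ^ st->s3 ^ st->s4;
}
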
@@ -147,21 +150,25 @@ static void prandom_warmup(struct rnd_state *state)
        prandom_u32_state(state);
 }
 
-static void prandom_seed_very_weak(struct rnd_state *state, u32 seed)
+static u32 __extract_hwseed(void)
 {
-       /* Note: This sort of seeding is ONLY used in test cases and
-        * during boot at the time from core_initcall until late_initcall
-        * as we don't have a stronger entropy source available yet.
-        * After late_initcall, we reseed entire state, we have to (!),
-        * otherwise an attacker just needs to search 32 bit space to
-        * probe for our internal 128 bit state if he knows a couple
-        * of prandom32 outputs!
-        */
-#define LCG(x) ((x) * 69069U)  /* super-duper LCG */
-       state->s1 = __seed(LCG(seed),        2U);
-       state->s2 = __seed(LCG(state->s1),   8U);
-       state->s3 = __seed(LCG(state->s2),  16U);
-       state->s4 = __seed(LCG(state->s3), 128U);
+       u32 val = 0;
+
+       (void)(arch_get_random_seed_int(&val) ||
+              arch_get_random_int(&val));
+
+       return val;
+}
+
+static void prandom_seed_early(struct rnd_state *state, u32 seed,
+                              bool mix_with_hwseed)
+{
+#define LCG(x)  ((x) * 69069U) /* super-duper LCG */
+#define HWSEED() (mix_with_hwseed ? __extract_hwseed() : 0)
+       state->s1 = __seed(HWSEED() ^ LCG(seed),        2U);
+       state->s2 = __seed(HWSEED() ^ LCG(state->s1),   8U);
+       state->s3 = __seed(HWSEED() ^ LCG(state->s2),  16U);
+       state->s4 = __seed(HWSEED() ^ LCG(state->s3), 128U);
 }
 
 /**
@@ -194,14 +201,13 @@ static int __init prandom_init(void)
 {
        int i;
 
-#ifdef CONFIG_RANDOM32_SELFTEST
        prandom_state_selftest();
-#endif
 
        for_each_possible_cpu(i) {
                struct rnd_state *state = &per_cpu(net_rand_state,i);
+               u32 weak_seed = (i + jiffies) ^ random_get_entropy();
 
-               prandom_seed_very_weak(state, (i + jiffies) ^ random_get_entropy());
+               prandom_seed_early(state, weak_seed, true);
                prandom_warmup(state);
        }
 
@@ -210,6 +216,7 @@ static int __init prandom_init(void)
 core_initcall(prandom_init);
 
 static void __prandom_timer(unsigned long dontcare);
+
 static DEFINE_TIMER(seed_timer, __prandom_timer, 0, 0);
 
 static void __prandom_timer(unsigned long dontcare)
@@ -419,7 +426,7 @@ static void __init prandom_state_selftest(void)
        for (i = 0; i < ARRAY_SIZE(test1); i++) {
                struct rnd_state state;
 
-               prandom_seed_very_weak(&state, test1[i].seed);
+               prandom_seed_early(&state, test1[i].seed, false);
                prandom_warmup(&state);
 
                if (test1[i].result != prandom_u32_state(&state))
@@ -434,7 +441,7 @@ static void __init prandom_state_selftest(void)
        for (i = 0; i < ARRAY_SIZE(test2); i++) {
                struct rnd_state state;
 
-               prandom_seed_very_weak(&state, test2[i].seed);
+               prandom_seed_early(&state, test2[i].seed, false);
                prandom_warmup(&state);
 
                for (j = 0; j < test2[i].iteration - 1; j++)
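
The net effect of prandom_seed_early() is a chain: each state word is an LCG
step of its predecessor, optionally XORed with a hardware seed word, then
clamped so it stays above the per-word minimum the Tausworthe recurrences
require. A user-space sketch under those assumptions (the clamp mirrors the
kernel's __seed() helper; hw stands in for any 32-bit entropy source):

#include <stdint.h>

#define LCG(x) ((x) * 69069U)	/* super-duper LCG */

static uint32_t seed_clamp(uint32_t x, uint32_t m)
{
	return (x < m) ? x + m : x;	/* mirrors __seed() */
}

static void seed_early(uint32_t s[4], uint32_t seed, uint32_t (*hw)(void))
{
	s[0] = seed_clamp(hw() ^ LCG(seed), 2U);
	s[1] = seed_clamp(hw() ^ LCG(s[0]), 8U);
	s[2] = seed_clamp(hw() ^ LCG(s[1]), 16U);
	s[3] = seed_clamp(hw() ^ LCG(s[2]), 128U);
}
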
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
new file mode 100644 (file)
index 0000000..e6940cf
--- /dev/null
@@ -0,0 +1,797 @@
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Copyright (c) 2014 Thomas Graf <tgraf@suug.ch>
+ * Copyright (c) 2008-2014 Patrick McHardy <kaber@trash.net>
+ *
+ * Based on the following paper:
+ * https://www.usenix.org/legacy/event/atc11/tech/final_files/Triplett.pdf
+ *
+ * Code partially derived from nft_hash
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/mm.h>
+#include <linux/hash.h>
+#include <linux/random.h>
+#include <linux/rhashtable.h>
+
+#define HASH_DEFAULT_SIZE      64UL
+#define HASH_MIN_SIZE          4UL
+
+#define ASSERT_RHT_MUTEX(HT) BUG_ON(!lockdep_rht_mutex_is_held(HT))
+
+#ifdef CONFIG_PROVE_LOCKING
+int lockdep_rht_mutex_is_held(const struct rhashtable *ht)
+{
+       return ht->p.mutex_is_held();
+}
+EXPORT_SYMBOL_GPL(lockdep_rht_mutex_is_held);
+#endif
+
+/**
+ * rht_obj - cast hash head to outer object
+ * @ht:                hash table
+ * @he:                hashed node
+ */
+void *rht_obj(const struct rhashtable *ht, const struct rhash_head *he)
+{
+       return (void *) he - ht->p.head_offset;
+}
+EXPORT_SYMBOL_GPL(rht_obj);
+
+static u32 __hashfn(const struct rhashtable *ht, const void *key,
+                     u32 len, u32 hsize)
+{
+       u32 h;
+
+       h = ht->p.hashfn(key, len, ht->p.hash_rnd);
+
+       return h & (hsize - 1);
+}
+
+/**
+ * rhashtable_hashfn - compute hash for key of given length
+ * @ht:                hash table to compute the hash for
+ * @key:       pointer to key
+ * @len:       length of key
+ *
+ * Computes the hash value using the hash function provided in the 'hashfn'
+ * member of struct rhashtable_params. The returned value is guaranteed to be
+ * smaller than the number of buckets in the hash table.
+ */
+u32 rhashtable_hashfn(const struct rhashtable *ht, const void *key, u32 len)
+{
+       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       return __hashfn(ht, key, len, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_hashfn);
+
+static u32 obj_hashfn(const struct rhashtable *ht, const void *ptr, u32 hsize)
+{
+       if (unlikely(!ht->p.key_len)) {
+               u32 h;
+
+               h = ht->p.obj_hashfn(ptr, ht->p.hash_rnd);
+
+               return h & (hsize - 1);
+       }
+
+       return __hashfn(ht, ptr + ht->p.key_offset, ht->p.key_len, hsize);
+}
+
+/**
+ * rhashtable_obj_hashfn - compute hash for hashed object
+ * @ht:                hash table to compute the hash for
+ * @ptr:       pointer to hashed object
+ *
+ * Computes the hash value using either 'hashfn' or 'obj_hashfn', depending
+ * on whether the hash table is set up to work with
+ * a fixed length key. The returned value is guaranteed to be smaller than
+ * the number of buckets in the hash table.
+ */
+u32 rhashtable_obj_hashfn(const struct rhashtable *ht, void *ptr)
+{
+       struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+       return obj_hashfn(ht, ptr, tbl->size);
+}
+EXPORT_SYMBOL_GPL(rhashtable_obj_hashfn);
+
+static u32 head_hashfn(const struct rhashtable *ht,
+                      const struct rhash_head *he, u32 hsize)
+{
+       return obj_hashfn(ht, rht_obj(ht, he), hsize);
+}
+
+static struct bucket_table *bucket_table_alloc(size_t nbuckets, gfp_t flags)
+{
+       struct bucket_table *tbl;
+       size_t size;
+
+       size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
+       tbl = kzalloc(size, flags);
+       if (tbl == NULL)
+               tbl = vzalloc(size);
+
+       if (tbl == NULL)
+               return NULL;
+
+       tbl->size = nbuckets;
+
+       return tbl;
+}
+
+static void bucket_table_free(const struct bucket_table *tbl)
+{
+       kvfree(tbl);
+}
+
+/**
+ * rht_grow_above_75 - returns true if nelems > 0.75 * table-size
+ * @ht:                hash table
+ * @new_size:  new table size
+ */
+bool rht_grow_above_75(const struct rhashtable *ht, size_t new_size)
+{
+       /* Expand table when exceeding 75% load */
+       return ht->nelems > (new_size / 4 * 3);
+}
+EXPORT_SYMBOL_GPL(rht_grow_above_75);
+
+/**
+ * rht_shrink_below_30 - returns true if nelems < 0.3 * table-size
+ * @ht:                hash table
+ * @new_size:  new table size
+ */
+bool rht_shrink_below_30(const struct rhashtable *ht, size_t new_size)
+{
+       /* Shrink table beneath 30% load */
+       return ht->nelems < (new_size * 3 / 10);
+}
+EXPORT_SYMBOL_GPL(rht_shrink_below_30);
+
+static void hashtable_chain_unzip(const struct rhashtable *ht,
+                                 const struct bucket_table *new_tbl,
+                                 struct bucket_table *old_tbl, size_t n)
+{
+       struct rhash_head *he, *p, *next;
+       unsigned int h;
+
+       /* Old bucket empty, no work needed. */
+       p = rht_dereference(old_tbl->buckets[n], ht);
+       if (!p)
+               return;
+
+       /* Advance the old bucket pointer one or more times until it
+        * reaches a node that doesn't hash to the same bucket as its
+        * predecessor; call that predecessor p.
+        */
+       h = head_hashfn(ht, p, new_tbl->size);
+       rht_for_each(he, p->next, ht) {
+               if (head_hashfn(ht, he, new_tbl->size) != h)
+                       break;
+               p = he;
+       }
+       RCU_INIT_POINTER(old_tbl->buckets[n], p->next);
+
+       /* Find the subsequent node which does hash to the same
+        * bucket as node p, or NULL if no such node exists.
+        */
+       next = NULL;
+       if (he) {
+               rht_for_each(he, he->next, ht) {
+                       if (head_hashfn(ht, he, new_tbl->size) == h) {
+                               next = he;
+                               break;
+                       }
+               }
+       }
+
+       /* Set p's next pointer to that subsequent node pointer,
+        * bypassing the nodes which do not hash to p's bucket
+        */
+       RCU_INIT_POINTER(p->next, next);
+}
+
+/**
+ * rhashtable_expand - Expand hash table while allowing concurrent lookups
+ * @ht:                the hash table to expand
+ * @flags:     allocation flags
+ *
+ * A secondary bucket array is allocated and the hash entries are migrated
+ * while keeping them on both lists until the end of the RCU grace period.
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
+{
+       struct bucket_table *new_tbl, *old_tbl = rht_dereference(ht->tbl, ht);
+       struct rhash_head *he;
+       unsigned int i, h;
+       bool complete;
+
+       ASSERT_RHT_MUTEX(ht);
+
+       if (ht->p.max_shift && ht->shift >= ht->p.max_shift)
+               return 0;
+
+       new_tbl = bucket_table_alloc(old_tbl->size * 2, flags);
+       if (new_tbl == NULL)
+               return -ENOMEM;
+
+       ht->shift++;
+
+       /* For each new bucket, search the corresponding old bucket
+        * for the first entry that hashes to the new bucket, and
+        * link the new bucket to that entry. Since all the entries
+        * which will end up in the new bucket appear in the same
+        * old bucket, this constructs an entirely valid new hash
+        * table, but with multiple buckets "zipped" together into a
+        * single imprecise chain.
+        */
+       for (i = 0; i < new_tbl->size; i++) {
+               h = i & (old_tbl->size - 1);
+               rht_for_each(he, old_tbl->buckets[h], ht) {
+                       if (head_hashfn(ht, he, new_tbl->size) == i) {
+                               RCU_INIT_POINTER(new_tbl->buckets[i], he);
+                               break;
+                       }
+               }
+       }
+
+       /* Publish the new table pointer. Lookups may now traverse
+        * the new table, but they will not benefit from any
+        * additional efficiency until later steps unzip the buckets.
+        */
+       rcu_assign_pointer(ht->tbl, new_tbl);
+
+       /* Unzip interleaved hash chains */
+       do {
+               /* Wait for readers. All new readers will see the new
+                * table, and thus no references to the old table will
+                * remain.
+                */
+               synchronize_rcu();
+
+               /* For each bucket in the old table (each of which
+                * contains items from multiple buckets of the new
+                * table): ...
+                */
+               complete = true;
+               for (i = 0; i < old_tbl->size; i++) {
+                       hashtable_chain_unzip(ht, new_tbl, old_tbl, i);
+                       if (old_tbl->buckets[i] != NULL)
+                               complete = false;
+               }
+       } while (!complete);
+
+       bucket_table_free(old_tbl);
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_expand);
+
+/**
+ * rhashtable_shrink - Shrink hash table while allowing concurrent lookups
+ * @ht:                the hash table to shrink
+ * @flags:     allocation flags
+ *
+ * This function may only be called in a context where it is safe to call
+ * synchronize_rcu(), e.g. not within an rcu_read_lock() section.
+ *
+ * The caller must ensure that no concurrent table mutations take place.
+ * It is however valid to have concurrent lookups if they are RCU protected.
+ */
+int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
+{
+       struct bucket_table *ntbl, *tbl = rht_dereference(ht->tbl, ht);
+       struct rhash_head __rcu **pprev;
+       unsigned int i;
+
+       ASSERT_RHT_MUTEX(ht);
+
+       if (tbl->size <= HASH_MIN_SIZE)
+               return 0;
+
+       ntbl = bucket_table_alloc(tbl->size / 2, flags);
+       if (ntbl == NULL)
+               return -ENOMEM;
+
+       ht->shift--;
+
+       /* Link each bucket in the new table to the first bucket
+        * in the old table that contains entries which will hash
+        * to the new bucket.
+        */
+       for (i = 0; i < ntbl->size; i++) {
+               ntbl->buckets[i] = tbl->buckets[i];
+
+               /* Walk to the end of that chain, then append the bucket
+                * in the upper half of the old table whose entries also
+                * hash to this new bucket.
+                */
+               for (pprev = &ntbl->buckets[i]; *pprev != NULL;
+                    pprev = &rht_dereference(*pprev, ht)->next)
+                       ;
+               RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
+       }
+
+       /* Publish the new, valid hash table */
+       rcu_assign_pointer(ht->tbl, ntbl);
+
+       /* Wait for readers. No new readers will have references to the
+        * old hash table.
+        */
+       synchronize_rcu();
+
+       bucket_table_free(tbl);
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_shrink);
+
+/**
+ * rhashtable_insert - insert object into hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @flags:     allocation flags (table expansion)
+ *
+ * Will automatically grow the table via rhashtable_expand() if the
+ * grow_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+void rhashtable_insert(struct rhashtable *ht, struct rhash_head *obj,
+                      gfp_t flags)
+{
+       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+       u32 hash;
+
+       ASSERT_RHT_MUTEX(ht);
+
+       hash = head_hashfn(ht, obj, tbl->size);
+       RCU_INIT_POINTER(obj->next, tbl->buckets[hash]);
+       rcu_assign_pointer(tbl->buckets[hash], obj);
+       ht->nelems++;
+
+       if (ht->p.grow_decision && ht->p.grow_decision(ht, tbl->size))
+               rhashtable_expand(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_insert);
+
+/**
+ * rhashtable_remove_pprev - remove object from hash table given previous element
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @pprev:     pointer to previous element
+ * @flags:     allocation flags (table expansion)
+ *
+ * Identical to rhashtable_remove() but the caller is already aware of the
+ * element
+ * in front of the element to be deleted. This is in particular useful for
+ * deletion when combined with walking or lookup.
+ */
+void rhashtable_remove_pprev(struct rhashtable *ht, struct rhash_head *obj,
+                            struct rhash_head **pprev, gfp_t flags)
+{
+       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+       ASSERT_RHT_MUTEX(ht);
+
+       RCU_INIT_POINTER(*pprev, obj->next);
+       ht->nelems--;
+
+       if (ht->p.shrink_decision &&
+           ht->p.shrink_decision(ht, tbl->size))
+               rhashtable_shrink(ht, flags);
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove_pprev);
+
+/**
+ * rhashtable_remove - remove object from hash table
+ * @ht:                hash table
+ * @obj:       pointer to hash head inside object
+ * @flags:     allocation flags (table expansion)
+ *
+ * Since the hash chain is singly linked, the removal operation needs to
+ * walk the bucket chain upon removal. The removal operation is thus
+ * considerably slow if the hash table is not correctly sized.
+ *
+ * Will automatically shrink the table via rhashtable_shrink() if the
+ * shrink_decision function specified at rhashtable_init() returns true.
+ *
+ * The caller must ensure that no concurrent table mutations occur. It is
+ * however valid to have concurrent lookups if they are RCU protected.
+ */
+bool rhashtable_remove(struct rhashtable *ht, struct rhash_head *obj,
+                      gfp_t flags)
+{
+       struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+       struct rhash_head __rcu **pprev;
+       struct rhash_head *he;
+       u32 h;
+
+       ASSERT_RHT_MUTEX(ht);
+
+       h = head_hashfn(ht, obj, tbl->size);
+
+       pprev = &tbl->buckets[h];
+       rht_for_each(he, tbl->buckets[h], ht) {
+               if (he != obj) {
+                       pprev = &he->next;
+                       continue;
+               }
+
+               rhashtable_remove_pprev(ht, he, pprev, flags);
+               return true;
+       }
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(rhashtable_remove);
+
+/**
+ * rhashtable_lookup - lookup key in hash table
+ * @ht:                hash table
+ * @key:       pointer to key
+ *
+ * Computes the hash value for the key and traverses the bucket chain looking
+ * for an entry with an identical key. The first matching entry is returned.
+ *
+ * This lookup function may only be used for hash tables with a fixed key
+ * length (key_len parameter set). It will BUG() if used inappropriately.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ */
+void *rhashtable_lookup(const struct rhashtable *ht, const void *key)
+{
+       const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       struct rhash_head *he;
+       u32 h;
+
+       BUG_ON(!ht->p.key_len);
+
+       h = __hashfn(ht, key, ht->p.key_len, tbl->size);
+       rht_for_each_rcu(he, tbl->buckets[h], ht) {
+               if (memcmp(rht_obj(ht, he) + ht->p.key_offset, key,
+                          ht->p.key_len))
+                       continue;
+               return (void *) he - ht->p.head_offset;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup);
+
+/**
+ * rhashtable_lookup_compare - search hash table with compare function
+ * @ht:                hash table
+ * @hash:      hash value of desired entry
+ * @compare:   compare function, must return true on match
+ * @arg:       argument passed on to compare function
+ *
+ * Traverses the bucket chain behind the provided hash value and calls the
+ * specified compare function for each entry.
+ *
+ * Lookups may occur in parallel with hash mutations as long as the lookup is
+ * guarded by rcu_read_lock(). The caller must take care of this.
+ *
+ * Returns the first entry on which the compare function returned true.
+ */
+void *rhashtable_lookup_compare(const struct rhashtable *ht, u32 hash,
+                               bool (*compare)(void *, void *), void *arg)
+{
+       const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+       struct rhash_head *he;
+
+       if (unlikely(hash >= tbl->size))
+               return NULL;
+
+       rht_for_each_rcu(he, tbl->buckets[hash], ht) {
+               if (!compare(rht_obj(ht, he), arg))
+                       continue;
+               return (void *) he - ht->p.head_offset;
+       }
+
+       return NULL;
+}
+EXPORT_SYMBOL_GPL(rhashtable_lookup_compare);
+
+static size_t rounded_hashtable_size(unsigned int nelem)
+{
+       return max(roundup_pow_of_two(nelem * 4 / 3), HASH_MIN_SIZE);
+}
+
+/**
+ * rhashtable_init - initialize a new hash table
+ * @ht:                hash table to be initialized
+ * @params:    configuration parameters
+ *
+ * Initializes a new hash table based on the provided configuration
+ * parameters. A table can be configured either with a variable or
+ * fixed length key:
+ *
+ * Configuration Example 1: Fixed length keys
+ * struct test_obj {
+ *     int                     key;
+ *     void *                  my_member;
+ *     struct rhash_head       node;
+ * };
+ *
+ * struct rhashtable_params params = {
+ *     .head_offset = offsetof(struct test_obj, node),
+ *     .key_offset = offsetof(struct test_obj, key),
+ *     .key_len = sizeof(int),
+ *     .hashfn = arch_fast_hash,
+ *     .mutex_is_held = &my_mutex_is_held,
+ * };
+ *
+ * Configuration Example 2: Variable length keys
+ * struct test_obj {
+ *     [...]
+ *     struct rhash_head       node;
+ * };
+ *
+ * u32 my_hash_fn(const void *data, u32 seed)
+ * {
+ *     struct test_obj *obj = data;
+ *
+ *     return [... hash ...];
+ * }
+ *
+ * struct rhashtable_params params = {
+ *     .head_offset = offsetof(struct test_obj, node),
+ *     .hashfn = arch_fast_hash,
+ *     .obj_hashfn = my_hash_fn,
+ *     .mutex_is_held = &my_mutex_is_held,
+ * };
+ */
+int rhashtable_init(struct rhashtable *ht, struct rhashtable_params *params)
+{
+       struct bucket_table *tbl;
+       size_t size;
+
+       size = HASH_DEFAULT_SIZE;
+
+       if ((params->key_len && !params->hashfn) ||
+           (!params->key_len && !params->obj_hashfn))
+               return -EINVAL;
+
+       if (params->nelem_hint)
+               size = rounded_hashtable_size(params->nelem_hint);
+
+       tbl = bucket_table_alloc(size, GFP_KERNEL);
+       if (tbl == NULL)
+               return -ENOMEM;
+
+       memset(ht, 0, sizeof(*ht));
+       ht->shift = ilog2(tbl->size);
+       memcpy(&ht->p, params, sizeof(*params));
+       RCU_INIT_POINTER(ht->tbl, tbl);
+
+       if (!ht->p.hash_rnd)
+               get_random_bytes(&ht->p.hash_rnd, sizeof(ht->p.hash_rnd));
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(rhashtable_init);
+
+/**
+ * rhashtable_destroy - destroy hash table
+ * @ht:                the hash table to destroy
+ *
+ * Frees the bucket array.
+ */
+void rhashtable_destroy(const struct rhashtable *ht)
+{
+       const struct bucket_table *tbl = rht_dereference(ht->tbl, ht);
+
+       bucket_table_free(tbl);
+}
+EXPORT_SYMBOL_GPL(rhashtable_destroy);
+
+/**************************************************************************
+ * Self Test
+ **************************************************************************/
+
+#ifdef CONFIG_TEST_RHASHTABLE
+
+#define TEST_HT_SIZE   8
+#define TEST_ENTRIES   2048
+#define TEST_PTR       ((void *) 0xdeadbeef)
+#define TEST_NEXPANDS  4
+
+static int test_mutex_is_held(void)
+{
+       return 1;
+}
+
+struct test_obj {
+       void                    *ptr;
+       int                     value;
+       struct rhash_head       node;
+};
+
+static int __init test_rht_lookup(struct rhashtable *ht)
+{
+       unsigned int i;
+
+       for (i = 0; i < TEST_ENTRIES * 2; i++) {
+               struct test_obj *obj;
+               bool expected = !(i % 2);
+               u32 key = i;
+
+               obj = rhashtable_lookup(ht, &key);
+
+               if (expected && !obj) {
+                       pr_warn("Test failed: Could not find key %u\n", key);
+                       return -ENOENT;
+               } else if (!expected && obj) {
+                       pr_warn("Test failed: Unexpected entry found for key %u\n",
+                               key);
+                       return -EEXIST;
+               } else if (expected && obj) {
+                       if (obj->ptr != TEST_PTR || obj->value != i) {
+                               pr_warn("Test failed: Lookup value mismatch %p!=%p, %u!=%u\n",
+                                       obj->ptr, TEST_PTR, obj->value, i);
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       return 0;
+}
+
+static void test_bucket_stats(struct rhashtable *ht,
+                                    struct bucket_table *tbl,
+                                    bool quiet)
+{
+       unsigned int cnt, i, total = 0;
+       struct test_obj *obj;
+
+       for (i = 0; i < tbl->size; i++) {
+               cnt = 0;
+
+               if (!quiet)
+                       pr_info(" [%#4x/%zu]", i, tbl->size);
+
+               rht_for_each_entry_rcu(obj, tbl->buckets[i], node) {
+                       cnt++;
+                       total++;
+                       if (!quiet)
+                               pr_cont(" [%p],", obj);
+               }
+
+               if (!quiet)
+                       pr_cont("\n  [%#x] first element: %p, chain length: %u\n",
+                               i, tbl->buckets[i], cnt);
+       }
+
+       pr_info("  Traversal complete: counted=%u, nelems=%zu, entries=%d\n",
+               total, ht->nelems, TEST_ENTRIES);
+}
+
+static int __init test_rhashtable(struct rhashtable *ht)
+{
+       struct bucket_table *tbl;
+       struct test_obj *obj, *next;
+       int err;
+       unsigned int i;
+
+       /*
+        * Insertion Test:
+        * Insert TEST_ENTRIES into table with all keys even numbers
+        */
+       pr_info("  Adding %d keys\n", TEST_ENTRIES);
+       for (i = 0; i < TEST_ENTRIES; i++) {
+               struct test_obj *obj;
+
+               obj = kzalloc(sizeof(*obj), GFP_KERNEL);
+               if (!obj) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               obj->ptr = TEST_PTR;
+               obj->value = i * 2;
+
+               rhashtable_insert(ht, &obj->node, GFP_KERNEL);
+       }
+
+       rcu_read_lock();
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+       test_bucket_stats(ht, tbl, true);
+       test_rht_lookup(ht);
+       rcu_read_unlock();
+
+       for (i = 0; i < TEST_NEXPANDS; i++) {
+               pr_info("  Table expansion iteration %u...\n", i);
+               rhashtable_expand(ht, GFP_KERNEL);
+
+               rcu_read_lock();
+               pr_info("  Verifying lookups...\n");
+               test_rht_lookup(ht);
+               rcu_read_unlock();
+       }
+
+       for (i = 0; i < TEST_NEXPANDS; i++) {
+               pr_info("  Table shrinkage iteration %u...\n", i);
+               rhashtable_shrink(ht, GFP_KERNEL);
+
+               rcu_read_lock();
+               pr_info("  Verifying lookups...\n");
+               test_rht_lookup(ht);
+               rcu_read_unlock();
+       }
+
+       pr_info("  Deleting %d keys\n", TEST_ENTRIES);
+       for (i = 0; i < TEST_ENTRIES; i++) {
+               u32 key = i * 2;
+
+               obj = rhashtable_lookup(ht, &key);
+               BUG_ON(!obj);
+
+               rhashtable_remove(ht, &obj->node, GFP_KERNEL);
+               kfree(obj);
+       }
+
+       return 0;
+
+error:
+       tbl = rht_dereference_rcu(ht->tbl, ht);
+       for (i = 0; i < tbl->size; i++)
+               rht_for_each_entry_safe(obj, next, tbl->buckets[i], ht, node)
+                       kfree(obj);
+
+       return err;
+}
+
+static int __init test_rht_init(void)
+{
+       struct rhashtable ht;
+       struct rhashtable_params params = {
+               .nelem_hint = TEST_HT_SIZE,
+               .head_offset = offsetof(struct test_obj, node),
+               .key_offset = offsetof(struct test_obj, value),
+               .key_len = sizeof(int),
+               .hashfn = arch_fast_hash,
+               .mutex_is_held = &test_mutex_is_held,
+               .grow_decision = rht_grow_above_75,
+               .shrink_decision = rht_shrink_below_30,
+       };
+       int err;
+
+       pr_info("Running resizable hashtable tests...\n");
+
+       err = rhashtable_init(&ht, &params);
+       if (err < 0) {
+               pr_warn("Test failed: Unable to initialize hashtable: %d\n",
+                       err);
+               return err;
+       }
+
+       err = test_rhashtable(&ht);
+
+       rhashtable_destroy(&ht);
+
+       return err;
+}
+
+subsys_initcall(test_rht_init);
+
+#endif /* CONFIG_TEST_RHASHTABLE */
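
Pulling the new API together: mutations must be serialized by the caller
(here with a mutex) while lookups only need rcu_read_lock(). Below is a
minimal hypothetical caller; my_obj, my_mutex and the my_* helpers are
invented for illustration, and the sketch assumes objects are never freed
while a reader might still hold them. With rht_grow_above_75 and
rht_shrink_below_30 wired in as shown, a 64-bucket table expands once
nelems exceeds 48 and shrinks once nelems drops below 19.

struct my_obj {
	u32			key;
	struct rhash_head	node;	/* linkage owned by the table */
};

static struct rhashtable my_ht;
static DEFINE_MUTEX(my_mutex);

static int my_mutex_is_held(void)
{
	return mutex_is_locked(&my_mutex);
}

static int my_setup(void)
{
	struct rhashtable_params params = {
		.nelem_hint	 = 64,
		.head_offset	 = offsetof(struct my_obj, node),
		.key_offset	 = offsetof(struct my_obj, key),
		.key_len	 = sizeof(u32),
		.hashfn		 = arch_fast_hash,
		.mutex_is_held	 = &my_mutex_is_held,
		.grow_decision	 = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	return rhashtable_init(&my_ht, &params);
}

static void my_insert(struct my_obj *obj)
{
	mutex_lock(&my_mutex);		/* serialize all mutations */
	rhashtable_insert(&my_ht, &obj->node, GFP_KERNEL);
	mutex_unlock(&my_mutex);
}

static struct my_obj *my_find(u32 key)
{
	struct my_obj *obj;

	rcu_read_lock();		/* lookups only need RCU */
	obj = rhashtable_lookup(&my_ht, &key);
	rcu_read_unlock();

	return obj;
}
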
index 649d097..4abda07 100644 (file)
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
  * We need to save away the original address corresponding to a mapped entry
  * for the sync operations.
  */
+#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
 static phys_addr_t *io_tlb_orig_addr;
 
 /*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
        io_tlb_list = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
                                PAGE_SIZE);
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
        io_tlb_orig_addr = memblock_virt_alloc(
                                PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
                                PAGE_SIZE);
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
 
        if (verbose)
                swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        if (!io_tlb_list)
                goto cleanup3;
 
-       for (i = 0; i < io_tlb_nslabs; i++)
-               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
-       io_tlb_index = 0;
-
        io_tlb_orig_addr = (phys_addr_t *)
                __get_free_pages(GFP_KERNEL,
                                 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
        if (!io_tlb_orig_addr)
                goto cleanup4;
 
-       memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t));
+       for (i = 0; i < io_tlb_nslabs; i++) {
+               io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
+               io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+       }
+       io_tlb_index = 0;
 
        swiotlb_print_info();
 
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
        /*
         * First, sync the memory before unmapping the entry
         */
-       if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
+       if (orig_addr != INVALID_PHYS_ADDR &&
+           ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
                swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
 
        /*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
                 * Step 1: return the slots to the free list, merging the
                 * slots with succeeding slots
                 */
-               for (i = index + nslots - 1; i >= index; i--)
+               for (i = index + nslots - 1; i >= index; i--) {
                        io_tlb_list[i] = ++count;
+                       io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
+               }
                /*
                 * Step 2: merge the returned slots with the preceding slots,
                 * if available (non-zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
        int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
        phys_addr_t orig_addr = io_tlb_orig_addr[index];
 
+       if (orig_addr == INVALID_PHYS_ADDR)
+               return;
        orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
 
        switch (target) {
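
The sentinel is needed because physical address 0 can be a perfectly valid
bounce-buffer origin, so "slot unused" has to be encoded out of band rather
than as zero. Illustrative check (hypothetical helper name):

static bool slot_in_use(phys_addr_t orig)
{
	return orig != INVALID_PHYS_ADDR;	/* not: orig != 0 */
}
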
index c579e0f..89e0345 100644 (file)
@@ -66,7 +66,7 @@ struct bpf_test {
        const char *descr;
        union {
                struct sock_filter insns[MAX_INSNS];
-               struct sock_filter_int insns_int[MAX_INSNS];
+               struct bpf_insn insns_int[MAX_INSNS];
        } u;
        __u8 aux;
        __u8 data[MAX_DATA];
@@ -1761,9 +1761,9 @@ static int probe_filter_length(struct sock_filter *fp)
        return len + 1;
 }
 
-static struct sk_filter *generate_filter(int which, int *err)
+static struct bpf_prog *generate_filter(int which, int *err)
 {
-       struct sk_filter *fp;
+       struct bpf_prog *fp;
        struct sock_fprog_kern fprog;
        unsigned int flen = probe_filter_length(tests[which].u.insns);
        __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
@@ -1773,7 +1773,7 @@ static struct sk_filter *generate_filter(int which, int *err)
                fprog.filter = tests[which].u.insns;
                fprog.len = flen;
 
-               *err = sk_unattached_filter_create(&fp, &fprog);
+               *err = bpf_prog_create(&fp, &fprog);
                if (tests[which].aux & FLAG_EXPECTED_FAIL) {
                        if (*err == -EINVAL) {
                                pr_cont("PASS\n");
@@ -1798,7 +1798,7 @@ static struct sk_filter *generate_filter(int which, int *err)
                break;
 
        case INTERNAL:
-               fp = kzalloc(sk_filter_size(flen), GFP_KERNEL);
+               fp = kzalloc(bpf_prog_size(flen), GFP_KERNEL);
                if (fp == NULL) {
                        pr_cont("UNEXPECTED_FAIL no memory left\n");
                        *err = -ENOMEM;
@@ -1807,9 +1807,9 @@ static struct sk_filter *generate_filter(int which, int *err)
 
                fp->len = flen;
                memcpy(fp->insnsi, tests[which].u.insns_int,
-                      fp->len * sizeof(struct sock_filter_int));
+                      fp->len * sizeof(struct bpf_insn));
 
-               sk_filter_select_runtime(fp);
+               bpf_prog_select_runtime(fp);
                break;
        }
 
@@ -1817,21 +1817,21 @@ static struct sk_filter *generate_filter(int which, int *err)
        return fp;
 }
 
-static void release_filter(struct sk_filter *fp, int which)
+static void release_filter(struct bpf_prog *fp, int which)
 {
        __u8 test_type = tests[which].aux & TEST_TYPE_MASK;
 
        switch (test_type) {
        case CLASSIC:
-               sk_unattached_filter_destroy(fp);
+               bpf_prog_destroy(fp);
                break;
        case INTERNAL:
-               sk_filter_free(fp);
+               bpf_prog_free(fp);
                break;
        }
 }
 
-static int __run_one(const struct sk_filter *fp, const void *data,
+static int __run_one(const struct bpf_prog *fp, const void *data,
                     int runs, u64 *duration)
 {
        u64 start, finish;
@@ -1840,7 +1840,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
        start = ktime_to_us(ktime_get());
 
        for (i = 0; i < runs; i++)
-               ret = SK_RUN_FILTER(fp, data);
+               ret = BPF_PROG_RUN(fp, data);
 
        finish = ktime_to_us(ktime_get());
 
@@ -1850,7 +1850,7 @@ static int __run_one(const struct sk_filter *fp, const void *data,
        return ret;
 }
 
-static int run_one(const struct sk_filter *fp, struct bpf_test *test)
+static int run_one(const struct bpf_prog *fp, struct bpf_test *test)
 {
        int err_cnt = 0, i, runs = MAX_TESTRUNS;
 
@@ -1884,7 +1884,7 @@ static __init int test_bpf(void)
        int i, err_cnt = 0, pass_cnt = 0;
 
        for (i = 0; i < ARRAY_SIZE(tests); i++) {
-               struct sk_filter *fp;
+               struct bpf_prog *fp;
                int err;
 
                pr_info("#%d %s ", i, tests[i].descr);
index e60837d..33514d8 100644 (file)
@@ -941,6 +941,37 @@ unlock:
        spin_unlock(ptl);
 }
 
+/*
+ * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
+ * during copy_user_huge_page()'s copy_page_rep(): in the case when
+ * the source page gets split and a tail freed before copy completes.
+ * Called under pmd_lock of checked pmd, so safe from splitting itself.
+ */
+static void get_user_huge_page(struct page *page)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+               struct page *endpage = page + HPAGE_PMD_NR;
+
+               atomic_add(HPAGE_PMD_NR, &page->_count);
+               while (++page < endpage)
+                       get_huge_page_tail(page);
+       } else {
+               get_page(page);
+       }
+}
+
+static void put_user_huge_page(struct page *page)
+{
+       if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+               struct page *endpage = page + HPAGE_PMD_NR;
+
+               while (page < endpage)
+                       put_page(page++);
+       } else {
+               put_page(page);
+       }
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
                                        struct vm_area_struct *vma,
                                        unsigned long address,
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
                ret |= VM_FAULT_WRITE;
                goto out_unlock;
        }
-       get_page(page);
+       get_user_huge_page(page);
        spin_unlock(ptl);
 alloc:
        if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
                                split_huge_page(page);
                                ret |= VM_FAULT_FALLBACK;
                        }
-                       put_page(page);
+                       put_user_huge_page(page);
                }
                count_vm_event(THP_FAULT_FALLBACK);
                goto out;
@@ -1105,7 +1136,7 @@ alloc:
                put_page(new_page);
                if (page) {
                        split_huge_page(page);
-                       put_page(page);
+                       put_user_huge_page(page);
                } else
                        split_huge_page_pmd(vma, address, pmd);
                ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
 
        spin_lock(ptl);
        if (page)
-               put_page(page);
+               put_user_huge_page(page);
        if (unlikely(!pmd_same(*pmd, orig_pmd))) {
                spin_unlock(ptl);
                mem_cgroup_uncharge_page(new_page);
@@ -2392,8 +2423,6 @@ static void collapse_huge_page(struct mm_struct *mm,
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
-       if (pmd_trans_huge(*pmd))
-               goto out;
 
        anon_vma_lock_write(vma->anon_vma);
 
@@ -2492,8 +2521,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
-       if (pmd_trans_huge(*pmd))
-               goto out;
 
        memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
        pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2846,12 +2873,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
 static void split_huge_page_address(struct mm_struct *mm,
                                    unsigned long address)
 {
+       pgd_t *pgd;
+       pud_t *pud;
        pmd_t *pmd;
 
        VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-       pmd = mm_find_pmd(mm, address);
-       if (!pmd)
+       pgd = pgd_offset(mm, address);
+       if (!pgd_present(*pgd))
+               return;
+
+       pud = pud_offset(pgd, address);
+       if (!pud_present(*pud))
+               return;
+
+       pmd = pmd_offset(pud, address);
+       if (!pmd_present(*pmd))
                return;
        /*
         * Caller holds the mmap_sem write mode, so a huge pmd cannot
index 226910c..9221c02 100644 (file)
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
                update_mmu_cache(vma, address, ptep);
 }
 
+static int is_hugetlb_entry_migration(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_migration_entry(swp))
+               return 1;
+       else
+               return 0;
+}
+
+static int is_hugetlb_entry_hwpoisoned(pte_t pte)
+{
+       swp_entry_t swp;
+
+       if (huge_pte_none(pte) || pte_present(pte))
+               return 0;
+       swp = pte_to_swp_entry(pte);
+       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
+               return 1;
+       else
+               return 0;
+}
 
 int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                            struct vm_area_struct *vma)
@@ -2559,7 +2584,24 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
                dst_ptl = huge_pte_lock(h, dst, dst_pte);
                src_ptl = huge_pte_lockptr(h, src, src_pte);
                spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
-               if (!huge_pte_none(huge_ptep_get(src_pte))) {
+               entry = huge_ptep_get(src_pte);
+               if (huge_pte_none(entry)) { /* skip none entry */
+                       ;
+               } else if (unlikely(is_hugetlb_entry_migration(entry) ||
+                                   is_hugetlb_entry_hwpoisoned(entry))) {
+                       swp_entry_t swp_entry = pte_to_swp_entry(entry);
+
+                       if (is_write_migration_entry(swp_entry) && cow) {
+                               /*
+                                * COW mappings require pages in both
+                                * parent and child to be set to read.
+                                */
+                               make_migration_entry_read(&swp_entry);
+                               entry = swp_entry_to_pte(swp_entry);
+                               set_huge_pte_at(src, addr, src_pte, entry);
+                       }
+                       set_huge_pte_at(dst, addr, dst_pte, entry);
+               } else {
                        if (cow)
                                huge_ptep_set_wrprotect(src, addr, src_pte);
                        entry = huge_ptep_get(src_pte);
@@ -2578,32 +2620,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
        return ret;
 }
 
-static int is_hugetlb_entry_migration(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_migration_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
-static int is_hugetlb_entry_hwpoisoned(pte_t pte)
-{
-       swp_entry_t swp;
-
-       if (huge_pte_none(pte) || pte_present(pte))
-               return 0;
-       swp = pte_to_swp_entry(pte);
-       if (non_swap_entry(swp) && is_hwpoison_entry(swp))
-               return 1;
-       else
-               return 0;
-}
-
 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
                            unsigned long start, unsigned long end,
                            struct page *ref_page)
index 68710e8..346ddc9 100644 (file)
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -945,7 +945,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
        pmd = mm_find_pmd(mm, addr);
        if (!pmd)
                goto out;
-       BUG_ON(pmd_trans_huge(*pmd));
 
        mmun_start = addr;
        mmun_end   = addr + PAGE_SIZE;
index cd8989c..7211a73 100644 (file)
@@ -435,7 +435,7 @@ static void collect_procs_anon(struct page *page, struct list_head *to_kill,
        if (av == NULL) /* Not actually mapped anymore */
                return;
 
-       pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff = page_to_pgoff(page);
        read_lock(&tasklist_lock);
        for_each_process (tsk) {
                struct anon_vma_chain *vmac;
@@ -469,7 +469,7 @@ static void collect_procs_file(struct page *page, struct list_head *to_kill,
        mutex_lock(&mapping->i_mmap_mutex);
        read_lock(&tasklist_lock);
        for_each_process(tsk) {
-               pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+               pgoff_t pgoff = page_to_pgoff(page);
                struct task_struct *t = task_early_kill(tsk, force_early);
 
                if (!t)
@@ -895,7 +895,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
        struct page *hpage = *hpagep;
        struct page *ppage;
 
-       if (PageReserved(p) || PageSlab(p))
+       if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
                return SWAP_SUCCESS;
 
        /*
@@ -1159,9 +1159,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                                        action_result(pfn, "free buddy, 2nd try", DELAYED);
                                return 0;
                        }
-                       action_result(pfn, "non LRU", IGNORED);
-                       put_page(p);
-                       return -EBUSY;
                }
        }
 
@@ -1194,6 +1191,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                return 0;
        }
 
+       if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
+               goto identify_page_state;
+
        /*
         * For error on the tail page, we should set PG_hwpoison
         * on the head page to show that the hugepage is hwpoisoned
@@ -1243,6 +1243,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
                goto out;
        }
 
+identify_page_state:
        res = -EBUSY;
        /*
         * The first check uses the current page flags which may not have any
index d67fd9f..7e8d820 100644 (file)
@@ -2882,7 +2882,8 @@ static int do_read_fault(struct mm_struct *mm, struct vm_area_struct *vma,
         * if page by the offset is not ready to be mapped (cold cache or
         * something).
         */
-       if (vma->vm_ops->map_pages && fault_around_pages() > 1) {
+       if (vma->vm_ops->map_pages && !(flags & FAULT_FLAG_NONLINEAR) &&
+           fault_around_pages() > 1) {
                pte = pte_offset_map_lock(mm, pmd, address, &ptl);
                do_fault_around(vma, address, pte, pgoff, flags);
                if (!pte_same(*pte, orig_pte))
index 2849742..8f5330d 100644 (file)
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
  * @nodes and @flags,) it's isolated and queued to the pagelist which is
  * passed via @private.)
  */
-static struct vm_area_struct *
+static int
 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
                const nodemask_t *nodes, unsigned long flags, void *private)
 {
-       int err;
-       struct vm_area_struct *first, *vma, *prev;
-
+       int err = 0;
+       struct vm_area_struct *vma, *prev;
 
-       first = find_vma(mm, start);
-       if (!first)
-               return ERR_PTR(-EFAULT);
+       vma = find_vma(mm, start);
+       if (!vma)
+               return -EFAULT;
        prev = NULL;
-       for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) {
+       for (; vma && vma->vm_start < end; vma = vma->vm_next) {
                unsigned long endvma = vma->vm_end;
 
                if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
                if (!(flags & MPOL_MF_DISCONTIG_OK)) {
                        if (!vma->vm_next && vma->vm_end < end)
-                               return ERR_PTR(-EFAULT);
+                               return -EFAULT;
                        if (prev && prev->vm_end < vma->vm_start)
-                               return ERR_PTR(-EFAULT);
+                               return -EFAULT;
                }
 
                if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
 
                        err = queue_pages_pgd_range(vma, start, endvma, nodes,
                                                flags, private);
-                       if (err) {
-                               first = ERR_PTR(err);
+                       if (err)
                                break;
-                       }
                }
 next:
                prev = vma;
        }
-       return first;
+       return err;
 }
 
 /*
@@ -1156,16 +1153,17 @@ out:
 
 /*
  * Allocate a new page for page migration based on vma policy.
- * Start assuming that page is mapped by vma pointed to by @private.
+ * Start by assuming the page is mapped by the same vma as contains @start.
  * Search forward from there, if not.  N.B., this assumes that the
  * list of pages handed to migrate_pages()--which is how we get here--
  * is in virtual address order.
  */
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
-       struct vm_area_struct *vma = (struct vm_area_struct *)private;
+       struct vm_area_struct *vma;
        unsigned long uninitialized_var(address);
 
+       vma = find_vma(current->mm, start);
        while (vma) {
                address = page_address_in_vma(page, vma);
                if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
        return -ENOSYS;
 }
 
-static struct page *new_vma_page(struct page *page, unsigned long private, int **x)
+static struct page *new_page(struct page *page, unsigned long start, int **x)
 {
        return NULL;
 }
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
                     unsigned short mode, unsigned short mode_flags,
                     nodemask_t *nmask, unsigned long flags)
 {
-       struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        struct mempolicy *new;
        unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
        if (err)
                goto mpol_out;
 
-       vma = queue_pages_range(mm, start, end, nmask,
+       err = queue_pages_range(mm, start, end, nmask,
                          flags | MPOL_MF_INVERT, &pagelist);
-
-       err = PTR_ERR(vma);     /* maybe ... */
-       if (!IS_ERR(vma))
+       if (!err)
                err = mbind_range(mm, start, end, new);
 
        if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
 
                if (!list_empty(&pagelist)) {
                        WARN_ON_ONCE(flags & MPOL_MF_LAZY);
-                       nr_failed = migrate_pages(&pagelist, new_vma_page,
-                                       NULL, (unsigned long)vma,
-                                       MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
+                       nr_failed = migrate_pages(&pagelist, new_page, NULL,
+                               start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
                        if (nr_failed)
                                putback_movable_pages(&pagelist);
                }
@@ -2145,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
        } else
                *new = *old;
 
-       rcu_read_lock();
        if (current_cpuset_is_being_rebound()) {
                nodemask_t mems = cpuset_mems_allowed(current);
                if (new->flags & MPOL_F_REBINDING)
@@ -2153,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
                else
                        mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
        }
-       rcu_read_unlock();
        atomic_set(&new->refcnt, 1);
        return new;
 }
index 63f0cd5..be6dbf9 100644 (file)
@@ -120,8 +120,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
                pmd = mm_find_pmd(mm, addr);
                if (!pmd)
                        goto out;
-               if (pmd_trans_huge(*pmd))
-                       goto out;
 
                ptep = pte_offset_map(pmd, addr);
 
@@ -990,9 +988,10 @@ out:
         * it.  Otherwise, putback_lru_page() will drop the reference grabbed
         * during isolation.
         */
-       if (rc != MIGRATEPAGE_SUCCESS && put_new_page)
+       if (rc != MIGRATEPAGE_SUCCESS && put_new_page) {
+               ClearPageSwapBacked(newpage);
                put_new_page(newpage, private);
-       else
+       } else
                putback_lru_page(newpage);
 
        if (result) {
index a5c6736..992a167 100644 (file)
@@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
                        goto out_unlock;
                }
                file = vma->vm_file;
-               fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
+               fstart = (start - vma->vm_start) +
+                        ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
                fend = fstart + (min(end, vma->vm_end) - start) - 1;
                start = vma->vm_end;
                if ((flags & MS_SYNC) && file &&
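
Worked example: for a vma spanning 0x1000-0x5000 whose vm_pgoff corresponds
to file offset 0x2000, an msync() starting at 0x3000 now computes
fstart = (0x3000 - 0x1000) + 0x2000 = 0x4000; the old expression yielded
0x3000 + 0x2000 = 0x5000 and synced the wrong file range.
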
index b78e3a8..4a852f6 100644 (file)
@@ -786,7 +786,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
        for (i = 0; i < VMACACHE_SIZE; i++) {
                /* if the vma is cached, invalidate the entire cache */
                if (curr->vmacache[i] == vma) {
-                       vmacache_invalidate(curr->mm);
+                       vmacache_invalidate(mm);
                        break;
                }
        }
index 4f59fa2..8bcfe3a 100644 (file)
@@ -69,6 +69,7 @@
 
 /* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
 static DEFINE_MUTEX(pcp_batch_high_lock);
+#define MIN_PERCPU_PAGELIST_FRACTION   (8)
 
 #ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
 DEFINE_PER_CPU(int, numa_node);
@@ -815,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
                set_page_count(p, 0);
        } while (++p, --i);
 
-       set_page_refcounted(page);
        set_pageblock_migratetype(page, MIGRATE_CMA);
-       __free_pages(page, pageblock_order);
+
+       if (pageblock_order >= MAX_ORDER) {
+               i = pageblock_nr_pages;
+               p = page;
+               do {
+                       set_page_refcounted(p);
+                       __free_pages(p, MAX_ORDER - 1);
+                       p += MAX_ORDER_NR_PAGES;
+               } while (i -= MAX_ORDER_NR_PAGES);
+       } else {
+               set_page_refcounted(page);
+               __free_pages(page, pageblock_order);
+       }
+
        adjust_managed_page_count(page, pageblock_nr_pages);
 }
 #endif
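
Worked example: on configurations where huge pages push pageblock_order
above MAX_ORDER - 1, e.g. 4 KiB base pages and 16 MiB huge pages giving
pageblock_order = 12 against a default MAX_ORDER = 11, the 4096-page block
is now released as four order-10 chunks of 1024 pages each instead of one
__free_pages() call at an order the buddy allocator cannot accept.
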
@@ -4145,7 +4158,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
        memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
 #endif
 
-static int __meminit zone_batchsize(struct zone *zone)
+static int zone_batchsize(struct zone *zone)
 {
 #ifdef CONFIG_MMU
        int batch;
@@ -4261,8 +4274,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
        pageset_update(&p->pcp, high, batch);
 }
 
-static void __meminit pageset_set_high_and_batch(struct zone *zone,
-               struct per_cpu_pageset *pcp)
+static void pageset_set_high_and_batch(struct zone *zone,
+                                      struct per_cpu_pageset *pcp)
 {
        if (percpu_pagelist_fraction)
                pageset_set_high(pcp,
@@ -5881,23 +5894,38 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
        void __user *buffer, size_t *length, loff_t *ppos)
 {
        struct zone *zone;
-       unsigned int cpu;
+       int old_percpu_pagelist_fraction;
        int ret;
 
+       mutex_lock(&pcp_batch_high_lock);
+       old_percpu_pagelist_fraction = percpu_pagelist_fraction;
+
        ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
-       if (!write || (ret < 0))
-               return ret;
+       if (!write || ret < 0)
+               goto out;
+
+       /* Sanity checking to avoid pcp imbalance */
+       if (percpu_pagelist_fraction &&
+           percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
+               percpu_pagelist_fraction = old_percpu_pagelist_fraction;
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* No change? */
+       if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
+               goto out;
 
-       mutex_lock(&pcp_batch_high_lock);
        for_each_populated_zone(zone) {
-               unsigned long  high;
-               high = zone->managed_pages / percpu_pagelist_fraction;
+               unsigned int cpu;
+
                for_each_possible_cpu(cpu)
-                       pageset_set_high(per_cpu_ptr(zone->pageset, cpu),
-                                        high);
+                       pageset_set_high_and_batch(zone,
+                                       per_cpu_ptr(zone->pageset, cpu));
        }
+out:
        mutex_unlock(&pcp_batch_high_lock);
-       return 0;
+       return ret;
 }
 
 int hashdist = HASHDIST_DEFAULT;
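
The reworked handler holds pcp_batch_high_lock across the parse so no reader
can observe a half-validated value, and it restores the old fraction when the
new one falls below MIN_PERCPU_PAGELIST_FRACTION. The same take-lock /
snapshot / validate / roll-back shape in a userspace sketch (names and the
pthread analogue are mine, not the kernel's):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static int fraction;    /* stand-in for percpu_pagelist_fraction */

    #define MIN_FRACTION 8

    static int set_fraction(int new_value)
    {
            int old, ret = 0;

            pthread_mutex_lock(&lock);
            old = fraction;
            fraction = new_value;          /* the "proc_dointvec" step */

            if (fraction && fraction < MIN_FRACTION) {
                    fraction = old;        /* roll back to the snapshot */
                    ret = -EINVAL;
                    goto out;
            }
            if (fraction == old)
                    goto out;              /* no change: skip the rebuild */

            printf("rebuilding per-cpu pagesets for fraction %d\n", fraction);
    out:
            pthread_mutex_unlock(&lock);
            return ret;
    }

    int main(void)
    {
            printf("%d\n", set_fraction(4));   /* -EINVAL, rolled back */
            printf("%d\n", set_fraction(16));  /* accepted */
            return 0;
    }
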
@@ -6034,11 +6062,13 @@ static inline int pfn_to_bitidx(struct zone *zone, unsigned long pfn)
 }
 
 /**
- * get_pageblock_flags_group - Return the requested group of flags for the pageblock_nr_pages block of pages
+ * get_pfnblock_flags_mask - Return the requested group of flags for the pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest to retrieve
- * @end_bitidx: The last bit of interest
- * returns pageblock_bits flags
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest to retrieve
+ * @mask: mask of bits that the caller is interested in
+ *
+ * Return: pageblock_bits flags
  */
 unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
                                        unsigned long end_bitidx,
@@ -6063,9 +6093,10 @@ unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
 /**
  * set_pfnblock_flags_mask - Set the requested group of flags for a pageblock_nr_pages block of pages
  * @page: The page within the block of interest
- * @start_bitidx: The first bit of interest
- * @end_bitidx: The last bit of interest
  * @flags: The flags to set
+ * @pfn: The target page frame number
+ * @end_bitidx: The last bit of interest
+ * @mask: mask of bits that the caller is interested in
  */
 void set_pfnblock_flags_mask(struct page *page, unsigned long flags,
                                        unsigned long pfn,
index bf05fc8..22a4a76 100644 (file)
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -517,11 +517,7 @@ void page_unlock_anon_vma_read(struct anon_vma *anon_vma)
 static inline unsigned long
 __vma_address(struct page *page, struct vm_area_struct *vma)
 {
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
-
-       if (unlikely(is_vm_hugetlb_page(vma)))
-               pgoff = page->index << huge_page_order(page_hstate(page));
-
+       pgoff_t pgoff = page_to_pgoff(page);
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
 }
 
@@ -569,6 +565,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd = NULL;
+       pmd_t pmde;
 
        pgd = pgd_offset(mm, address);
        if (!pgd_present(*pgd))
@@ -579,7 +576,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
                goto out;
 
        pmd = pmd_offset(pud, address);
-       if (!pmd_present(*pmd))
+       /*
+        * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
+        * without holding anon_vma lock for write.  So when looking for a
+        * genuine pmde (in which to find pte), test present and !THP together.
+        */
+       pmde = ACCESS_ONCE(*pmd);
+       if (!pmd_present(pmde) || pmd_trans_huge(pmde))
                pmd = NULL;
 out:
        return pmd;
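
ACCESS_ONCE(*pmd) forces a single load, so the present and trans-huge tests
are applied to one snapshot rather than to two reads that a racing
pmdp_clear_flush()/set_pmd_at() pair could interleave. A userspace analogue of
the snapshot-then-test pattern using C11 atomics (the flag values are
invented):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic unsigned long pmd_val;   /* stand-in for *pmd */

    #define PRESENT 0x1UL
    #define HUGE    0x2UL

    static bool usable_pmd(void)
    {
            /* One load; both predicates test the same snapshot, so a
             * concurrent clear-then-set cannot pass one test with the
             * old value and the other with the new one. */
            unsigned long pmde = atomic_load_explicit(&pmd_val,
                                                      memory_order_relaxed);

            return (pmde & PRESENT) && !(pmde & HUGE);
    }

    int main(void)
    {
            atomic_store(&pmd_val, PRESENT);
            printf("usable: %d\n", usable_pmd());   /* 1 */
            atomic_store(&pmd_val, PRESENT | HUGE);
            printf("usable: %d\n", usable_pmd());   /* 0 */
            return 0;
    }
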
@@ -615,9 +618,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
        if (!pmd)
                return NULL;
 
-       if (pmd_trans_huge(*pmd))
-               return NULL;
-
        pte = pte_offset_map(pmd, address);
        /* Make a quick check before getting the lock */
        if (!sync && !pte_present(*pte)) {
@@ -1635,7 +1635,7 @@ static struct anon_vma *rmap_walk_anon_lock(struct page *page,
 static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 {
        struct anon_vma *anon_vma;
-       pgoff_t pgoff = page->index << (PAGE_CACHE_SHIFT - PAGE_SHIFT);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct anon_vma_chain *avc;
        int ret = SWAP_AGAIN;
 
@@ -1676,7 +1676,7 @@ static int rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc)
 static int rmap_walk_file(struct page *page, struct rmap_walk_control *rwc)
 {
        struct address_space *mapping = page->mapping;
-       pgoff_t pgoff = page->index << compound_order(page);
+       pgoff_t pgoff = page_to_pgoff(page);
        struct vm_area_struct *vma;
        int ret = SWAP_AGAIN;
 
index f484c27..af68b15 100644 (file)
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
 #define SHORT_SYMLINK_LEN 128
 
 /*
- * shmem_fallocate and shmem_writepage communicate via inode->i_private
- * (with i_mutex making sure that it has only one user at a time):
- * we would prefer not to enlarge the shmem inode just for that.
+ * shmem_fallocate communicates with shmem_fault or shmem_writepage via
+ * inode->i_private (with i_mutex making sure that it has only one user at
+ * a time): we would prefer not to enlarge the shmem inode just for that.
  */
 struct shmem_falloc {
+       wait_queue_head_t *waitq; /* faults into hole wait for punch to end */
        pgoff_t start;          /* start of range currently being fallocated */
        pgoff_t next;           /* the next page offset to be fallocated */
        pgoff_t nr_falloced;    /* how many new pages have been fallocated */
@@ -467,23 +468,20 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                return;
 
        index = start;
-       for ( ; ; ) {
+       while (index < end) {
                cond_resched();
 
                pvec.nr = find_get_entries(mapping, index,
                                min(end - index, (pgoff_t)PAGEVEC_SIZE),
                                pvec.pages, indices);
                if (!pvec.nr) {
-                       if (index == start || unfalloc)
+                       /* If all gone or hole-punch or unfalloc, we're done */
+                       if (index == start || end != -1)
                                break;
+                       /* But if truncating, restart to make sure all gone */
                        index = start;
                        continue;
                }
-               if ((index == start || unfalloc) && indices[0] >= end) {
-                       pagevec_remove_exceptionals(&pvec);
-                       pagevec_release(&pvec);
-                       break;
-               }
                mem_cgroup_uncharge_start();
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
@@ -495,8 +493,12 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                        if (radix_tree_exceptional_entry(page)) {
                                if (unfalloc)
                                        continue;
-                               nr_swaps_freed += !shmem_free_swap(mapping,
-                                                               index, page);
+                               if (shmem_free_swap(mapping, index, page)) {
+                                       /* Swap was replaced by page: retry */
+                                       index--;
+                                       break;
+                               }
+                               nr_swaps_freed++;
                                continue;
                        }
 
@@ -505,6 +507,11 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
                                if (page->mapping == mapping) {
                                        VM_BUG_ON_PAGE(PageWriteback(page), page);
                                        truncate_inode_page(mapping, page);
+                               } else {
+                                       /* Page was replaced by swap: retry */
+                                       unlock_page(page);
+                                       index--;
+                                       break;
                                }
                        }
                        unlock_page(page);
@@ -759,6 +766,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
                        spin_lock(&inode->i_lock);
                        shmem_falloc = inode->i_private;
                        if (shmem_falloc &&
+                           !shmem_falloc->waitq &&
                            index >= shmem_falloc->start &&
                            index < shmem_falloc->next)
                                shmem_falloc->nr_unswapped++;
@@ -1027,6 +1035,9 @@ repeat:
                goto failed;
        }
 
+       if (page && sgp == SGP_WRITE)
+               mark_page_accessed(page);
+
        /* fallocated page? */
        if (page && !PageUptodate(page)) {
                if (sgp != SGP_READ)
@@ -1108,6 +1119,9 @@ repeat:
                shmem_recalc_inode(inode);
                spin_unlock(&info->lock);
 
+               if (sgp == SGP_WRITE)
+                       mark_page_accessed(page);
+
                delete_from_swap_cache(page);
                set_page_dirty(page);
                swap_free(swap);
@@ -1134,6 +1148,9 @@ repeat:
 
                __SetPageSwapBacked(page);
                __set_page_locked(page);
+               if (sgp == SGP_WRITE)
+                       init_page_accessed(page);
+
                error = mem_cgroup_charge_file(page, current->mm,
                                                gfp & GFP_RECLAIM_MASK);
                if (error)
@@ -1233,6 +1250,64 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        int error;
        int ret = VM_FAULT_LOCKED;
 
+       /*
+        * Trinity finds that probing a hole which tmpfs is punching can
+        * prevent the hole-punch from ever completing: which in turn
+        * locks writers out with its hold on i_mutex.  So refrain from
+        * faulting pages into the hole while it's being punched.  Although
+        * shmem_undo_range() does remove the additions, it may be unable to
+        * keep up, as each new page needs its own unmap_mapping_range() call,
+        * and the i_mmap tree grows ever slower to scan if new vmas are added.
+        *
+        * It does not matter if we sometimes reach this check just before the
+        * hole-punch begins, so that one fault then races with the punch:
+        * we just need to make racing faults a rare case.
+        *
+        * The implementation below would be much simpler if we just used a
+        * standard mutex or completion: but we cannot take i_mutex in fault,
+        * and bloating every shmem inode for this unlikely case would be sad.
+        */
+       if (unlikely(inode->i_private)) {
+               struct shmem_falloc *shmem_falloc;
+
+               spin_lock(&inode->i_lock);
+               shmem_falloc = inode->i_private;
+               if (shmem_falloc &&
+                   shmem_falloc->waitq &&
+                   vmf->pgoff >= shmem_falloc->start &&
+                   vmf->pgoff < shmem_falloc->next) {
+                       wait_queue_head_t *shmem_falloc_waitq;
+                       DEFINE_WAIT(shmem_fault_wait);
+
+                       ret = VM_FAULT_NOPAGE;
+                       if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
+                          !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
+                               /* It's polite to up mmap_sem if we can */
+                               up_read(&vma->vm_mm->mmap_sem);
+                               ret = VM_FAULT_RETRY;
+                       }
+
+                       shmem_falloc_waitq = shmem_falloc->waitq;
+                       prepare_to_wait(shmem_falloc_waitq, &shmem_fault_wait,
+                                       TASK_UNINTERRUPTIBLE);
+                       spin_unlock(&inode->i_lock);
+                       schedule();
+
+                       /*
+                        * shmem_falloc_waitq points into the shmem_fallocate()
+                        * stack of the hole-punching task: shmem_falloc_waitq
+                        * is usually invalid by the time we reach here, but
+                        * finish_wait() does not dereference it in that case;
+                        * though i_lock needed lest racing with wake_up_all().
+                        */
+                       spin_lock(&inode->i_lock);
+                       finish_wait(shmem_falloc_waitq, &shmem_fault_wait);
+                       spin_unlock(&inode->i_lock);
+                       return ret;
+               }
+               spin_unlock(&inode->i_lock);
+       }
+
        error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
        if (error)
                return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
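
The wait queue used here lives on the stack of the task running
shmem_fallocate(), which is why the punch side must clear inode->i_private and
wake_up_all() under i_lock before it returns. A rough userspace analogue of
that publish/wait/unpublish handshake, with a condition variable standing in
for the wait queue (all names are hypothetical; the real code must also cope
with signals and mmap_sem):

    #include <pthread.h>
    #include <stdio.h>

    /* Plays the role of inode->i_private / struct shmem_falloc. */
    struct falloc {
            pthread_cond_t waitq;
            int start, next;
    };

    static pthread_mutex_t i_lock = PTHREAD_MUTEX_INITIALIZER;
    static struct falloc *current_falloc;

    /* Fault side: if the page falls in the hole, sleep until the punch
     * ends; the cond var may only be touched while current_falloc still
     * points at it. */
    static void fault(int pgoff)
    {
            pthread_mutex_lock(&i_lock);
            while (current_falloc && pgoff >= current_falloc->start &&
                   pgoff < current_falloc->next)
                    pthread_cond_wait(&current_falloc->waitq, &i_lock);
            pthread_mutex_unlock(&i_lock);
            printf("fault at %d proceeds\n", pgoff);
    }

    /* Punch side: publish, work, then unpublish and wake everyone
     * before the on-stack struct dies. */
    static void punch_hole(int start, int next)
    {
            struct falloc f = { PTHREAD_COND_INITIALIZER, start, next };

            pthread_mutex_lock(&i_lock);
            current_falloc = &f;
            pthread_mutex_unlock(&i_lock);

            /* ... truncate the range ... */

            pthread_mutex_lock(&i_lock);
            current_falloc = NULL;
            pthread_cond_broadcast(&f.waitq);
            pthread_mutex_unlock(&i_lock);
    }

    int main(void)
    {
            punch_hole(0, 16);
            fault(4);       /* after the punch: no hole, proceeds at once */
            return 0;
    }
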
@@ -1372,13 +1447,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
                        loff_t pos, unsigned len, unsigned flags,
                        struct page **pagep, void **fsdata)
 {
-       int ret;
        struct inode *inode = mapping->host;
        pgoff_t index = pos >> PAGE_CACHE_SHIFT;
-       ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
-       if (ret == 0 && *pagep)
-               init_page_accessed(*pagep);
-       return ret;
+       return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
 }
 
 static int
@@ -1724,18 +1795,34 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
        pgoff_t start, index, end;
        int error;
 
+       if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
+               return -EOPNOTSUPP;
+
        mutex_lock(&inode->i_mutex);
 
        if (mode & FALLOC_FL_PUNCH_HOLE) {
                struct address_space *mapping = file->f_mapping;
                loff_t unmap_start = round_up(offset, PAGE_SIZE);
                loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
+               DECLARE_WAIT_QUEUE_HEAD_ONSTACK(shmem_falloc_waitq);
+
+               shmem_falloc.waitq = &shmem_falloc_waitq;
+               shmem_falloc.start = unmap_start >> PAGE_SHIFT;
+               shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
+               spin_lock(&inode->i_lock);
+               inode->i_private = &shmem_falloc;
+               spin_unlock(&inode->i_lock);
 
                if ((u64)unmap_end > (u64)unmap_start)
                        unmap_mapping_range(mapping, unmap_start,
                                            1 + unmap_end - unmap_start, 0);
                shmem_truncate_range(inode, offset, offset + len - 1);
                /* No need to unmap again: hole-punching leaves COWed pages */
+
+               spin_lock(&inode->i_lock);
+               inode->i_private = NULL;
+               wake_up_all(&shmem_falloc_waitq);
+               spin_unlock(&inode->i_lock);
                error = 0;
                goto out;
        }
@@ -1753,6 +1840,7 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
                goto out;
        }
 
+       shmem_falloc.waitq = NULL;
        shmem_falloc.start = start;
        shmem_falloc.next  = start;
        shmem_falloc.nr_falloced = 0;
index 9ca3b87..3070b92 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
+#define OBJECT_FREE (0)
+#define OBJECT_ACTIVE (1)
+
+#ifdef CONFIG_DEBUG_SLAB_LEAK
+
+static void set_obj_status(struct page *page, int idx, int val)
+{
+       int freelist_size;
+       char *status;
+       struct kmem_cache *cachep = page->slab_cache;
+
+       freelist_size = cachep->num * sizeof(freelist_idx_t);
+       status = (char *)page->freelist + freelist_size;
+       status[idx] = val;
+}
+
+static inline unsigned int get_obj_status(struct page *page, int idx)
+{
+       int freelist_size;
+       char *status;
+       struct kmem_cache *cachep = page->slab_cache;
+
+       freelist_size = cachep->num * sizeof(freelist_idx_t);
+       status = (char *)page->freelist + freelist_size;
+
+       return status[idx];
+}
+
+#else
+static inline void set_obj_status(struct page *page, int idx, int val) {}
+
+#endif
+
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
        return cachep->array[smp_processor_id()];
 }
 
+static size_t calculate_freelist_size(int nr_objs, size_t align)
+{
+       size_t freelist_size;
+
+       freelist_size = nr_objs * sizeof(freelist_idx_t);
+       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+               freelist_size += nr_objs * sizeof(char);
+
+       if (align)
+               freelist_size = ALIGN(freelist_size, align);
+
+       return freelist_size;
+}
+
 static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
                                size_t idx_size, size_t align)
 {
        int nr_objs;
+       size_t remained_size;
        size_t freelist_size;
+       int extra_space = 0;
 
+       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+               extra_space = sizeof(char);
        /*
         * Ignore padding for the initial guess. The padding
         * is at most @align-1 bytes, and @buffer_size is at
@@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
         * into the memory allocation when taking the padding
         * into account.
         */
-       nr_objs = slab_size / (buffer_size + idx_size);
+       nr_objs = slab_size / (buffer_size + idx_size + extra_space);
 
        /*
         * This calculated number will be either the right
         * amount, or one greater than what we want.
         */
-       freelist_size = slab_size - nr_objs * buffer_size;
-       if (freelist_size < ALIGN(nr_objs * idx_size, align))
+       remained_size = slab_size - nr_objs * buffer_size;
+       freelist_size = calculate_freelist_size(nr_objs, align);
+       if (remained_size < freelist_size)
                nr_objs--;
 
        return nr_objs;
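
calculate_nr_objs() makes an optimistic guess that ignores alignment padding,
then drops one object if the leftover space cannot actually hold the freelist,
which with CONFIG_DEBUG_SLAB_LEAK now carries one extra status byte per
object. The estimate in isolation (all constants invented for illustration):

    #include <stdio.h>

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))

    static unsigned long freelist_size(int nr, unsigned long idx_size,
                                       unsigned long align, int leak_debug)
    {
            unsigned long size = nr * idx_size;

            if (leak_debug)         /* one status byte per object */
                    size += nr;
            return align ? ALIGN(size, align) : size;
    }

    static int nr_objs(unsigned long slab, unsigned long obj,
                       unsigned long idx_size, unsigned long align,
                       int leak_debug)
    {
            unsigned long extra = leak_debug ? 1 : 0;
            int n = slab / (obj + idx_size + extra); /* optimistic guess */

            /* The guess may be one too high once padding counts. */
            if (slab - n * obj < freelist_size(n, idx_size, align, leak_debug))
                    n--;
            return n;
    }

    int main(void)
    {
            printf("objects per 4096-byte slab: %d\n",
                   nr_objs(4096, 120, 2, 8, 1));
            return 0;
    }
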
@@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
        } else {
                nr_objs = calculate_nr_objs(slab_size, buffer_size,
                                        sizeof(freelist_idx_t), align);
-               mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align);
+               mgmt_size = calculate_freelist_size(nr_objs, align);
        }
        *num = nr_objs;
        *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
@@ -2041,13 +2093,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
                        break;
 
                if (flags & CFLGS_OFF_SLAB) {
+                       size_t freelist_size_per_obj = sizeof(freelist_idx_t);
                        /*
                         * Max number of objs-per-slab for caches which
                         * use off-slab slabs. Needed to avoid a possible
                         * looping condition in cache_grow().
                         */
+                       if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
+                               freelist_size_per_obj += sizeof(char);
                        offslab_limit = size;
-                       offslab_limit /= sizeof(freelist_idx_t);
+                       offslab_limit /= freelist_size_per_obj;
 
                        if (num > offslab_limit)
                                break;
@@ -2294,8 +2349,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
        if (!cachep->num)
                return -E2BIG;
 
-       freelist_size =
-               ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
+       freelist_size = calculate_freelist_size(cachep->num, cachep->align);
 
        /*
         * If the slab has been placed off-slab, and we have enough space then
@@ -2308,7 +2362,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
 
        if (flags & CFLGS_OFF_SLAB) {
                /* really off slab. No need for manual alignment */
-               freelist_size = cachep->num * sizeof(freelist_idx_t);
+               freelist_size = calculate_freelist_size(cachep->num, 0);
 
 #ifdef CONFIG_PAGE_POISONING
                /* If we're going to use the generic kernel_map_pages()
@@ -2612,6 +2666,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
                if (cachep->ctor)
                        cachep->ctor(objp);
 #endif
+               set_obj_status(page, i, OBJECT_FREE);
                set_free_obj(page, i, i);
        }
 }
@@ -2820,6 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
        BUG_ON(objnr >= cachep->num);
        BUG_ON(objp != index_to_obj(cachep, page, objnr));
 
+       set_obj_status(page, objnr, OBJECT_FREE);
        if (cachep->flags & SLAB_POISON) {
 #ifdef CONFIG_DEBUG_PAGEALLOC
                if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2953,6 +3009,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                                gfp_t flags, void *objp, unsigned long caller)
 {
+       struct page *page;
+
        if (!objp)
                return objp;
        if (cachep->flags & SLAB_POISON) {
@@ -2983,6 +3041,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
                *dbg_redzone1(cachep, objp) = RED_ACTIVE;
                *dbg_redzone2(cachep, objp) = RED_ACTIVE;
        }
+
+       page = virt_to_head_page(objp);
+       set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
        objp += obj_offset(cachep);
        if (cachep->ctor && cachep->flags & SLAB_POISON)
                cachep->ctor(objp);
@@ -4219,21 +4280,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
                                                struct page *page)
 {
        void *p;
-       int i, j;
+       int i;
 
        if (n[0] == n[1])
                return;
        for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
-               bool active = true;
-
-               for (j = page->active; j < c->num; j++) {
-                       /* Skip freed item */
-                       if (get_free_obj(page, j) == i) {
-                               active = false;
-                               break;
-                       }
-               }
-               if (!active)
+               if (get_obj_status(page, i) != OBJECT_ACTIVE)
                        continue;
 
                if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
index 735e01a..d31c4ba 100644 (file)
@@ -55,7 +55,7 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
                        continue;
                }
 
-#if !defined(CONFIG_SLUB) || !defined(CONFIG_SLUB_DEBUG_ON)
+#if !defined(CONFIG_SLUB)
                if (!strcmp(s->name, name)) {
                        pr_err("%s (%s): Cache name already exists.\n",
                               __func__, name);
index b2b0473..7300480 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ redo:
 
        new.frozen = 0;
 
-       if (!new.inuse && n->nr_partial > s->min_partial)
+       if (!new.inuse && n->nr_partial >= s->min_partial)
                m = M_FREE;
        else if (new.freelist) {
                m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
                                new.freelist, new.counters,
                                "unfreezing slab"));
 
-               if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
+               if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
                        page->next = discard_page;
                        discard_page = page;
                } else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
                 return;
         }
 
-       if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
+       if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
                goto slab_empty;
 
        /*
index 6a78c81..eda2473 100644 (file)
@@ -355,14 +355,16 @@ void truncate_inode_pages_range(struct address_space *mapping,
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup_entries(&pvec, mapping, index,
-                       min(end - index, (pgoff_t)PAGEVEC_SIZE),
-                       indices)) {
+                       min(end - index, (pgoff_t)PAGEVEC_SIZE), indices)) {
+                       /* If all gone from start onwards, we're done */
                        if (index == start)
                                break;
+                       /* Otherwise restart to make sure all gone */
                        index = start;
                        continue;
                }
                if (index == start && indices[0] >= end) {
+                       /* All gone out of hole to be punched, we're done */
                        pagevec_remove_exceptionals(&pvec);
                        pagevec_release(&pvec);
                        break;
@@ -373,8 +375,11 @@ void truncate_inode_pages_range(struct address_space *mapping,
 
                        /* We rely upon deletion not changing page->index */
                        index = indices[i];
-                       if (index >= end)
+                       if (index >= end) {
+                               /* Restart punch to make sure all gone */
+                               index = start - 1;
                                break;
+                       }
 
                        if (radix_tree_exceptional_entry(page)) {
                                clear_exceptional_entry(mapping, index, page);
index 350ecfa..142eef5 100644 (file)
@@ -296,7 +296,6 @@ static int uncompress_udp_header(struct sk_buff *skb, struct udphdr *uh)
                default:
                        pr_debug("ERROR: unknown UDP format\n");
                        goto err;
-                       break;
                }
 
                pr_debug("uncompressed UDP ports: src = %d, dst = %d\n",
index 05eea6b..7c174b6 100644 (file)
@@ -126,6 +126,6 @@ static void fc_setup(struct net_device *dev)
  */
 struct net_device *alloc_fcdev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "fc%d", fc_setup);
+       return alloc_netdev(sizeof_priv, "fc%d", NET_NAME_UNKNOWN, fc_setup);
 }
 EXPORT_SYMBOL(alloc_fcdev);
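
This and the following alloc_netdev() call sites all change for the same
reason: the function gained a name_assign_type parameter this cycle, and
NET_NAME_UNKNOWN is the conservative value for drivers that have not been
audited for predictable naming. A toy model of the widened signature
(everything below is a stand-in, not kernel code; the real kernel expands the
"%d" in the template at registration time):

    #include <stdio.h>
    #include <stdlib.h>

    enum { NET_NAME_UNKNOWN, NET_NAME_ENUM, NET_NAME_PREDICTABLE,
           NET_NAME_USER };

    struct net_device {
            char name[16];
            int name_assign_type;
    };

    static struct net_device *alloc_netdev(int sizeof_priv, const char *name,
                                           int name_assign_type,
                                           void (*setup)(struct net_device *))
    {
            struct net_device *dev = calloc(1, sizeof(*dev) + sizeof_priv);

            if (!dev)
                    return NULL;
            snprintf(dev->name, sizeof(dev->name), "%s", name);
            dev->name_assign_type = name_assign_type;
            setup(dev);
            return dev;
    }

    static void fc_setup(struct net_device *dev) { (void)dev; }

    int main(void)
    {
            struct net_device *dev = alloc_netdev(0, "fc%d",
                                                  NET_NAME_UNKNOWN, fc_setup);
            printf("%s assign_type=%d\n", dev->name, dev->name_assign_type);
            free(dev);
            return 0;
    }
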
index 9cda406..59e7346 100644 (file)
@@ -207,7 +207,8 @@ static void fddi_setup(struct net_device *dev)
  */
 struct net_device *alloc_fddidev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "fddi%d", fddi_setup);
+       return alloc_netdev(sizeof_priv, "fddi%d", NET_NAME_UNKNOWN,
+                           fddi_setup);
 }
 EXPORT_SYMBOL(alloc_fddidev);
 
index 5ff2a71..2e03f82 100644 (file)
@@ -228,7 +228,8 @@ static void hippi_setup(struct net_device *dev)
 
 struct net_device *alloc_hippi_dev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "hip%d", hippi_setup);
+       return alloc_netdev(sizeof_priv, "hip%d", NET_NAME_UNKNOWN,
+                           hippi_setup);
 }
 
 EXPORT_SYMBOL(alloc_hippi_dev);
index 44ebd5c..64c6bed 100644 (file)
@@ -250,7 +250,8 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name, vlan_setup);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_priv), name,
+                              NET_NAME_UNKNOWN, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
@@ -324,23 +325,24 @@ static void vlan_transfer_features(struct net_device *dev,
        netdev_update_features(vlandev);
 }
 
-static void __vlan_device_event(struct net_device *dev, unsigned long event)
+static int __vlan_device_event(struct net_device *dev, unsigned long event)
 {
+       int err = 0;
+
        switch (event) {
        case NETDEV_CHANGENAME:
                vlan_proc_rem_dev(dev);
-               if (vlan_proc_add_dev(dev) < 0)
-                       pr_warn("failed to change proc name for %s\n",
-                               dev->name);
+               err = vlan_proc_add_dev(dev);
                break;
        case NETDEV_REGISTER:
-               if (vlan_proc_add_dev(dev) < 0)
-                       pr_warn("failed to add proc entry for %s\n", dev->name);
+               err = vlan_proc_add_dev(dev);
                break;
        case NETDEV_UNREGISTER:
                vlan_proc_rem_dev(dev);
                break;
        }
+
+       return err;
 }
 
 static int vlan_device_event(struct notifier_block *unused, unsigned long event,
@@ -355,8 +357,12 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
        bool last = false;
        LIST_HEAD(list);
 
-       if (is_vlan_dev(dev))
-               __vlan_device_event(dev, event);
+       if (is_vlan_dev(dev)) {
+               int err = __vlan_device_event(dev, event);
+
+               if (err)
+                       return notifier_from_errno(err);
+       }
 
        if ((event == NETDEV_UP) &&
            (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)) {
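
Returning notifier_from_errno(err) lets the proc failure propagate out of the
notifier chain instead of being logged and ignored. The encoding is just a
negative errno folded into the NOTIFY_* value space; a self-contained sketch
of the round trip (helper bodies copied here in simplified form, so treat the
details as illustrative):

    #include <stdio.h>

    #define NOTIFY_DONE             0x0000
    #define NOTIFY_OK               0x0001
    #define NOTIFY_STOP_MASK        0x8000
    #define NOTIFY_BAD              (NOTIFY_STOP_MASK | 0x0002)

    static int notifier_from_errno(int err)
    {
            if (err)
                    return NOTIFY_STOP_MASK | (NOTIFY_OK - err);
            return NOTIFY_OK;
    }

    static int notifier_to_errno(int ret)
    {
            ret &= ~NOTIFY_STOP_MASK;
            return ret > NOTIFY_OK ? NOTIFY_OK - ret : 0;
    }

    int main(void)
    {
            int ret = notifier_from_errno(-22);     /* -EINVAL */

            printf("ret=0x%x, errno back=%d\n", ret, notifier_to_errno(ret));
            return 0;
    }
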
index 9012b1c..75d4277 100644 (file)
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
 
 static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
 {
-       if (skb_cow(skb, skb_headroom(skb)) < 0)
+       if (skb_cow(skb, skb_headroom(skb)) < 0) {
+               kfree_skb(skb);
                return NULL;
+       }
+
        memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
        skb->mac_header += VLAN_HLEN;
        return skb;
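
The fix closes a leak: vlan_reorder_header()'s caller treats a NULL return as
"packet consumed", so the function must free the skb itself when skb_cow()
fails. The ownership rule in a minimal sketch (all names hypothetical):

    #include <stdlib.h>

    struct buf { char *data; };

    static void buf_free(struct buf *b)
    {
            free(b->data);
            free(b);
    }

    /* Consumes 'b' on failure: a NULL return means the caller must not
     * (and cannot) release it again. */
    static struct buf *reorder(struct buf *b)
    {
            char *bigger = realloc(b->data, 128);

            if (!bigger) {
                    buf_free(b);    /* the fix: free before bailing out */
                    return NULL;
            }
            b->data = bigger;
            return b;
    }

    int main(void)
    {
            struct buf *b = malloc(sizeof(*b));

            b->data = malloc(64);
            b = reorder(b);
            if (b)
                    buf_free(b);
            return 0;
    }
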
index ad2ac3c..35a6b6b 100644 (file)
@@ -385,6 +385,8 @@ static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
        case SIOCGMIIPHY:
        case SIOCGMIIREG:
        case SIOCSMIIREG:
+       case SIOCSHWTSTAMP:
+       case SIOCGHWTSTAMP:
                if (netif_device_present(real_dev) && ops->ndo_do_ioctl)
                        err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd);
                break;
@@ -627,8 +629,6 @@ static void vlan_dev_uninit(struct net_device *dev)
        struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
        int i;
 
-       free_percpu(vlan->vlan_pcpu_stats);
-       vlan->vlan_pcpu_stats = NULL;
        for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
                while ((pm = vlan->egress_priority_map[i]) != NULL) {
                        vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +785,15 @@ static const struct net_device_ops vlan_netdev_ops = {
        .ndo_get_lock_subclass  = vlan_dev_get_lock_subclass,
 };
 
+static void vlan_dev_free(struct net_device *dev)
+{
+       struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
+
+       free_percpu(vlan->vlan_pcpu_stats);
+       vlan->vlan_pcpu_stats = NULL;
+       free_netdev(dev);
+}
+
 void vlan_setup(struct net_device *dev)
 {
        ether_setup(dev);
@@ -794,7 +803,7 @@ void vlan_setup(struct net_device *dev)
        dev->tx_queue_len       = 0;
 
        dev->netdev_ops         = &vlan_netdev_ops;
-       dev->destructor         = free_netdev;
+       dev->destructor         = vlan_dev_free;
        dev->ethtool_ops        = &vlan_ethtool_ops;
 
        memset(dev->broadcast, 0, ETH_ALEN);
index 1d0e892..ae63cf7 100644 (file)
@@ -171,6 +171,8 @@ int vlan_proc_add_dev(struct net_device *vlandev)
        struct vlan_dev_priv *vlan = vlan_dev_priv(vlandev);
        struct vlan_net *vn = net_generic(dev_net(vlandev), vlan_net_id);
 
+       if (!strcmp(vlandev->name, name_conf))
+               return -EINVAL;
        vlan->dent =
                proc_create_data(vlandev->name, S_IFREG|S_IRUSR|S_IWUSR,
                                 vn->proc_vlan_dir, &vlandev_fops, vlandev);
index 0004cba..e86a9be 100644 (file)
@@ -959,7 +959,6 @@ static int p9_client_version(struct p9_client *c)
                break;
        default:
                return -EINVAL;
-               break;
        }
 
        if (IS_ERR(req))
index 01a1082..c00897f 100644 (file)
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
                goto drop;
 
        /* Queue packet (standard) */
-       skb->sk = sock;
-
        if (sock_queue_rcv_skb(sock, skb) < 0)
                goto drop;
 
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
        if (!skb)
                goto out;
 
-       skb->sk = sk;
        skb_reserve(skb, ddp_dl->header_length);
        skb_reserve(skb, dev->hard_header_len);
        skb->dev = dev;
@@ -1808,7 +1805,7 @@ static int atalk_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
                long amount = 0;
 
                if (skb)
-               amount = skb->len - sizeof(struct ddpehdr);
+                       amount = skb->len - sizeof(struct ddpehdr);
                rc = put_user(amount, (int __user *)argp);
                break;
        }
index 6c8016f..e4158b8 100644 (file)
@@ -39,6 +39,7 @@ static void ltalk_setup(struct net_device *dev)
 
 struct net_device *alloc_ltalkdev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "lt%d", ltalk_setup);
+       return alloc_netdev(sizeof_priv, "lt%d", NET_NAME_UNKNOWN,
+                           ltalk_setup);
 }
 EXPORT_SYMBOL(alloc_ltalkdev);
index 403e71f..cc78538 100644 (file)
@@ -682,8 +682,8 @@ static int br2684_create(void __user *arg)
 
        netdev = alloc_netdev(sizeof(struct br2684_dev),
                              ni.ifname[0] ? ni.ifname : "nas%d",
-                             (payload == p_routed) ?
-                             br2684_setup_routed : br2684_setup);
+                             NET_NAME_UNKNOWN,
+                             (payload == p_routed) ? br2684_setup_routed : br2684_setup);
        if (!netdev)
                return -ENOMEM;
 
index ba291ce..4633904 100644 (file)
@@ -520,7 +520,8 @@ static int clip_create(int number)
                        if (PRIV(dev)->number >= number)
                                number = PRIV(dev)->number + 1;
        }
-       dev = alloc_netdev(sizeof(struct clip_priv), "", clip_setup);
+       dev = alloc_netdev(sizeof(struct clip_priv), "", NET_NAME_UNKNOWN,
+                          clip_setup);
        if (!dev)
                return -ENOMEM;
        clip_priv = PRIV(dev);
index 6f0d9ec..a957c81 100644 (file)
@@ -800,11 +800,6 @@ static int batadv_check_claim_group(struct batadv_priv *bat_priv,
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
        bla_dst_own = &bat_priv->bla.claim_dest;
 
-       /* check if it is a claim packet in general */
-       if (memcmp(bla_dst->magic, bla_dst_own->magic,
-                  sizeof(bla_dst->magic)) != 0)
-               return 0;
-
        /* if announcement packet, use the source,
         * otherwise assume it is in the hw_src
         */
@@ -866,12 +861,13 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
                                    struct batadv_hard_iface *primary_if,
                                    struct sk_buff *skb)
 {
-       struct batadv_bla_claim_dst *bla_dst;
+       struct batadv_bla_claim_dst *bla_dst, *bla_dst_own;
        uint8_t *hw_src, *hw_dst;
-       struct vlan_ethhdr *vhdr;
+       struct vlan_hdr *vhdr, vhdr_buf;
        struct ethhdr *ethhdr;
        struct arphdr *arphdr;
        unsigned short vid;
+       int vlan_depth = 0;
        __be16 proto;
        int headlen;
        int ret;
@@ -882,9 +878,24 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        proto = ethhdr->h_proto;
        headlen = ETH_HLEN;
        if (vid & BATADV_VLAN_HAS_TAG) {
-               vhdr = vlan_eth_hdr(skb);
-               proto = vhdr->h_vlan_encapsulated_proto;
-               headlen += VLAN_HLEN;
+               /* Traverse the VLAN/Ethertypes.
+                *
+                * At this point it is known that the first protocol is a VLAN
+                * header, so start checking at the encapsulated protocol.
+                *
+                * The depth of the VLAN headers is recorded to drop BLA claim
+                * frames encapsulated into multiple VLAN headers (QinQ).
+                */
+               do {
+                       vhdr = skb_header_pointer(skb, headlen, VLAN_HLEN,
+                                                 &vhdr_buf);
+                       if (!vhdr)
+                               return 0;
+
+                       proto = vhdr->h_vlan_encapsulated_proto;
+                       headlen += VLAN_HLEN;
+                       vlan_depth++;
+               } while (proto == htons(ETH_P_8021Q));
        }
 
        if (proto != htons(ETH_P_ARP))
@@ -914,6 +925,19 @@ static int batadv_bla_process_claim(struct batadv_priv *bat_priv,
        hw_src = (uint8_t *)arphdr + sizeof(struct arphdr);
        hw_dst = hw_src + ETH_ALEN + 4;
        bla_dst = (struct batadv_bla_claim_dst *)hw_dst;
+       bla_dst_own = &bat_priv->bla.claim_dest;
+
+       /* check if it is a claim frame in general */
+       if (memcmp(bla_dst->magic, bla_dst_own->magic,
+                  sizeof(bla_dst->magic)) != 0)
+               return 0;
+
+       /* check if there is a claim frame encapsulated deeper in (QinQ) and
+        * drop that, as this is not supported by BLA but should also not be
+        * sent via the mesh.
+        */
+       if (vlan_depth > 1)
+               return 1;
 
        /* check if it is a claim frame. */
        ret = batadv_check_claim_group(bat_priv, primary_if, hw_src, hw_dst,
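
The claim parser now walks an arbitrary stack of 802.1Q headers and records
the depth, so claims encapsulated in QinQ frames can be dropped. A standalone
parser over a raw frame buffer showing the same loop (the Ethertype constants
are the real values; the frame bytes are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define ETH_HLEN        14
    #define VLAN_HLEN       4
    #define ETH_P_8021Q     0x8100
    #define ETH_P_ARP       0x0806

    /* Returns the payload Ethertype and stores how many stacked VLAN
     * headers were traversed. */
    static uint16_t skip_vlans(const uint8_t *frame, size_t len, int *depth)
    {
            size_t off = ETH_HLEN - 2;  /* offset of the Ethertype field */
            uint16_t proto;

            *depth = 0;
            for (;;) {
                    if (off + 2 > len)
                            return 0;
                    proto = (frame[off] << 8) | frame[off + 1];
                    if (proto != ETH_P_8021Q)
                            return proto;
                    (*depth)++;
                    off += VLAN_HLEN;   /* skip TCI + inner Ethertype */
            }
    }

    int main(void)
    {
            /* MACs, then 8100 (vid 5), 8100 (vid 6), then ARP */
            uint8_t frame[64] = { [12] = 0x81, [13] = 0x00, [14] = 0x00,
                                  [15] = 0x05, [16] = 0x81, [17] = 0x00,
                                  [18] = 0x00, [19] = 0x06, [20] = 0x08,
                                  [21] = 0x06 };
            int depth;
            uint16_t proto = skip_vlans(frame, sizeof(frame), &depth);

            /* QinQ (depth > 1) claims are dropped by the patched code */
            printf("proto=0x%04x depth=%d -> %s\n", proto, depth,
                   depth > 1 ? "drop" : "process");
            return 0;
    }
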
index e7ee65d..e0a7239 100644 (file)
@@ -448,10 +448,15 @@ out:
  *  possibly free it
  * @softif_vlan: the vlan object to release
  */
-void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *softif_vlan)
+void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
 {
-       if (atomic_dec_and_test(&softif_vlan->refcount))
-               kfree_rcu(softif_vlan, rcu);
+       if (atomic_dec_and_test(&vlan->refcount)) {
+               spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+               hlist_del_rcu(&vlan->list);
+               spin_unlock_bh(&vlan->bat_priv->softif_vlan_list_lock);
+
+               kfree_rcu(vlan, rcu);
+       }
 }
 
 /**
@@ -505,6 +510,7 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
        if (!vlan)
                return -ENOMEM;
 
+       vlan->bat_priv = bat_priv;
        vlan->vid = vid;
        atomic_set(&vlan->refcount, 1);
 
@@ -516,6 +522,10 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                return err;
        }
 
+       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
+       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+
        /* add a new TT local entry. This one will be marked with the NOPURGE
         * flag
         */
@@ -523,10 +533,6 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
                            bat_priv->soft_iface->dev_addr, vid,
                            BATADV_NULL_IFINDEX, BATADV_NO_MARK);
 
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
        return 0;
 }
 
@@ -538,18 +544,13 @@ int batadv_softif_create_vlan(struct batadv_priv *bat_priv, unsigned short vid)
 static void batadv_softif_destroy_vlan(struct batadv_priv *bat_priv,
                                       struct batadv_softif_vlan *vlan)
 {
-       spin_lock_bh(&bat_priv->softif_vlan_list_lock);
-       hlist_del_rcu(&vlan->list);
-       spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
-
-       batadv_sysfs_del_vlan(bat_priv, vlan);
-
        /* explicitly remove the associated TT local entry because it is marked
         * with the NOPURGE flag
         */
        batadv_tt_local_remove(bat_priv, bat_priv->soft_iface->dev_addr,
                               vlan->vid, "vlan interface destroyed", false);
 
+       batadv_sysfs_del_vlan(bat_priv, vlan);
        batadv_softif_vlan_free_ref(vlan);
 }
 
@@ -567,6 +568,8 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
                                    unsigned short vid)
 {
        struct batadv_priv *bat_priv = netdev_priv(dev);
+       struct batadv_softif_vlan *vlan;
+       int ret;
 
        /* only 802.1Q vlans are supported.
         * batman-adv does not know how to handle other types
@@ -576,7 +579,36 @@ static int batadv_interface_add_vid(struct net_device *dev, __be16 proto,
 
        vid |= BATADV_VLAN_HAS_TAG;
 
-       return batadv_softif_create_vlan(bat_priv, vid);
+       /* if a new vlan is getting created and it already exists, it means that
+        * it was not deleted yet. batadv_softif_vlan_get() increases the
+        * refcount in order to revive the object.
+        *
+        * if it does not exist then create it.
+        */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       if (!vlan)
+               return batadv_softif_create_vlan(bat_priv, vid);
+
+       /* recreate the sysfs object if it was already destroyed (and it
+        * should be, since we received a kill_vid() for this vlan)
+        */

+       if (!vlan->kobj) {
+               ret = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+               if (ret) {
+                       batadv_softif_vlan_free_ref(vlan);
+                       return ret;
+               }
+       }
+
+       /* add a new TT local entry. This one will be marked with the NOPURGE
+        * flag. This must be added again, even if the vlan object already
+        * exists, because the entry was deleted by kill_vid()
+        */
+       batadv_tt_local_add(bat_priv->soft_iface,
+                           bat_priv->soft_iface->dev_addr, vid,
+                           BATADV_NULL_IFINDEX, BATADV_NO_MARK);
+
+       return 0;
 }
 
 /**
@@ -895,7 +927,7 @@ struct net_device *batadv_softif_create(const char *name)
        int ret;
 
        soft_iface = alloc_netdev(sizeof(struct batadv_priv), name,
-                                 batadv_softif_init_early);
+                                 NET_NAME_UNKNOWN, batadv_softif_init_early);
        if (!soft_iface)
                return NULL;
 
index fc47baa..f40cb04 100644 (file)
@@ -900,32 +900,24 @@ int batadv_throw_uevent(struct batadv_priv *bat_priv, enum batadv_uev_type type,
 
        bat_kobj = &bat_priv->soft_iface->dev.kobj;
 
-       uevent_env[0] = kmalloc(strlen(BATADV_UEV_TYPE_VAR) +
-                               strlen(batadv_uev_type_str[type]) + 1,
-                               GFP_ATOMIC);
+       uevent_env[0] = kasprintf(GFP_ATOMIC,
+                                 "%s%s", BATADV_UEV_TYPE_VAR,
+                                 batadv_uev_type_str[type]);
        if (!uevent_env[0])
                goto out;
 
-       sprintf(uevent_env[0], "%s%s", BATADV_UEV_TYPE_VAR,
-               batadv_uev_type_str[type]);
-
-       uevent_env[1] = kmalloc(strlen(BATADV_UEV_ACTION_VAR) +
-                               strlen(batadv_uev_action_str[action]) + 1,
-                               GFP_ATOMIC);
+       uevent_env[1] = kasprintf(GFP_ATOMIC,
+                                 "%s%s", BATADV_UEV_ACTION_VAR,
+                                 batadv_uev_action_str[action]);
        if (!uevent_env[1])
                goto out;
 
-       sprintf(uevent_env[1], "%s%s", BATADV_UEV_ACTION_VAR,
-               batadv_uev_action_str[action]);
-
        /* If the event is DEL, ignore the data field */
        if (action != BATADV_UEV_DEL) {
-               uevent_env[2] = kmalloc(strlen(BATADV_UEV_DATA_VAR) +
-                                       strlen(data) + 1, GFP_ATOMIC);
+               uevent_env[2] = kasprintf(GFP_ATOMIC,
+                                         "%s%s", BATADV_UEV_DATA_VAR, data);
                if (!uevent_env[2])
                        goto out;
-
-               sprintf(uevent_env[2], "%s%s", BATADV_UEV_DATA_VAR, data);
        }
 
        ret = kobject_uevent_env(bat_kobj, KOBJ_CHANGE, uevent_env);
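
Replacing the kmalloc(strlen(...) + ... + 1)/sprintf() pairs with kasprintf()
removes the duplicated length computation that such code tends to get wrong.
The userspace equivalent of the transformation is asprintf(3) (a GNU
extension; the strings here are invented):

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            const char *var = "BATTYPE=", *val = "gw";
            char *env = NULL;

            /* before: malloc(strlen(var) + strlen(val) + 1) + sprintf */
            if (asprintf(&env, "%s%s", var, val) < 0)
                    return 1;
            printf("%s\n", env);
            free(env);
            return 0;
    }
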
index d636bde..5f59e7f 100644 (file)
@@ -511,6 +511,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        struct batadv_priv *bat_priv = netdev_priv(soft_iface);
        struct batadv_tt_local_entry *tt_local;
        struct batadv_tt_global_entry *tt_global = NULL;
+       struct batadv_softif_vlan *vlan;
        struct net_device *in_dev = NULL;
        struct hlist_head *head;
        struct batadv_tt_orig_list_entry *orig_entry;
@@ -572,6 +573,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (!tt_local)
                goto out;
 
+       /* increase the refcounter of the related vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+
        batadv_dbg(BATADV_DBG_TT, bat_priv,
                   "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
                   addr, BATADV_PRINT_VID(vid),
@@ -604,6 +608,7 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
        if (unlikely(hash_added != 0)) {
                /* remove the reference for the hash */
                batadv_tt_local_entry_free_ref(tt_local);
+               batadv_softif_vlan_free_ref(vlan);
                goto out;
        }
 
@@ -1009,6 +1014,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
 {
        struct batadv_tt_local_entry *tt_local_entry;
        uint16_t flags, curr_flags = BATADV_NO_FLAGS;
+       struct batadv_softif_vlan *vlan;
 
        tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
        if (!tt_local_entry)
@@ -1039,6 +1045,11 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
        hlist_del_rcu(&tt_local_entry->common.hash_entry);
        batadv_tt_local_entry_free_ref(tt_local_entry);
 
+       /* decrease the reference held for this vlan */
+       vlan = batadv_softif_vlan_get(bat_priv, vid);
+       batadv_softif_vlan_free_ref(vlan);
+       batadv_softif_vlan_free_ref(vlan);
+
 out:
        if (tt_local_entry)
                batadv_tt_local_entry_free_ref(tt_local_entry);
@@ -1111,6 +1122,7 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
        spinlock_t *list_lock; /* protects write access to the hash lists */
        struct batadv_tt_common_entry *tt_common_entry;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        uint32_t i;
@@ -1131,6 +1143,13 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common_entry,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv,
+                                                     tt_common_entry->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
@@ -3139,6 +3158,7 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
        struct batadv_hashtable *hash = bat_priv->tt.local_hash;
        struct batadv_tt_common_entry *tt_common;
        struct batadv_tt_local_entry *tt_local;
+       struct batadv_softif_vlan *vlan;
        struct hlist_node *node_tmp;
        struct hlist_head *head;
        spinlock_t *list_lock; /* protects write access to the hash lists */
@@ -3167,6 +3187,12 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
                        tt_local = container_of(tt_common,
                                                struct batadv_tt_local_entry,
                                                common);
+
+                       /* decrease the reference held for this vlan */
+                       vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
+                       batadv_softif_vlan_free_ref(vlan);
+                       batadv_softif_vlan_free_ref(vlan);
+
                        batadv_tt_local_entry_free_ref(tt_local);
                }
                spin_unlock_bh(list_lock);
index 34891a5..8854c05 100644 (file)
@@ -687,6 +687,7 @@ struct batadv_priv_nc {
 
 /**
  * struct batadv_softif_vlan - per VLAN attributes set
+ * @bat_priv: pointer to the mesh object
  * @vid: VLAN identifier
  * @kobj: kobject for sysfs vlan subdirectory
  * @ap_isolation: AP isolation state
@@ -696,6 +697,7 @@ struct batadv_priv_nc {
  * @rcu: struct used for freeing in a RCU-safe manner
  */
 struct batadv_softif_vlan {
+       struct batadv_priv *bat_priv;
        unsigned short vid;
        struct kobject *kobj;
        atomic_t ap_isolation;          /* boolean */
index 5a7f81d..206b65c 100644 (file)
@@ -712,7 +712,7 @@ static int setup_netdev(struct l2cap_chan *chan, struct lowpan_dev **dev)
        unsigned long flags;
 
        netdev = alloc_netdev(sizeof(struct lowpan_dev), IFACE_NAME_TEMPLATE,
-                             netdev_setup);
+                             NET_NAME_UNKNOWN, netdev_setup);
        if (!netdev)
                return -ENOMEM;
 
index a841d3e..85bcc21 100644 (file)
@@ -538,8 +538,9 @@ int bnep_add_connection(struct bnep_connadd_req *req, struct socket *sock)
 
        /* session struct allocated as private part of net_device */
        dev = alloc_netdev(sizeof(struct bnep_session),
-                               (*req->device) ? req->device : "bnep%d",
-                               bnep_net_setup);
+                          (*req->device) ? req->device : "bnep%d",
+                          NET_NAME_UNKNOWN,
+                          bnep_net_setup);
        if (!dev)
                return -ENOMEM;
 
index b524c36..0bb9d8b 100644 (file)
@@ -93,7 +93,7 @@ static void fdb_rcu_free(struct rcu_head *head)
 static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
 {
        int err;
-       struct net_bridge_port *p, *tmp;
+       struct net_bridge_port *p;
 
        ASSERT_RTNL();
 
@@ -107,11 +107,9 @@ static void fdb_add_hw(struct net_bridge *br, const unsigned char *addr)
 
        return;
 undo:
-       list_for_each_entry(tmp, &br->port_list, list) {
-               if (tmp == p)
-                       break;
-               if (!br_promisc_port(tmp))
-                       dev_uc_del(tmp->dev, addr);
+       list_for_each_entry_continue_reverse(p, &br->port_list, list) {
+               if (!br_promisc_port(p))
+                       dev_uc_del(p->dev, addr);
        }
 }
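
The rewritten undo path walks backwards from the port that failed, via
list_for_each_entry_continue_reverse(), instead of re-walking from the head
and comparing each entry against the failure point. The same unwind shape with
a plain array (hypothetical names):

    #include <stdio.h>

    static int apply(int port)      /* pretend port 3 fails */
    {
            return port == 3 ? -1 : 0;
    }

    static void undo(int port)
    {
            printf("undo port %d\n", port);
    }

    int main(void)
    {
            int ports[] = { 0, 1, 2, 3, 4 };
            int n = 5, i;

            for (i = 0; i < n; i++)
                    if (apply(ports[i]))
                            goto unwind;
            return 0;

    unwind:
            /* continue_reverse: start just before the failed entry */
            while (--i >= 0)
                    undo(ports[i]);
            return 1;
    }
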
 
@@ -678,6 +676,7 @@ errout:
 int br_fdb_dump(struct sk_buff *skb,
                struct netlink_callback *cb,
                struct net_device *dev,
+               struct net_device *filter_dev,
                int idx)
 {
        struct net_bridge *br = netdev_priv(dev);
@@ -693,6 +692,19 @@ int br_fdb_dump(struct sk_buff *skb,
                        if (idx < cb->args[0])
                                goto skip;
 
+                       if (filter_dev &&
+                           (!f->dst || f->dst->dev != filter_dev)) {
+                               if (filter_dev != dev)
+                                       goto skip;
+                               /* !f->dst is a special case for the bridge:
+                                * it means the MAC belongs to the bridge
+                                * itself, so we filter a little further and
+                                * only dump the !f->dst case
+                                */
+                               if (f->dst)
+                                       goto skip;
+                       }
+
                        if (fdb_fill_info(skb, br, f,
                                          NETLINK_CB(cb->skb).portid,
                                          cb->nlh->nlmsg_seq,
index 3eca3fd..078d336 100644 (file)
@@ -344,7 +344,7 @@ int br_add_bridge(struct net *net, const char *name)
        struct net_device *dev;
        int res;
 
-       dev = alloc_netdev(sizeof(struct net_bridge), name,
+       dev = alloc_netdev(sizeof(struct net_bridge), name, NET_NAME_UNKNOWN,
                           br_dev_setup);
 
        if (!dev)
index abfa0b6..b4845f4 100644 (file)
@@ -2215,6 +2215,43 @@ unlock:
 }
 EXPORT_SYMBOL_GPL(br_multicast_list_adjacent);
 
+/**
+ * br_multicast_has_querier_anywhere - Checks for a querier on a bridge
+ * @dev: The bridge port providing the bridge on which to check for a querier
+ * @proto: The protocol family to check for: IGMP -> ETH_P_IP, MLD -> ETH_P_IPV6
+ *
+ * Checks whether the given interface has a bridge on top and if so returns
+ * true if a valid querier exists anywhere on the bridged link layer.
+ * Otherwise returns false.
+ */
+bool br_multicast_has_querier_anywhere(struct net_device *dev, int proto)
+{
+       struct net_bridge *br;
+       struct net_bridge_port *port;
+       struct ethhdr eth;
+       bool ret = false;
+
+       rcu_read_lock();
+       if (!br_port_exists(dev))
+               goto unlock;
+
+       port = br_port_get_rcu(dev);
+       if (!port || !port->br)
+               goto unlock;
+
+       br = port->br;
+
+       memset(&eth, 0, sizeof(eth));
+       eth.h_proto = htons(proto);
+
+       ret = br_multicast_querier_exists(br, &eth);
+
+unlock:
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(br_multicast_has_querier_anywhere);
+
 /**
  * br_multicast_has_querier_adjacent - Checks for a querier behind a bridge port
  * @dev: The bridge port adjacent to which to check for a querier
index 26edb51..cb5fcf6 100644 (file)
@@ -208,7 +208,6 @@ int br_getlink(struct sk_buff *skb, u32 pid, u32 seq,
        int err = 0;
        struct net_bridge_port *port = br_port_get_rtnl(dev);
 
-       /* not a bridge port and  */
        if (!port && !(filter_mask & RTEXT_FILTER_BRVLAN))
                goto out;
 
index 23caf5b..62a7fa2 100644 (file)
@@ -399,7 +399,7 @@ int br_fdb_delete(struct ndmsg *ndm, struct nlattr *tb[],
 int br_fdb_add(struct ndmsg *nlh, struct nlattr *tb[], struct net_device *dev,
               const unsigned char *addr, u16 nlh_flags);
 int br_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb,
-               struct net_device *dev, int idx);
+               struct net_device *dev, struct net_device *fdev, int idx);
 int br_fdb_sync_static(struct net_bridge *br, struct net_bridge_port *p);
 void br_fdb_unsync_static(struct net_bridge *br, struct net_bridge_port *p);
 
index 629dc77..9cebf47 100644 (file)
@@ -14,6 +14,15 @@ config NFT_BRIDGE_META
        help
          Add support for bridge dedicated meta key.
 
+config NFT_BRIDGE_REJECT
+       tristate "Netfilter nf_tables bridge reject support"
+       depends on NFT_REJECT && NFT_REJECT_IPV4 && NFT_REJECT_IPV6
+       help
+         Add support to reject packets.
+
+config NF_LOG_BRIDGE
+       tristate "Bridge packet logging"
+
 endif # NF_TABLES_BRIDGE
 
 menuconfig BRIDGE_NF_EBTABLES
@@ -202,22 +211,6 @@ config BRIDGE_EBT_LOG
 
          To compile it as a module, choose M here.  If unsure, say N.
 
-config BRIDGE_EBT_ULOG
-       tristate "ebt: ulog support (OBSOLETE)"
-       help
-         This option enables the old bridge-specific "ebt_ulog" implementation
-         which has been obsoleted by the new "nfnetlink_log" code (see
-         CONFIG_NETFILTER_NETLINK_LOG).
-
-         This option adds the ulog watcher, that you can use in any rule
-         in any ebtables table. The packet is passed to a userspace
-         logging daemon using netlink multicast sockets. This differs
-         from the log watcher in the sense that the complete packet is
-         sent to userspace instead of a descriptive text and that
-         netlink multicast sockets are used instead of the syslog.
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 config BRIDGE_EBT_NFLOG
        tristate "ebt: nflog support"
        help
index 6f2f394..be4d0ce 100644 (file)
@@ -4,6 +4,10 @@
 
 obj-$(CONFIG_NF_TABLES_BRIDGE) += nf_tables_bridge.o
 obj-$(CONFIG_NFT_BRIDGE_META)  += nft_meta_bridge.o
+obj-$(CONFIG_NFT_BRIDGE_REJECT)  += nft_reject_bridge.o
+
+# packet logging
+obj-$(CONFIG_NF_LOG_BRIDGE) += nf_log_bridge.o
 
 obj-$(CONFIG_BRIDGE_NF_EBTABLES) += ebtables.o
 
@@ -33,5 +37,4 @@ obj-$(CONFIG_BRIDGE_EBT_SNAT) += ebt_snat.o
 
 # watchers
 obj-$(CONFIG_BRIDGE_EBT_LOG) += ebt_log.o
-obj-$(CONFIG_BRIDGE_EBT_ULOG) += ebt_ulog.o
 obj-$(CONFIG_BRIDGE_EBT_NFLOG) += ebt_nflog.o
index 5322a36..17f2e4b 100644 (file)
@@ -186,6 +186,10 @@ ebt_log_tg(struct sk_buff *skb, const struct xt_action_param *par)
        li.u.log.level = info->loglevel;
        li.u.log.logflags = info->bitmask;
 
+       /* Remember that we have to use ebt_log_packet() so as not to break
+        * backward compatibility. We cannot use the default bridge packet
+        * logger via nf_log_packet() with NF_LOG_TYPE_LOG here. --Pablo
+        */
        if (info->bitmask & EBT_LOG_NFLOG)
                nf_log_packet(net, NFPROTO_BRIDGE, par->hooknum, skb,
                              par->in, par->out, &li, "%s", info->prefix);
@@ -205,54 +209,13 @@ static struct xt_target ebt_log_tg_reg __read_mostly = {
        .me             = THIS_MODULE,
 };
 
-static struct nf_logger ebt_log_logger __read_mostly = {
-       .name           = "ebt_log",
-       .logfn          = &ebt_log_packet,
-       .me             = THIS_MODULE,
-};
-
-static int __net_init ebt_log_net_init(struct net *net)
-{
-       nf_log_set(net, NFPROTO_BRIDGE, &ebt_log_logger);
-       return 0;
-}
-
-static void __net_exit ebt_log_net_fini(struct net *net)
-{
-       nf_log_unset(net, &ebt_log_logger);
-}
-
-static struct pernet_operations ebt_log_net_ops = {
-       .init = ebt_log_net_init,
-       .exit = ebt_log_net_fini,
-};
-
 static int __init ebt_log_init(void)
 {
-       int ret;
-
-       ret = register_pernet_subsys(&ebt_log_net_ops);
-       if (ret < 0)
-               goto err_pernet;
-
-       ret = xt_register_target(&ebt_log_tg_reg);
-       if (ret < 0)
-               goto err_target;
-
-       nf_log_register(NFPROTO_BRIDGE, &ebt_log_logger);
-
-       return ret;
-
-err_target:
-       unregister_pernet_subsys(&ebt_log_net_ops);
-err_pernet:
-       return ret;
+       return xt_register_target(&ebt_log_tg_reg);
 }
 
 static void __exit ebt_log_fini(void)
 {
-       unregister_pernet_subsys(&ebt_log_net_ops);
-       nf_log_unregister(&ebt_log_logger);
        xt_unregister_target(&ebt_log_tg_reg);
 }
 
diff --git a/net/bridge/netfilter/ebt_ulog.c b/net/bridge/netfilter/ebt_ulog.c
deleted file mode 100644 (file)
index 7c470c3..0000000
+++ /dev/null
@@ -1,393 +0,0 @@
-/*
- * netfilter module for userspace bridged Ethernet frames logging daemons
- *
- *     Authors:
- *     Bart De Schuymer <bdschuym@pandora.be>
- *     Harald Welte <laforge@netfilter.org>
- *
- *  November, 2004
- *
- * Based on ipt_ULOG.c, which is
- * (C) 2000-2002 by Harald Welte <laforge@netfilter.org>
- *
- * This module accepts two parameters:
- *
- * nlbufsiz:
- *   The parameter specifies how big the buffer for each netlink multicast
- * group is; e.g. if you say nlbufsiz=8192, up to eight kB of packets will
- * get accumulated in the kernel until they are sent to userspace. It is
- * NOT possible to allocate more than 128kB, and it is strongly discouraged,
- * because atomically allocating 128kB inside the network rx softirq is not
- * reliable. Please also keep in mind that this buffer size is allocated for
- * each nlgroup you are using, so the total kernel memory usage increases
- * by that factor.
- *
- * flushtimeout:
- *   Specify after how many hundredths of a second the queue should be
- *   flushed even if it is not full yet.
- *
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/socket.h>
-#include <linux/skbuff.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <net/netlink.h>
-#include <linux/netdevice.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_bridge/ebtables.h>
-#include <linux/netfilter_bridge/ebt_ulog.h>
-#include <net/netfilter/nf_log.h>
-#include <net/netns/generic.h>
-#include <net/sock.h>
-#include "../br_private.h"
-
-static unsigned int nlbufsiz = NLMSG_GOODSIZE;
-module_param(nlbufsiz, uint, 0600);
-MODULE_PARM_DESC(nlbufsiz, "netlink buffer size (number of bytes) "
-                          "(defaults to 4096)");
-
-static unsigned int flushtimeout = 10;
-module_param(flushtimeout, uint, 0600);
-MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second) "
-                              "(defaults to 10)");
-
-typedef struct {
-       unsigned int qlen;              /* number of nlmsgs' in the skb */
-       struct nlmsghdr *lastnlh;       /* netlink header of last msg in skb */
-       struct sk_buff *skb;            /* the pre-allocated skb */
-       struct timer_list timer;        /* the timer function */
-       spinlock_t lock;                /* the per-queue lock */
-} ebt_ulog_buff_t;
-
-static int ebt_ulog_net_id __read_mostly;
-struct ebt_ulog_net {
-       unsigned int nlgroup[EBT_ULOG_MAXNLGROUPS];
-       ebt_ulog_buff_t ulog_buffers[EBT_ULOG_MAXNLGROUPS];
-       struct sock *ebtulognl;
-};
-
-static struct ebt_ulog_net *ebt_ulog_pernet(struct net *net)
-{
-       return net_generic(net, ebt_ulog_net_id);
-}
-
-/* send one ulog_buff_t to userspace */
-static void ulog_send(struct ebt_ulog_net *ebt, unsigned int nlgroup)
-{
-       ebt_ulog_buff_t *ub = &ebt->ulog_buffers[nlgroup];
-
-       del_timer(&ub->timer);
-
-       if (!ub->skb)
-               return;
-
-       /* last nlmsg needs NLMSG_DONE */
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_type = NLMSG_DONE;
-
-       NETLINK_CB(ub->skb).dst_group = nlgroup + 1;
-       netlink_broadcast(ebt->ebtulognl, ub->skb, 0, nlgroup + 1, GFP_ATOMIC);
-
-       ub->qlen = 0;
-       ub->skb = NULL;
-}
-
-/* timer function to flush queue in flushtimeout time */
-static void ulog_timer(unsigned long data)
-{
-       struct ebt_ulog_net *ebt = container_of((void *)data,
-                                               struct ebt_ulog_net,
-                                               nlgroup[*(unsigned int *)data]);
-
-       ebt_ulog_buff_t *ub = &ebt->ulog_buffers[*(unsigned int *)data];
-       spin_lock_bh(&ub->lock);
-       if (ub->skb)
-               ulog_send(ebt, *(unsigned int *)data);
-       spin_unlock_bh(&ub->lock);
-}
-
-static struct sk_buff *ulog_alloc_skb(unsigned int size)
-{
-       struct sk_buff *skb;
-       unsigned int n;
-
-       n = max(size, nlbufsiz);
-       skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
-       if (!skb) {
-               if (n > size) {
-                       /* try to allocate only as much as we need for
-                        * current packet */
-                       skb = alloc_skb(size, GFP_ATOMIC);
-                       if (!skb)
-                               pr_debug("cannot even allocate buffer of size %ub\n",
-                                        size);
-               }
-       }
-
-       return skb;
-}
-
-static void ebt_ulog_packet(struct net *net, unsigned int hooknr,
-                           const struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           const struct ebt_ulog_info *uloginfo,
-                           const char *prefix)
-{
-       ebt_ulog_packet_msg_t *pm;
-       size_t size, copy_len;
-       struct nlmsghdr *nlh;
-       struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
-       unsigned int group = uloginfo->nlgroup;
-       ebt_ulog_buff_t *ub = &ebt->ulog_buffers[group];
-       spinlock_t *lock = &ub->lock;
-       ktime_t kt;
-
-       if ((uloginfo->cprange == 0) ||
-           (uloginfo->cprange > skb->len + ETH_HLEN))
-               copy_len = skb->len + ETH_HLEN;
-       else
-               copy_len = uloginfo->cprange;
-
-       size = nlmsg_total_size(sizeof(*pm) + copy_len);
-       if (size > nlbufsiz) {
-               pr_debug("Size %Zd needed, but nlbufsiz=%d\n", size, nlbufsiz);
-               return;
-       }
-
-       spin_lock_bh(lock);
-
-       if (!ub->skb) {
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto unlock;
-       } else if (size > skb_tailroom(ub->skb)) {
-               ulog_send(ebt, group);
-
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto unlock;
-       }
-
-       nlh = nlmsg_put(ub->skb, 0, ub->qlen, 0,
-                       size - NLMSG_ALIGN(sizeof(*nlh)), 0);
-       if (!nlh) {
-               kfree_skb(ub->skb);
-               ub->skb = NULL;
-               goto unlock;
-       }
-       ub->qlen++;
-
-       pm = nlmsg_data(nlh);
-       memset(pm, 0, sizeof(*pm));
-
-       /* Fill in the ulog data */
-       pm->version = EBT_ULOG_VERSION;
-       kt = ktime_get_real();
-       pm->stamp = ktime_to_timeval(kt);
-       if (ub->qlen == 1)
-               ub->skb->tstamp = kt;
-       pm->data_len = copy_len;
-       pm->mark = skb->mark;
-       pm->hook = hooknr;
-       if (uloginfo->prefix != NULL)
-               strcpy(pm->prefix, uloginfo->prefix);
-
-       if (in) {
-               strcpy(pm->physindev, in->name);
-               /* If in isn't a bridge, then physindev==indev */
-               if (br_port_exists(in))
-                       /* rcu_read_lock()ed by nf_hook_slow */
-                       strcpy(pm->indev, br_port_get_rcu(in)->br->dev->name);
-               else
-                       strcpy(pm->indev, in->name);
-       }
-
-       if (out) {
-               /* If out exists, then out is a bridge port */
-               strcpy(pm->physoutdev, out->name);
-               /* rcu_read_lock()ed by nf_hook_slow */
-               strcpy(pm->outdev, br_port_get_rcu(out)->br->dev->name);
-       }
-
-       if (skb_copy_bits(skb, -ETH_HLEN, pm->data, copy_len) < 0)
-               BUG();
-
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
-
-       ub->lastnlh = nlh;
-
-       if (ub->qlen >= uloginfo->qthreshold)
-               ulog_send(ebt, group);
-       else if (!timer_pending(&ub->timer)) {
-               ub->timer.expires = jiffies + flushtimeout * HZ / 100;
-               add_timer(&ub->timer);
-       }
-
-unlock:
-       spin_unlock_bh(lock);
-}
-
-/* this function is registered with the netfilter core */
-static void ebt_log_packet(struct net *net, u_int8_t pf, unsigned int hooknum,
-   const struct sk_buff *skb, const struct net_device *in,
-   const struct net_device *out, const struct nf_loginfo *li,
-   const char *prefix)
-{
-       struct ebt_ulog_info loginfo;
-
-       if (!li || li->type != NF_LOG_TYPE_ULOG) {
-               loginfo.nlgroup = EBT_ULOG_DEFAULT_NLGROUP;
-               loginfo.cprange = 0;
-               loginfo.qthreshold = EBT_ULOG_DEFAULT_QTHRESHOLD;
-               loginfo.prefix[0] = '\0';
-       } else {
-               loginfo.nlgroup = li->u.ulog.group;
-               loginfo.cprange = li->u.ulog.copy_len;
-               loginfo.qthreshold = li->u.ulog.qthreshold;
-               strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
-       }
-
-       ebt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
-}
-
-static unsigned int
-ebt_ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct net *net = dev_net(par->in ? par->in : par->out);
-
-       ebt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
-                       par->targinfo, NULL);
-       return EBT_CONTINUE;
-}
-
-static int ebt_ulog_tg_check(const struct xt_tgchk_param *par)
-{
-       struct ebt_ulog_info *uloginfo = par->targinfo;
-
-       if (!par->net->xt.ebt_ulog_warn_deprecated) {
-               pr_info("ebt_ulog is deprecated and it will be removed soon, "
-                       "use ebt_nflog instead\n");
-               par->net->xt.ebt_ulog_warn_deprecated = true;
-       }
-
-       if (uloginfo->nlgroup > 31)
-               return -EINVAL;
-
-       uloginfo->prefix[EBT_ULOG_PREFIX_LEN - 1] = '\0';
-
-       if (uloginfo->qthreshold > EBT_ULOG_MAX_QLEN)
-               uloginfo->qthreshold = EBT_ULOG_MAX_QLEN;
-
-       return 0;
-}
-
-static struct xt_target ebt_ulog_tg_reg __read_mostly = {
-       .name           = "ulog",
-       .revision       = 0,
-       .family         = NFPROTO_BRIDGE,
-       .target         = ebt_ulog_tg,
-       .checkentry     = ebt_ulog_tg_check,
-       .targetsize     = sizeof(struct ebt_ulog_info),
-       .me             = THIS_MODULE,
-};
-
-static struct nf_logger ebt_ulog_logger __read_mostly = {
-       .name           = "ebt_ulog",
-       .logfn          = &ebt_log_packet,
-       .me             = THIS_MODULE,
-};
-
-static int __net_init ebt_ulog_net_init(struct net *net)
-{
-       int i;
-       struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
-
-       struct netlink_kernel_cfg cfg = {
-               .groups = EBT_ULOG_MAXNLGROUPS,
-       };
-
-       /* initialize ulog_buffers */
-       for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
-               ebt->nlgroup[i] = i;
-               setup_timer(&ebt->ulog_buffers[i].timer, ulog_timer,
-                           (unsigned long)&ebt->nlgroup[i]);
-               spin_lock_init(&ebt->ulog_buffers[i].lock);
-       }
-
-       ebt->ebtulognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
-       if (!ebt->ebtulognl)
-               return -ENOMEM;
-
-       nf_log_set(net, NFPROTO_BRIDGE, &ebt_ulog_logger);
-       return 0;
-}
-
-static void __net_exit ebt_ulog_net_fini(struct net *net)
-{
-       int i;
-       struct ebt_ulog_net *ebt = ebt_ulog_pernet(net);
-
-       nf_log_unset(net, &ebt_ulog_logger);
-       for (i = 0; i < EBT_ULOG_MAXNLGROUPS; i++) {
-               ebt_ulog_buff_t *ub = &ebt->ulog_buffers[i];
-               del_timer(&ub->timer);
-
-               if (ub->skb) {
-                       kfree_skb(ub->skb);
-                       ub->skb = NULL;
-               }
-       }
-       netlink_kernel_release(ebt->ebtulognl);
-}
-
-static struct pernet_operations ebt_ulog_net_ops = {
-       .init = ebt_ulog_net_init,
-       .exit = ebt_ulog_net_fini,
-       .id   = &ebt_ulog_net_id,
-       .size = sizeof(struct ebt_ulog_net),
-};
-
-static int __init ebt_ulog_init(void)
-{
-       int ret;
-
-       if (nlbufsiz >= 128*1024) {
-               pr_warn("Netlink buffer has to be <= 128kB, "
-                       "please try a smaller nlbufsiz parameter.\n");
-               return -EINVAL;
-       }
-
-       ret = register_pernet_subsys(&ebt_ulog_net_ops);
-       if (ret)
-               goto out_pernet;
-
-       ret = xt_register_target(&ebt_ulog_tg_reg);
-       if (ret)
-               goto out_target;
-
-       nf_log_register(NFPROTO_BRIDGE, &ebt_ulog_logger);
-
-       return 0;
-
-out_target:
-       unregister_pernet_subsys(&ebt_ulog_net_ops);
-out_pernet:
-       return ret;
-}
-
-static void __exit ebt_ulog_fini(void)
-{
-       nf_log_unregister(&ebt_ulog_logger);
-       xt_unregister_target(&ebt_ulog_tg_reg);
-       unregister_pernet_subsys(&ebt_ulog_net_ops);
-}
-
-module_init(ebt_ulog_init);
-module_exit(ebt_ulog_fini);
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
-MODULE_DESCRIPTION("Ebtables: Packet logging to netlink using ULOG");
diff --git a/net/bridge/netfilter/nf_log_bridge.c b/net/bridge/netfilter/nf_log_bridge.c
new file mode 100644 (file)
index 0000000..5d9953a
--- /dev/null
@@ -0,0 +1,96 @@
+/*
+ * (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_bridge.h>
+#include <linux/ip.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_log.h>
+
+static void nf_log_bridge_packet(struct net *net, u_int8_t pf,
+                                unsigned int hooknum,
+                                const struct sk_buff *skb,
+                                const struct net_device *in,
+                                const struct net_device *out,
+                                const struct nf_loginfo *loginfo,
+                                const char *prefix)
+{
+       switch (eth_hdr(skb)->h_proto) {
+       case htons(ETH_P_IP):
+               nf_log_packet(net, NFPROTO_IPV4, hooknum, skb, in, out,
+                             loginfo, "%s", prefix);
+               break;
+       case htons(ETH_P_IPV6):
+               nf_log_packet(net, NFPROTO_IPV6, hooknum, skb, in, out,
+                             loginfo, "%s", prefix);
+               break;
+       case htons(ETH_P_ARP):
+       case htons(ETH_P_RARP):
+               nf_log_packet(net, NFPROTO_ARP, hooknum, skb, in, out,
+                             loginfo, "%s", prefix);
+               break;
+       }
+}
+
+static struct nf_logger nf_bridge_logger __read_mostly = {
+       .name           = "nf_log_bridge",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_bridge_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_bridge_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_BRIDGE, &nf_bridge_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_bridge_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_bridge_logger);
+}
+
+static struct pernet_operations nf_log_bridge_net_ops = {
+       .init = nf_log_bridge_net_init,
+       .exit = nf_log_bridge_net_exit,
+};
+
+static int __init nf_log_bridge_init(void)
+{
+       int ret;
+
+       /* Request to load the real packet loggers. */
+       nf_logger_request_module(NFPROTO_IPV4, NF_LOG_TYPE_LOG);
+       nf_logger_request_module(NFPROTO_IPV6, NF_LOG_TYPE_LOG);
+       nf_logger_request_module(NFPROTO_ARP, NF_LOG_TYPE_LOG);
+
+       ret = register_pernet_subsys(&nf_log_bridge_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_BRIDGE, &nf_bridge_logger);
+       return 0;
+}
+
+static void __exit nf_log_bridge_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_bridge_net_ops);
+       nf_log_unregister(&nf_bridge_logger);
+}
+
+module_init(nf_log_bridge_init);
+module_exit(nf_log_bridge_exit);
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter bridge packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 0);
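
With this logger registered for NFPROTO_BRIDGE, any bridge-side caller of the generic nf_log_packet() interface gets its frame re-dispatched to the IPv4, IPv6 or ARP logger matching the ethertype. A hedged sketch of such a caller; the hook function itself is hypothetical:

	#include <linux/netdevice.h>
	#include <linux/netfilter.h>
	#include <net/netfilter/nf_log.h>

	/* Hypothetical bridge hook that logs every frame via the generic
	 * interface; nf_log_bridge_packet() then picks the real logger
	 * based on eth_hdr(skb)->h_proto.
	 */
	static unsigned int demo_br_log_hook(const struct nf_hook_ops *ops,
					     struct sk_buff *skb,
					     const struct net_device *in,
					     const struct net_device *out,
					     int (*okfn)(struct sk_buff *))
	{
		nf_log_packet(dev_net(in), NFPROTO_BRIDGE, ops->hooknum, skb,
			      in, out, NULL, "demo: ");
		return NF_ACCEPT;
	}
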
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
new file mode 100644 (file)
index 0000000..ee3ffe9
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2014 Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netlink.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nft_reject.h>
+
+static void nft_reject_bridge_eval(const struct nft_expr *expr,
+                                struct nft_data data[NFT_REG_MAX + 1],
+                                const struct nft_pktinfo *pkt)
+{
+       switch (eth_hdr(pkt->skb)->h_proto) {
+       case htons(ETH_P_IP):
+               return nft_reject_ipv4_eval(expr, data, pkt);
+       case htons(ETH_P_IPV6):
+               return nft_reject_ipv6_eval(expr, data, pkt);
+       default:
+               /* No explicit way to reject this protocol, drop it. */
+               data[NFT_REG_VERDICT].verdict = NF_DROP;
+               break;
+       }
+}
+
+static struct nft_expr_type nft_reject_bridge_type;
+static const struct nft_expr_ops nft_reject_bridge_ops = {
+       .type           = &nft_reject_bridge_type,
+       .size           = NFT_EXPR_SIZE(sizeof(struct nft_reject)),
+       .eval           = nft_reject_bridge_eval,
+       .init           = nft_reject_init,
+       .dump           = nft_reject_dump,
+};
+
+static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
+       .family         = NFPROTO_BRIDGE,
+       .name           = "reject",
+       .ops            = &nft_reject_bridge_ops,
+       .policy         = nft_reject_policy,
+       .maxattr        = NFTA_REJECT_MAX,
+       .owner          = THIS_MODULE,
+};
+
+static int __init nft_reject_bridge_module_init(void)
+{
+       return nft_register_expr(&nft_reject_bridge_type);
+}
+
+static void __exit nft_reject_bridge_module_exit(void)
+{
+       nft_unregister_expr(&nft_reject_bridge_type);
+}
+
+module_init(nft_reject_bridge_module_init);
+module_exit(nft_reject_bridge_module_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_ALIAS_NFT_AF_EXPR(AF_BRIDGE, "reject");
index e843709..43f750e 100644 (file)
@@ -908,8 +908,7 @@ static int caif_release(struct socket *sock)
        sock->sk = NULL;
 
        WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
-       if (cf_sk->debugfs_socket_dir != NULL)
-               debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+       debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
 
        lock_sock(&(cf_sk->sk));
        sk->sk_state = CAIF_DISCONNECTED;
index 0f45522..f5afda1 100644 (file)
@@ -547,7 +547,6 @@ static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
        default:
                pr_err("Unrecognized Control Frame\n");
                goto error;
-               break;
        }
        ret = 0;
 error:
index 9a76eaf..bc8aeef 100644 (file)
@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
 {
        int tot_len;
 
-       if (kern_msg->msg_namelen) {
+       if (kern_msg->msg_name && kern_msg->msg_namelen) {
                if (mode == VERIFY_READ) {
                        int err = move_addr_to_kernel(kern_msg->msg_name,
                                                      kern_msg->msg_namelen,
@@ -93,10 +93,11 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
                        if (err < 0)
                                return err;
                }
-               if (kern_msg->msg_name)
-                       kern_msg->msg_name = kern_address;
-       } else
+               kern_msg->msg_name = kern_address;
+       } else {
                kern_msg->msg_name = NULL;
+               kern_msg->msg_namelen = 0;
+       }
 
        tot_len = iov_from_user_compat_to_kern(kern_iov,
                                          (struct compat_iovec __user *)kern_msg->msg_iov,
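
The hunk above pairs the two fields: a usable name requires both msg_name and msg_namelen, and everything else is normalized to the NULL/0 state so later code cannot act on a stale pointer or length. A standalone, hypothetical illustration of the invariant being enforced:

	#include <linux/socket.h>

	/* Hypothetical sketch: after this normalization,
	 * msg_name == NULL implies msg_namelen == 0.
	 */
	static void demo_normalize_name(struct msghdr *msg, void *kern_address)
	{
		if (msg->msg_name && msg->msg_namelen) {
			msg->msg_name = kern_address;
		} else {
			msg->msg_name = NULL;
			msg->msg_namelen = 0;
		}
	}
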
index 30eedf6..b370230 100644 (file)
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly;   /* Taps */
 static struct list_head offload_base __read_mostly;
 
 static int netif_rx_internal(struct sk_buff *skb);
+static int call_netdevice_notifiers_info(unsigned long val,
+                                        struct net_device *dev,
+                                        struct netdev_notifier_info *info);
 
 /*
  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1082,6 +1085,7 @@ static int dev_get_valid_name(struct net *net,
  */
 int dev_change_name(struct net_device *dev, const char *newname)
 {
+       unsigned char old_assign_type;
        char oldname[IFNAMSIZ];
        int err = 0;
        int ret;
@@ -1109,10 +1113,17 @@ int dev_change_name(struct net_device *dev, const char *newname)
                return err;
        }
 
+       if (oldname[0] && !strchr(oldname, '%'))
+               netdev_info(dev, "renamed from %s\n", oldname);
+
+       old_assign_type = dev->name_assign_type;
+       dev->name_assign_type = NET_NAME_RENAMED;
+
 rollback:
        ret = device_rename(&dev->dev, dev->name);
        if (ret) {
                memcpy(dev->name, oldname, IFNAMSIZ);
+               dev->name_assign_type = old_assign_type;
                write_seqcount_end(&devnet_rename_seq);
                return ret;
        }
@@ -1141,6 +1152,8 @@ rollback:
                        write_seqcount_begin(&devnet_rename_seq);
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        memcpy(oldname, newname, IFNAMSIZ);
+                       dev->name_assign_type = old_assign_type;
+                       old_assign_type = NET_NAME_RENAMED;
                        goto rollback;
                } else {
                        pr_err("%s: name change rollback failed: %d\n",
@@ -1207,7 +1220,11 @@ EXPORT_SYMBOL(netdev_features_change);
 void netdev_state_change(struct net_device *dev)
 {
        if (dev->flags & IFF_UP) {
-               call_netdevice_notifiers(NETDEV_CHANGE, dev);
+               struct netdev_notifier_change_info change_info;
+
+               change_info.flags_changed = 0;
+               call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
+                                             &change_info.info);
                rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
        }
 }
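
Since NETDEV_CHANGE events now always carry a netdev_notifier_change_info, listeners can rely on the info pointer being valid for that event. A hedged sketch of a consumer; the handler below is hypothetical:

	#include <linux/netdevice.h>

	/* Hypothetical notifier: reads the flags_changed field that
	 * netdev_state_change() now supplies (zero when no flag changed).
	 */
	static int demo_netdev_event(struct notifier_block *nb,
				     unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);

		if (event == NETDEV_CHANGE) {
			struct netdev_notifier_change_info *ci = ptr;

			netdev_info(dev, "flags changed: 0x%x\n",
				    ci->flags_changed);
		}
		return NOTIFY_DONE;
	}
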
@@ -2309,7 +2326,7 @@ __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
         */
        if (type == htons(ETH_P_8021Q) || type == htons(ETH_P_8021AD)) {
                if (vlan_depth) {
-                       if (unlikely(WARN_ON(vlan_depth < VLAN_HLEN)))
+                       if (WARN_ON(vlan_depth < VLAN_HLEN))
                                return 0;
                        vlan_depth -= VLAN_HLEN;
                } else {
@@ -2407,8 +2424,8 @@ struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
 
                skb_warn_bad_offload(skb);
 
-               if (skb_header_cloned(skb) &&
-                   (err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
+               err = skb_cow_head(skb, 0);
+               if (err < 0)
                        return ERR_PTR(err);
        }
 
@@ -2738,8 +2755,8 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
        /*
         * Heuristic to force contended enqueues to serialize on a
         * separate lock before trying to get qdisc main lock.
-        * This permits __QDISC_STATE_RUNNING owner to get the lock more often
-        * and dequeue packets faster.
+        * This permits __QDISC___STATE_RUNNING owner to get the lock more
+        * often and dequeue packets faster.
         */
        contended = qdisc_is_running(q);
        if (unlikely(contended))
@@ -4089,6 +4106,8 @@ static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
        skb->vlan_tci = 0;
        skb->dev = napi->dev;
        skb->skb_iif = 0;
+       skb->encapsulation = 0;
+       skb_shinfo(skb)->gso_type = 0;
        skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
 
        napi->skb = skb;
@@ -4227,9 +4246,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
 #endif
        napi->weight = weight_p;
        local_irq_disable();
-       while (work < quota) {
+       while (1) {
                struct sk_buff *skb;
-               unsigned int qlen;
 
                while ((skb = __skb_dequeue(&sd->process_queue))) {
                        local_irq_enable();
@@ -4243,24 +4261,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
                }
 
                rps_lock(sd);
-               qlen = skb_queue_len(&sd->input_pkt_queue);
-               if (qlen)
-                       skb_queue_splice_tail_init(&sd->input_pkt_queue,
-                                                  &sd->process_queue);
-
-               if (qlen < quota - work) {
+               if (skb_queue_empty(&sd->input_pkt_queue)) {
                        /*
                         * Inline a custom version of __napi_complete().
                         * Only the current CPU owns and manipulates this napi,
-                        * and NAPI_STATE_SCHED is the only possible flag set on backlog.
-                        * we can use a plain write instead of clear_bit(),
+                        * and NAPI_STATE_SCHED is the only possible flag set
+                        * on backlog.
+                        * We can use a plain write instead of clear_bit(),
                         * and we dont need an smp_mb() memory barrier.
                         */
                        list_del(&napi->poll_list);
                        napi->state = 0;
+                       rps_unlock(sd);
 
-                       quota = work + qlen;
+                       break;
                }
+
+               skb_queue_splice_tail_init(&sd->input_pkt_queue,
+                                          &sd->process_queue);
                rps_unlock(sd);
        }
        local_irq_enable();
@@ -5432,13 +5450,9 @@ int __dev_change_flags(struct net_device *dev, unsigned int flags)
         */
 
        ret = 0;
-       if ((old_flags ^ flags) & IFF_UP) {     /* Bit is different  ? */
+       if ((old_flags ^ flags) & IFF_UP)
                ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
 
-               if (!ret)
-                       dev_set_rx_mode(dev);
-       }
-
        if ((flags ^ dev->gflags) & IFF_PROMISC) {
                int inc = (flags & IFF_PROMISC) ? 1 : -1;
                unsigned int old_flags = dev->flags;
@@ -6438,17 +6452,19 @@ void netdev_freemem(struct net_device *dev)
 
 /**
  *     alloc_netdev_mqs - allocate network device
- *     @sizeof_priv:   size of private data to allocate space for
- *     @name:          device name format string
- *     @setup:         callback to initialize device
- *     @txqs:          the number of TX subqueues to allocate
- *     @rxqs:          the number of RX subqueues to allocate
+ *     @sizeof_priv:           size of private data to allocate space for
+ *     @name:                  device name format string
+ *     @name_assign_type:      origin of device name
+ *     @setup:                 callback to initialize device
+ *     @txqs:                  the number of TX subqueues to allocate
+ *     @rxqs:                  the number of RX subqueues to allocate
  *
  *     Allocates a struct net_device with private data area for driver use
  *     and performs basic initialization.  Also allocates subqueue structs
  *     for each queue on the device.
  */
 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
+               unsigned char name_assign_type,
                void (*setup)(struct net_device *),
                unsigned int txqs, unsigned int rxqs)
 {
@@ -6527,6 +6543,7 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
 #endif
 
        strcpy(dev->name, name);
+       dev->name_assign_type = name_assign_type;
        dev->group = INIT_NETDEV_GROUP;
        if (!dev->ethtool_ops)
                dev->ethtool_ops = &default_ethtool_ops;
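
A hedged sketch of the extended allocator as documented above, for a hypothetical multiqueue driver that lets the kernel enumerate the name:

	#include <linux/netdevice.h>
	#include <linux/etherdevice.h>

	struct demo_mq_priv {
		int dummy;	/* illustrative private data */
	};

	/* Hypothetical: 4 TX and 4 RX queues; NET_NAME_ENUM marks the
	 * name as enumerated by the kernel from the "%d" format.
	 */
	static struct net_device *demo_mq_alloc(void)
	{
		return alloc_netdev_mqs(sizeof(struct demo_mq_priv), "dmq%d",
					NET_NAME_ENUM, ether_setup, 4, 4);
	}
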
@@ -6938,12 +6955,14 @@ static int __netdev_printk(const char *level, const struct net_device *dev,
        if (dev && dev->dev.parent) {
                r = dev_printk_emit(level[1] - '0',
                                    dev->dev.parent,
-                                   "%s %s %s: %pV",
+                                   "%s %s %s%s: %pV",
                                    dev_driver_string(dev->dev.parent),
                                    dev_name(dev->dev.parent),
-                                   netdev_name(dev), vaf);
+                                   netdev_name(dev), netdev_reg_state(dev),
+                                   vaf);
        } else if (dev) {
-               r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
+               r = printk("%s%s%s: %pV", level, netdev_name(dev),
+                          netdev_reg_state(dev), vaf);
        } else {
                r = printk("%s(NULL net_device): %pV", level, vaf);
        }
@@ -7095,7 +7114,7 @@ static void __net_exit default_device_exit_batch(struct list_head *net_list)
        rtnl_lock_unregistering(net_list);
        list_for_each_entry(net, net_list, exit_list) {
                for_each_netdev_reverse(net, dev) {
-                       if (dev->rtnl_link_ops)
+                       if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
                                dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
                        else
                                unregister_netdevice_queue(dev, &dev_kill_list);
index e70301e..50f9a9d 100644 (file)
@@ -289,10 +289,8 @@ static int net_dm_cmd_trace(struct sk_buff *skb,
        switch (info->genlhdr->cmd) {
        case NET_DM_CMD_START:
                return set_all_monitor_traces(TRACE_ON);
-               break;
        case NET_DM_CMD_STOP:
                return set_all_monitor_traces(TRACE_OFF);
-               break;
        }
 
        return -ENOTSUPP;
index 80d6286..a028409 100644 (file)
@@ -269,6 +269,15 @@ again:
 }
 EXPORT_SYMBOL(dst_destroy);
 
+static void dst_destroy_rcu(struct rcu_head *head)
+{
+       struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
+
+       dst = dst_destroy(dst);
+       if (dst)
+               __dst_free(dst);
+}
+
 void dst_release(struct dst_entry *dst)
 {
        if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
 
                newrefcnt = atomic_dec_return(&dst->__refcnt);
                WARN_ON(newrefcnt < 0);
-               if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) {
-                       dst = dst_destroy(dst);
-                       if (dst)
-                               __dst_free(dst);
-               }
+               if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
+                       call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
 }
 EXPORT_SYMBOL(dst_release);
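
The dst_release() change above is an instance of a standard pattern: when lockless readers may still hold the pointer, defer the destructive step by an RCU grace period instead of freeing inline. A generic sketch with hypothetical names:

	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct demo_obj {
		struct rcu_head rcu_head;
		/* payload ... */
	};

	static void demo_obj_free_rcu(struct rcu_head *head)
	{
		kfree(container_of(head, struct demo_obj, rcu_head));
	}

	/* Hypothetical release: readers under rcu_read_lock() keep seeing
	 * valid memory until the grace period has elapsed.
	 */
	static void demo_obj_release(struct demo_obj *obj)
	{
		call_rcu(&obj->rcu_head, demo_obj_free_rcu);
	}
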
index 735fad8..d814b8a 100644 (file)
@@ -18,7 +18,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  * Andi Kleen - Fix a few bad bugs and races.
- * Kris Katterjohn - Added many additional checks in sk_chk_filter()
+ * Kris Katterjohn - Added many additional checks in bpf_check_classic()
  */
 
 #include <linux/module.h>
 #include <linux/seccomp.h>
 #include <linux/if_vlan.h>
 
-/* Registers */
-#define BPF_R0 regs[BPF_REG_0]
-#define BPF_R1 regs[BPF_REG_1]
-#define BPF_R2 regs[BPF_REG_2]
-#define BPF_R3 regs[BPF_REG_3]
-#define BPF_R4 regs[BPF_REG_4]
-#define BPF_R5 regs[BPF_REG_5]
-#define BPF_R6 regs[BPF_REG_6]
-#define BPF_R7 regs[BPF_REG_7]
-#define BPF_R8 regs[BPF_REG_8]
-#define BPF_R9 regs[BPF_REG_9]
-#define BPF_R10        regs[BPF_REG_10]
-
-/* Named registers */
-#define DST    regs[insn->dst_reg]
-#define SRC    regs[insn->src_reg]
-#define FP     regs[BPF_REG_FP]
-#define ARG1   regs[BPF_REG_ARG1]
-#define CTX    regs[BPF_REG_CTX]
-#define IMM    insn->imm
-
-/* No hurry in this branch
- *
- * Exported for the bpf jit load helper.
- */
-void *bpf_internal_load_pointer_neg_helper(const struct sk_buff *skb, int k, unsigned int size)
-{
-       u8 *ptr = NULL;
-
-       if (k >= SKF_NET_OFF)
-               ptr = skb_network_header(skb) + k - SKF_NET_OFF;
-       else if (k >= SKF_LL_OFF)
-               ptr = skb_mac_header(skb) + k - SKF_LL_OFF;
-       if (ptr >= skb->head && ptr + size <= skb_tail_pointer(skb))
-               return ptr;
-
-       return NULL;
-}
-
-static inline void *load_pointer(const struct sk_buff *skb, int k,
-                                unsigned int size, void *buffer)
-{
-       if (k >= 0)
-               return skb_header_pointer(skb, k, size, buffer);
-
-       return bpf_internal_load_pointer_neg_helper(skb, k, size);
-}
-
 /**
  *     sk_filter - run a packet through a socket filter
  *     @sk: sock associated with &sk_buff
@@ -135,451 +87,6 @@ int sk_filter(struct sock *sk, struct sk_buff *skb)
 }
 EXPORT_SYMBOL(sk_filter);
 
-/* Base function for offset calculation. Needs to go into .text section,
- * therefore keeping it non-static as well; will also be used by JITs
- * anyway later on, so do not let the compiler omit it.
- */
-noinline u64 __bpf_call_base(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5)
-{
-       return 0;
-}
-
-/**
- *     __sk_run_filter - run a filter on a given context
- *     @ctx: buffer to run the filter on
- *     @insn: filter to apply
- *
- * Decode and apply filter instructions to the skb->data. Return length to
- * keep, 0 for none. @ctx is the data we are operating on, @insn is the
- * array of filter instructions.
- */
-static unsigned int __sk_run_filter(void *ctx, const struct sock_filter_int *insn)
-{
-       u64 stack[MAX_BPF_STACK / sizeof(u64)];
-       u64 regs[MAX_BPF_REG], tmp;
-       static const void *jumptable[256] = {
-               [0 ... 255] = &&default_label,
-               /* Now overwrite non-defaults ... */
-               /* 32 bit ALU operations */
-               [BPF_ALU | BPF_ADD | BPF_X] = &&ALU_ADD_X,
-               [BPF_ALU | BPF_ADD | BPF_K] = &&ALU_ADD_K,
-               [BPF_ALU | BPF_SUB | BPF_X] = &&ALU_SUB_X,
-               [BPF_ALU | BPF_SUB | BPF_K] = &&ALU_SUB_K,
-               [BPF_ALU | BPF_AND | BPF_X] = &&ALU_AND_X,
-               [BPF_ALU | BPF_AND | BPF_K] = &&ALU_AND_K,
-               [BPF_ALU | BPF_OR | BPF_X]  = &&ALU_OR_X,
-               [BPF_ALU | BPF_OR | BPF_K]  = &&ALU_OR_K,
-               [BPF_ALU | BPF_LSH | BPF_X] = &&ALU_LSH_X,
-               [BPF_ALU | BPF_LSH | BPF_K] = &&ALU_LSH_K,
-               [BPF_ALU | BPF_RSH | BPF_X] = &&ALU_RSH_X,
-               [BPF_ALU | BPF_RSH | BPF_K] = &&ALU_RSH_K,
-               [BPF_ALU | BPF_XOR | BPF_X] = &&ALU_XOR_X,
-               [BPF_ALU | BPF_XOR | BPF_K] = &&ALU_XOR_K,
-               [BPF_ALU | BPF_MUL | BPF_X] = &&ALU_MUL_X,
-               [BPF_ALU | BPF_MUL | BPF_K] = &&ALU_MUL_K,
-               [BPF_ALU | BPF_MOV | BPF_X] = &&ALU_MOV_X,
-               [BPF_ALU | BPF_MOV | BPF_K] = &&ALU_MOV_K,
-               [BPF_ALU | BPF_DIV | BPF_X] = &&ALU_DIV_X,
-               [BPF_ALU | BPF_DIV | BPF_K] = &&ALU_DIV_K,
-               [BPF_ALU | BPF_MOD | BPF_X] = &&ALU_MOD_X,
-               [BPF_ALU | BPF_MOD | BPF_K] = &&ALU_MOD_K,
-               [BPF_ALU | BPF_NEG] = &&ALU_NEG,
-               [BPF_ALU | BPF_END | BPF_TO_BE] = &&ALU_END_TO_BE,
-               [BPF_ALU | BPF_END | BPF_TO_LE] = &&ALU_END_TO_LE,
-               /* 64 bit ALU operations */
-               [BPF_ALU64 | BPF_ADD | BPF_X] = &&ALU64_ADD_X,
-               [BPF_ALU64 | BPF_ADD | BPF_K] = &&ALU64_ADD_K,
-               [BPF_ALU64 | BPF_SUB | BPF_X] = &&ALU64_SUB_X,
-               [BPF_ALU64 | BPF_SUB | BPF_K] = &&ALU64_SUB_K,
-               [BPF_ALU64 | BPF_AND | BPF_X] = &&ALU64_AND_X,
-               [BPF_ALU64 | BPF_AND | BPF_K] = &&ALU64_AND_K,
-               [BPF_ALU64 | BPF_OR | BPF_X] = &&ALU64_OR_X,
-               [BPF_ALU64 | BPF_OR | BPF_K] = &&ALU64_OR_K,
-               [BPF_ALU64 | BPF_LSH | BPF_X] = &&ALU64_LSH_X,
-               [BPF_ALU64 | BPF_LSH | BPF_K] = &&ALU64_LSH_K,
-               [BPF_ALU64 | BPF_RSH | BPF_X] = &&ALU64_RSH_X,
-               [BPF_ALU64 | BPF_RSH | BPF_K] = &&ALU64_RSH_K,
-               [BPF_ALU64 | BPF_XOR | BPF_X] = &&ALU64_XOR_X,
-               [BPF_ALU64 | BPF_XOR | BPF_K] = &&ALU64_XOR_K,
-               [BPF_ALU64 | BPF_MUL | BPF_X] = &&ALU64_MUL_X,
-               [BPF_ALU64 | BPF_MUL | BPF_K] = &&ALU64_MUL_K,
-               [BPF_ALU64 | BPF_MOV | BPF_X] = &&ALU64_MOV_X,
-               [BPF_ALU64 | BPF_MOV | BPF_K] = &&ALU64_MOV_K,
-               [BPF_ALU64 | BPF_ARSH | BPF_X] = &&ALU64_ARSH_X,
-               [BPF_ALU64 | BPF_ARSH | BPF_K] = &&ALU64_ARSH_K,
-               [BPF_ALU64 | BPF_DIV | BPF_X] = &&ALU64_DIV_X,
-               [BPF_ALU64 | BPF_DIV | BPF_K] = &&ALU64_DIV_K,
-               [BPF_ALU64 | BPF_MOD | BPF_X] = &&ALU64_MOD_X,
-               [BPF_ALU64 | BPF_MOD | BPF_K] = &&ALU64_MOD_K,
-               [BPF_ALU64 | BPF_NEG] = &&ALU64_NEG,
-               /* Call instruction */
-               [BPF_JMP | BPF_CALL] = &&JMP_CALL,
-               /* Jumps */
-               [BPF_JMP | BPF_JA] = &&JMP_JA,
-               [BPF_JMP | BPF_JEQ | BPF_X] = &&JMP_JEQ_X,
-               [BPF_JMP | BPF_JEQ | BPF_K] = &&JMP_JEQ_K,
-               [BPF_JMP | BPF_JNE | BPF_X] = &&JMP_JNE_X,
-               [BPF_JMP | BPF_JNE | BPF_K] = &&JMP_JNE_K,
-               [BPF_JMP | BPF_JGT | BPF_X] = &&JMP_JGT_X,
-               [BPF_JMP | BPF_JGT | BPF_K] = &&JMP_JGT_K,
-               [BPF_JMP | BPF_JGE | BPF_X] = &&JMP_JGE_X,
-               [BPF_JMP | BPF_JGE | BPF_K] = &&JMP_JGE_K,
-               [BPF_JMP | BPF_JSGT | BPF_X] = &&JMP_JSGT_X,
-               [BPF_JMP | BPF_JSGT | BPF_K] = &&JMP_JSGT_K,
-               [BPF_JMP | BPF_JSGE | BPF_X] = &&JMP_JSGE_X,
-               [BPF_JMP | BPF_JSGE | BPF_K] = &&JMP_JSGE_K,
-               [BPF_JMP | BPF_JSET | BPF_X] = &&JMP_JSET_X,
-               [BPF_JMP | BPF_JSET | BPF_K] = &&JMP_JSET_K,
-               /* Program return */
-               [BPF_JMP | BPF_EXIT] = &&JMP_EXIT,
-               /* Store instructions */
-               [BPF_STX | BPF_MEM | BPF_B] = &&STX_MEM_B,
-               [BPF_STX | BPF_MEM | BPF_H] = &&STX_MEM_H,
-               [BPF_STX | BPF_MEM | BPF_W] = &&STX_MEM_W,
-               [BPF_STX | BPF_MEM | BPF_DW] = &&STX_MEM_DW,
-               [BPF_STX | BPF_XADD | BPF_W] = &&STX_XADD_W,
-               [BPF_STX | BPF_XADD | BPF_DW] = &&STX_XADD_DW,
-               [BPF_ST | BPF_MEM | BPF_B] = &&ST_MEM_B,
-               [BPF_ST | BPF_MEM | BPF_H] = &&ST_MEM_H,
-               [BPF_ST | BPF_MEM | BPF_W] = &&ST_MEM_W,
-               [BPF_ST | BPF_MEM | BPF_DW] = &&ST_MEM_DW,
-               /* Load instructions */
-               [BPF_LDX | BPF_MEM | BPF_B] = &&LDX_MEM_B,
-               [BPF_LDX | BPF_MEM | BPF_H] = &&LDX_MEM_H,
-               [BPF_LDX | BPF_MEM | BPF_W] = &&LDX_MEM_W,
-               [BPF_LDX | BPF_MEM | BPF_DW] = &&LDX_MEM_DW,
-               [BPF_LD | BPF_ABS | BPF_W] = &&LD_ABS_W,
-               [BPF_LD | BPF_ABS | BPF_H] = &&LD_ABS_H,
-               [BPF_LD | BPF_ABS | BPF_B] = &&LD_ABS_B,
-               [BPF_LD | BPF_IND | BPF_W] = &&LD_IND_W,
-               [BPF_LD | BPF_IND | BPF_H] = &&LD_IND_H,
-               [BPF_LD | BPF_IND | BPF_B] = &&LD_IND_B,
-       };
-       void *ptr;
-       int off;
-
-#define CONT    ({ insn++; goto select_insn; })
-#define CONT_JMP ({ insn++; goto select_insn; })
-
-       FP = (u64) (unsigned long) &stack[ARRAY_SIZE(stack)];
-       ARG1 = (u64) (unsigned long) ctx;
-
-       /* Registers used in classic BPF programs need to be reset first. */
-       regs[BPF_REG_A] = 0;
-       regs[BPF_REG_X] = 0;
-
-select_insn:
-       goto *jumptable[insn->code];
-
-       /* ALU */
-#define ALU(OPCODE, OP)                        \
-       ALU64_##OPCODE##_X:             \
-               DST = DST OP SRC;       \
-               CONT;                   \
-       ALU_##OPCODE##_X:               \
-               DST = (u32) DST OP (u32) SRC;   \
-               CONT;                   \
-       ALU64_##OPCODE##_K:             \
-               DST = DST OP IMM;               \
-               CONT;                   \
-       ALU_##OPCODE##_K:               \
-               DST = (u32) DST OP (u32) IMM;   \
-               CONT;
-
-       ALU(ADD,  +)
-       ALU(SUB,  -)
-       ALU(AND,  &)
-       ALU(OR,   |)
-       ALU(LSH, <<)
-       ALU(RSH, >>)
-       ALU(XOR,  ^)
-       ALU(MUL,  *)
-#undef ALU
-       ALU_NEG:
-               DST = (u32) -DST;
-               CONT;
-       ALU64_NEG:
-               DST = -DST;
-               CONT;
-       ALU_MOV_X:
-               DST = (u32) SRC;
-               CONT;
-       ALU_MOV_K:
-               DST = (u32) IMM;
-               CONT;
-       ALU64_MOV_X:
-               DST = SRC;
-               CONT;
-       ALU64_MOV_K:
-               DST = IMM;
-               CONT;
-       ALU64_ARSH_X:
-               (*(s64 *) &DST) >>= SRC;
-               CONT;
-       ALU64_ARSH_K:
-               (*(s64 *) &DST) >>= IMM;
-               CONT;
-       ALU64_MOD_X:
-               if (unlikely(SRC == 0))
-                       return 0;
-               tmp = DST;
-               DST = do_div(tmp, SRC);
-               CONT;
-       ALU_MOD_X:
-               if (unlikely(SRC == 0))
-                       return 0;
-               tmp = (u32) DST;
-               DST = do_div(tmp, (u32) SRC);
-               CONT;
-       ALU64_MOD_K:
-               tmp = DST;
-               DST = do_div(tmp, IMM);
-               CONT;
-       ALU_MOD_K:
-               tmp = (u32) DST;
-               DST = do_div(tmp, (u32) IMM);
-               CONT;
-       ALU64_DIV_X:
-               if (unlikely(SRC == 0))
-                       return 0;
-               do_div(DST, SRC);
-               CONT;
-       ALU_DIV_X:
-               if (unlikely(SRC == 0))
-                       return 0;
-               tmp = (u32) DST;
-               do_div(tmp, (u32) SRC);
-               DST = (u32) tmp;
-               CONT;
-       ALU64_DIV_K:
-               do_div(DST, IMM);
-               CONT;
-       ALU_DIV_K:
-               tmp = (u32) DST;
-               do_div(tmp, (u32) IMM);
-               DST = (u32) tmp;
-               CONT;
-       ALU_END_TO_BE:
-               switch (IMM) {
-               case 16:
-                       DST = (__force u16) cpu_to_be16(DST);
-                       break;
-               case 32:
-                       DST = (__force u32) cpu_to_be32(DST);
-                       break;
-               case 64:
-                       DST = (__force u64) cpu_to_be64(DST);
-                       break;
-               }
-               CONT;
-       ALU_END_TO_LE:
-               switch (IMM) {
-               case 16:
-                       DST = (__force u16) cpu_to_le16(DST);
-                       break;
-               case 32:
-                       DST = (__force u32) cpu_to_le32(DST);
-                       break;
-               case 64:
-                       DST = (__force u64) cpu_to_le64(DST);
-                       break;
-               }
-               CONT;
-
-       /* CALL */
-       JMP_CALL:
-               /* Function call scratches BPF_R1-BPF_R5 registers,
-                * preserves BPF_R6-BPF_R9, and stores return value
-                * into BPF_R0.
-                */
-               BPF_R0 = (__bpf_call_base + insn->imm)(BPF_R1, BPF_R2, BPF_R3,
-                                                      BPF_R4, BPF_R5);
-               CONT;
-
-       /* JMP */
-       JMP_JA:
-               insn += insn->off;
-               CONT;
-       JMP_JEQ_X:
-               if (DST == SRC) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JEQ_K:
-               if (DST == IMM) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JNE_X:
-               if (DST != SRC) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JNE_K:
-               if (DST != IMM) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JGT_X:
-               if (DST > SRC) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JGT_K:
-               if (DST > IMM) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JGE_X:
-               if (DST >= SRC) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JGE_K:
-               if (DST >= IMM) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JSGT_X:
-               if (((s64) DST) > ((s64) SRC)) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JSGT_K:
-               if (((s64) DST) > ((s64) IMM)) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JSGE_X:
-               if (((s64) DST) >= ((s64) SRC)) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JSGE_K:
-               if (((s64) DST) >= ((s64) IMM)) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JSET_X:
-               if (DST & SRC) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_JSET_K:
-               if (DST & IMM) {
-                       insn += insn->off;
-                       CONT_JMP;
-               }
-               CONT;
-       JMP_EXIT:
-               return BPF_R0;
-
-       /* STX and ST and LDX*/
-#define LDST(SIZEOP, SIZE)                                             \
-       STX_MEM_##SIZEOP:                                               \
-               *(SIZE *)(unsigned long) (DST + insn->off) = SRC;       \
-               CONT;                                                   \
-       ST_MEM_##SIZEOP:                                                \
-               *(SIZE *)(unsigned long) (DST + insn->off) = IMM;       \
-               CONT;                                                   \
-       LDX_MEM_##SIZEOP:                                               \
-               DST = *(SIZE *)(unsigned long) (SRC + insn->off);       \
-               CONT;
-
-       LDST(B,   u8)
-       LDST(H,  u16)
-       LDST(W,  u32)
-       LDST(DW, u64)
-#undef LDST
-       STX_XADD_W: /* lock xadd *(u32 *)(dst_reg + off16) += src_reg */
-               atomic_add((u32) SRC, (atomic_t *)(unsigned long)
-                          (DST + insn->off));
-               CONT;
-       STX_XADD_DW: /* lock xadd *(u64 *)(dst_reg + off16) += src_reg */
-               atomic64_add((u64) SRC, (atomic64_t *)(unsigned long)
-                            (DST + insn->off));
-               CONT;
-       LD_ABS_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + imm32)) */
-               off = IMM;
-load_word:
-               /* BPF_LD + BPF_ABS and BPF_LD + BPF_IND insns
-                * appear only in programs where ctx ==
-                * skb. All programs keep 'ctx' in regs[BPF_REG_CTX]
-                * == BPF_R6, sk_convert_filter() saves it in BPF_R6,
-                * internal BPF verifier will check that BPF_R6 ==
-                * ctx.
-                *
-                * BPF_ABS and BPF_IND are wrappers of function calls,
-                * so they scratch BPF_R1-BPF_R5 registers, preserve
-                * BPF_R6-BPF_R9, and store return value into BPF_R0.
-                *
-                * Implicit input:
-                *   ctx == skb == BPF_R6 == CTX
-                *
-                * Explicit input:
-                *   SRC == any register
-                *   IMM == 32-bit immediate
-                *
-                * Output:
-                *   BPF_R0 - 8/16/32-bit skb data converted to cpu endianness
-                */
-
-               ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 4, &tmp);
-               if (likely(ptr != NULL)) {
-                       BPF_R0 = get_unaligned_be32(ptr);
-                       CONT;
-               }
-
-               return 0;
-       LD_ABS_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + imm32)) */
-               off = IMM;
-load_half:
-               ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 2, &tmp);
-               if (likely(ptr != NULL)) {
-                       BPF_R0 = get_unaligned_be16(ptr);
-                       CONT;
-               }
-
-               return 0;
-       LD_ABS_B: /* BPF_R0 = *(u8 *) (skb->data + imm32) */
-               off = IMM;
-load_byte:
-               ptr = load_pointer((struct sk_buff *) (unsigned long) CTX, off, 1, &tmp);
-               if (likely(ptr != NULL)) {
-                       BPF_R0 = *(u8 *)ptr;
-                       CONT;
-               }
-
-               return 0;
-       LD_IND_W: /* BPF_R0 = ntohl(*(u32 *) (skb->data + src_reg + imm32)) */
-               off = IMM + SRC;
-               goto load_word;
-       LD_IND_H: /* BPF_R0 = ntohs(*(u16 *) (skb->data + src_reg + imm32)) */
-               off = IMM + SRC;
-               goto load_half;
-       LD_IND_B: /* BPF_R0 = *(u8 *) (skb->data + src_reg + imm32) */
-               off = IMM + SRC;
-               goto load_byte;
-
-       default_label:
-               /* If we ever reach this, we have a bug somewhere. */
-               WARN_RATELIMIT(1, "unknown opcode %02x\n", insn->code);
-               return 0;
-}
-
 /* Helper to find the offset of pkt_type in the sk_buff structure. We want
  * to make sure it's still a 3-bit field starting at a byte boundary;
  * taken from arch/x86/net/bpf_jit_comp.c.
@@ -667,9 +174,9 @@ static u64 __get_random_u32(u64 ctx, u64 a, u64 x, u64 r4, u64 r5)
 }
 
 static bool convert_bpf_extensions(struct sock_filter *fp,
-                                  struct sock_filter_int **insnp)
+                                  struct bpf_insn **insnp)
 {
-       struct sock_filter_int *insn = *insnp;
+       struct bpf_insn *insn = *insnp;
 
        switch (fp->k) {
        case SKF_AD_OFF + SKF_AD_PROTOCOL:
@@ -805,7 +312,7 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
 }
 
 /**
- *     sk_convert_filter - convert filter program
+ *     bpf_convert_filter - convert filter program
  *     @prog: the user passed filter program
  *     @len: the length of the user passed filter program
  *     @new_prog: buffer where converted program will be stored
@@ -815,12 +322,12 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * Conversion workflow:
  *
  * 1) First pass for calculating the new program length:
- *   sk_convert_filter(old_prog, old_len, NULL, &new_len)
+ *   bpf_convert_filter(old_prog, old_len, NULL, &new_len)
  *
 * 2) Second pass to remap, done in two sub-passes: the first finds
 *    the new jump offsets, the second emits the remapped program:
- *   new_prog = kmalloc(sizeof(struct sock_filter_int) * new_len);
- *   sk_convert_filter(old_prog, old_len, new_prog, &new_len);
+ *   new_prog = kmalloc(sizeof(struct bpf_insn) * new_len);
+ *   bpf_convert_filter(old_prog, old_len, new_prog, &new_len);
  *
  * User BPF's register A is mapped to our BPF register 6, user BPF
  * register X is mapped to BPF register 7; frame pointer is always
@@ -828,11 +335,11 @@ static bool convert_bpf_extensions(struct sock_filter *fp,
  * for socket filters: ctx == 'struct sk_buff *', for seccomp:
  * ctx == 'struct seccomp_data *'.
  */
-int sk_convert_filter(struct sock_filter *prog, int len,
-                     struct sock_filter_int *new_prog, int *new_len)
+int bpf_convert_filter(struct sock_filter *prog, int len,
+                      struct bpf_insn *new_prog, int *new_len)
 {
        int new_flen = 0, pass = 0, target, i;
-       struct sock_filter_int *new_insn;
+       struct bpf_insn *new_insn;
        struct sock_filter *fp;
        int *addrs = NULL;
        u8 bpf_src;
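
A hedged sketch of the two-pass convention documented above; error paths are trimmed and the caller function is hypothetical, but the call sequence follows the workflow comment:

	#include <linux/filter.h>
	#include <linux/slab.h>

	static struct bpf_insn *demo_convert(struct sock_fprog_kern *fprog)
	{
		struct bpf_insn *new_prog;
		int new_len = 0;

		/* Pass 1: compute the converted program length only. */
		if (bpf_convert_filter(fprog->filter, fprog->len, NULL, &new_len))
			return NULL;

		new_prog = kmalloc_array(new_len, sizeof(*new_prog), GFP_KERNEL);
		if (!new_prog)
			return NULL;

		/* Pass 2: emit the remapped instructions. */
		if (bpf_convert_filter(fprog->filter, fprog->len, new_prog,
				       &new_len)) {
			kfree(new_prog);
			return NULL;
		}
		return new_prog;
	}
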
@@ -840,11 +347,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
        BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
        BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
 
-       if (len <= 0 || len >= BPF_MAXINSNS)
+       if (len <= 0 || len > BPF_MAXINSNS)
                return -EINVAL;
 
        if (new_prog) {
-               addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL);
+               addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
                if (!addrs)
                        return -ENOMEM;
        }
@@ -858,8 +365,8 @@ do_pass:
        new_insn++;
 
        for (i = 0; i < len; fp++, i++) {
-               struct sock_filter_int tmp_insns[6] = { };
-               struct sock_filter_int *insn = tmp_insns;
+               struct bpf_insn tmp_insns[6] = { };
+               struct bpf_insn *insn = tmp_insns;
 
                if (addrs)
                        addrs[i] = new_insn - new_prog;
@@ -1094,14 +601,14 @@ err:
  * a cell if not previously written, and we check all branches to be sure
  * a malicious user doesn't try to abuse us.
  */
-static int check_load_and_stores(struct sock_filter *filter, int flen)
+static int check_load_and_stores(const struct sock_filter *filter, int flen)
 {
        u16 *masks, memvalid = 0; /* One bit per cell, 16 cells */
        int pc, ret = 0;
 
        BUILD_BUG_ON(BPF_MEMWORDS > 16);
 
-       masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL);
+       masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
        if (!masks)
                return -ENOMEM;
 
@@ -1214,7 +721,7 @@ static bool chk_code_allowed(u16 code_to_probe)
 }
 
 /**
- *     sk_chk_filter - verify socket filter code
+ *     bpf_check_classic - verify socket filter code
  *     @filter: filter to verify
  *     @flen: length of filter
  *
@@ -1227,7 +734,7 @@ static bool chk_code_allowed(u16 code_to_probe)
  *
  * Returns 0 if the rule set is legal or -EINVAL if not.
  */
-int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
+int bpf_check_classic(const struct sock_filter *filter, unsigned int flen)
 {
        bool anc_found;
        int pc;
@@ -1237,7 +744,7 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 
        /* Check the filter code now */
        for (pc = 0; pc < flen; pc++) {
-               struct sock_filter *ftest = &filter[pc];
+               const struct sock_filter *ftest = &filter[pc];
 
                /* May we actually operate on this code? */
                if (!chk_code_allowed(ftest->code))
@@ -1301,12 +808,12 @@ int sk_chk_filter(struct sock_filter *filter, unsigned int flen)
 
        return -EINVAL;
 }
-EXPORT_SYMBOL(sk_chk_filter);
+EXPORT_SYMBOL(bpf_check_classic);
 
-static int sk_store_orig_filter(struct sk_filter *fp,
-                               const struct sock_fprog *fprog)
+static int bpf_prog_store_orig_filter(struct bpf_prog *fp,
+                                     const struct sock_fprog *fprog)
 {
-       unsigned int fsize = sk_filter_proglen(fprog);
+       unsigned int fsize = bpf_classic_proglen(fprog);
        struct sock_fprog_kern *fkprog;
 
        fp->orig_prog = kmalloc(sizeof(*fkprog), GFP_KERNEL);
@@ -1324,7 +831,7 @@ static int sk_store_orig_filter(struct sk_filter *fp,
        return 0;
 }
 
-static void sk_release_orig_filter(struct sk_filter *fp)
+static void bpf_release_orig_filter(struct bpf_prog *fp)
 {
        struct sock_fprog_kern *fprog = fp->orig_prog;
 
@@ -1334,6 +841,18 @@ static void sk_release_orig_filter(struct sk_filter *fp)
        }
 }
 
+static void __bpf_prog_release(struct bpf_prog *prog)
+{
+       bpf_release_orig_filter(prog);
+       bpf_prog_free(prog);
+}
+
+static void __sk_filter_release(struct sk_filter *fp)
+{
+       __bpf_prog_release(fp->prog);
+       kfree(fp);
+}
+
 /**
  *     sk_filter_release_rcu - Release a socket filter by rcu_head
  *     @rcu: rcu_head that contains the sk_filter to free
@@ -1342,8 +861,7 @@ static void sk_filter_release_rcu(struct rcu_head *rcu)
 {
        struct sk_filter *fp = container_of(rcu, struct sk_filter, rcu);
 
-       sk_release_orig_filter(fp);
-       sk_filter_free(fp);
+       __sk_filter_release(fp);
 }
 
 /**
@@ -1360,44 +878,33 @@ static void sk_filter_release(struct sk_filter *fp)
 
 void sk_filter_uncharge(struct sock *sk, struct sk_filter *fp)
 {
-       atomic_sub(sk_filter_size(fp->len), &sk->sk_omem_alloc);
-       sk_filter_release(fp);
-}
+       u32 filter_size = bpf_prog_size(fp->prog->len);
 
-void sk_filter_charge(struct sock *sk, struct sk_filter *fp)
-{
-       atomic_inc(&fp->refcnt);
-       atomic_add(sk_filter_size(fp->len), &sk->sk_omem_alloc);
+       atomic_sub(filter_size, &sk->sk_omem_alloc);
+       sk_filter_release(fp);
 }
 
-static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
-                                             struct sock *sk,
-                                             unsigned int len)
+/* Try to charge the socket memory if there is space available;
+ * returns true on success.
+ */
+bool sk_filter_charge(struct sock *sk, struct sk_filter *fp)
 {
-       struct sk_filter *fp_new;
-
-       if (sk == NULL)
-               return krealloc(fp, len, GFP_KERNEL);
-
-       fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
-       if (fp_new) {
-               *fp_new = *fp;
-               /* As we're kepping orig_prog in fp_new along,
-                * we need to make sure we're not evicting it
-                * from the old fp.
-                */
-               fp->orig_prog = NULL;
-               sk_filter_uncharge(sk, fp);
+       u32 filter_size = bpf_prog_size(fp->prog->len);
+
+       /* same check as in sock_kmalloc() */
+       if (filter_size <= sysctl_optmem_max &&
+           atomic_read(&sk->sk_omem_alloc) + filter_size < sysctl_optmem_max) {
+               atomic_inc(&fp->refcnt);
+               atomic_add(filter_size, &sk->sk_omem_alloc);
+               return true;
        }
-
-       return fp_new;
+       return false;
 }
 
-static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
-                                            struct sock *sk)
+static struct bpf_prog *bpf_migrate_filter(struct bpf_prog *fp)
 {
        struct sock_filter *old_prog;
-       struct sk_filter *old_fp;
+       struct bpf_prog *old_fp;
        int err, new_len, old_len = fp->len;
 
        /* We are free to overwrite insns et al right here as it
@@ -1406,7 +913,7 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
         * representation.
         */
        BUILD_BUG_ON(sizeof(struct sock_filter) !=
-                    sizeof(struct sock_filter_int));
+                    sizeof(struct bpf_insn));
 
        /* Conversion cannot happen on overlapping memory areas,
         * so we need to keep the user BPF around until the 2nd
@@ -1420,13 +927,13 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
        }
 
        /* 1st pass: calculate the new program length. */
-       err = sk_convert_filter(old_prog, old_len, NULL, &new_len);
+       err = bpf_convert_filter(old_prog, old_len, NULL, &new_len);
        if (err)
                goto out_err_free;
 
        /* Expand fp for appending the new filter representation. */
        old_fp = fp;
-       fp = __sk_migrate_realloc(old_fp, sk, sk_filter_size(new_len));
+       fp = krealloc(old_fp, bpf_prog_size(new_len), GFP_KERNEL);
        if (!fp) {
                /* The old_fp is still around in case we couldn't
                 * allocate new memory, so uncharge on that one.
@@ -1438,17 +945,17 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 
        fp->len = new_len;
 
-       /* 2nd pass: remap sock_filter insns into sock_filter_int insns. */
-       err = sk_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
+       /* 2nd pass: remap sock_filter insns into bpf_insn insns. */
+       err = bpf_convert_filter(old_prog, old_len, fp->insnsi, &new_len);
        if (err)
-               /* 2nd sk_convert_filter() can fail only if it fails
+               /* 2nd bpf_convert_filter() can fail only if it fails
                 * to allocate memory, remapping must succeed. Note,
                 * that at this time old_fp has already been released
-                * by __sk_migrate_realloc().
+                * by krealloc().
                 */
                goto out_err_free;
 
-       sk_filter_select_runtime(fp);
+       bpf_prog_select_runtime(fp);
 
        kfree(old_prog);
        return fp;
@@ -1456,55 +963,20 @@ static struct sk_filter *__sk_migrate_filter(struct sk_filter *fp,
 out_err_free:
        kfree(old_prog);
 out_err:
-       /* Rollback filter setup. */
-       if (sk != NULL)
-               sk_filter_uncharge(sk, fp);
-       else
-               kfree(fp);
+       __bpf_prog_release(fp);
        return ERR_PTR(err);
 }
 
-void __weak bpf_int_jit_compile(struct sk_filter *prog)
-{
-}
-
-/**
- *     sk_filter_select_runtime - select execution runtime for BPF program
- *     @fp: sk_filter populated with internal BPF program
- *
- * try to JIT internal BPF program, if JIT is not available select interpreter
- * BPF program will be executed via SK_RUN_FILTER() macro
- */
-void sk_filter_select_runtime(struct sk_filter *fp)
-{
-       fp->bpf_func = (void *) __sk_run_filter;
-
-       /* Probe if internal BPF can be JITed */
-       bpf_int_jit_compile(fp);
-}
-EXPORT_SYMBOL_GPL(sk_filter_select_runtime);
-
-/* free internal BPF program */
-void sk_filter_free(struct sk_filter *fp)
-{
-       bpf_jit_free(fp);
-}
-EXPORT_SYMBOL_GPL(sk_filter_free);
-
-static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
-                                            struct sock *sk)
+static struct bpf_prog *bpf_prepare_filter(struct bpf_prog *fp)
 {
        int err;
 
        fp->bpf_func = NULL;
        fp->jited = 0;
 
-       err = sk_chk_filter(fp->insns, fp->len);
+       err = bpf_check_classic(fp->insns, fp->len);
        if (err) {
-               if (sk != NULL)
-                       sk_filter_uncharge(sk, fp);
-               else
-                       kfree(fp);
+               __bpf_prog_release(fp);
                return ERR_PTR(err);
        }
 
@@ -1517,38 +989,36 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
         * internal BPF translation for the optimized interpreter.
         */
        if (!fp->jited)
-               fp = __sk_migrate_filter(fp, sk);
+               fp = bpf_migrate_filter(fp);
 
        return fp;
 }
 
 /**
- *     sk_unattached_filter_create - create an unattached filter
- *     @fprog: the filter program
+ *     bpf_prog_create - create an unattached filter
  *     @pfp: the unattached filter that is created
+ *     @fprog: the filter program
  *
  * Create a filter independent of any socket. We first run some
  * sanity checks on it to make sure it does not explode on us later.
  * If an error occurs or there is insufficient memory for the filter
  * a negative errno code is returned. On success the return is zero.
  */
-int sk_unattached_filter_create(struct sk_filter **pfp,
-                               struct sock_fprog_kern *fprog)
+int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog)
 {
-       unsigned int fsize = sk_filter_proglen(fprog);
-       struct sk_filter *fp;
+       unsigned int fsize = bpf_classic_proglen(fprog);
+       struct bpf_prog *fp;
 
        /* Make sure new filter is there and in the right amounts. */
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = kmalloc(sk_filter_size(fprog->len), GFP_KERNEL);
+       fp = kmalloc(bpf_prog_size(fprog->len), GFP_KERNEL);
        if (!fp)
                return -ENOMEM;
 
        memcpy(fp->insns, fprog->filter, fsize);
 
-       atomic_set(&fp->refcnt, 1);
        fp->len = fprog->len;
        /* Since unattached filters are not copied back to user
         * space through sk_get_filter(), we do not need to hold
@@ -1556,23 +1026,23 @@ int sk_unattached_filter_create(struct sk_filter **pfp,
         */
        fp->orig_prog = NULL;
 
-       /* __sk_prepare_filter() already takes care of uncharging
+       /* bpf_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
-       fp = __sk_prepare_filter(fp, NULL);
+       fp = bpf_prepare_filter(fp);
        if (IS_ERR(fp))
                return PTR_ERR(fp);
 
        *pfp = fp;
        return 0;
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_create);
+EXPORT_SYMBOL_GPL(bpf_prog_create);
 
-void sk_unattached_filter_destroy(struct sk_filter *fp)
+void bpf_prog_destroy(struct bpf_prog *fp)
 {
-       sk_filter_release(fp);
+       __bpf_prog_release(fp);
 }
-EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
+EXPORT_SYMBOL_GPL(bpf_prog_destroy);
 
 /**
  *     sk_attach_filter - attach a socket filter
@@ -1587,8 +1057,9 @@ EXPORT_SYMBOL_GPL(sk_unattached_filter_destroy);
 int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
 {
        struct sk_filter *fp, *old_fp;
-       unsigned int fsize = sk_filter_proglen(fprog);
-       unsigned int sk_fsize = sk_filter_size(fprog->len);
+       unsigned int fsize = bpf_classic_proglen(fprog);
+       unsigned int bpf_fsize = bpf_prog_size(fprog->len);
+       struct bpf_prog *prog;
        int err;
 
        if (sock_flag(sk, SOCK_FILTER_LOCKED))
@@ -1598,30 +1069,43 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk)
        if (fprog->filter == NULL)
                return -EINVAL;
 
-       fp = sock_kmalloc(sk, sk_fsize, GFP_KERNEL);
-       if (!fp)
+       prog = kmalloc(bpf_fsize, GFP_KERNEL);
+       if (!prog)
                return -ENOMEM;
 
-       if (copy_from_user(fp->insns, fprog->filter, fsize)) {
-               sock_kfree_s(sk, fp, sk_fsize);
+       if (copy_from_user(prog->insns, fprog->filter, fsize)) {
+               kfree(prog);
                return -EFAULT;
        }
 
-       atomic_set(&fp->refcnt, 1);
-       fp->len = fprog->len;
+       prog->len = fprog->len;
 
-       err = sk_store_orig_filter(fp, fprog);
+       err = bpf_prog_store_orig_filter(prog, fprog);
        if (err) {
-               sk_filter_uncharge(sk, fp);
+               kfree(prog);
                return -ENOMEM;
        }
 
-       /* __sk_prepare_filter() already takes care of uncharging
+       /* bpf_prepare_filter() already takes care of freeing
         * memory in case something goes wrong.
         */
-       fp = __sk_prepare_filter(fp, sk);
-       if (IS_ERR(fp))
-               return PTR_ERR(fp);
+       prog = bpf_prepare_filter(prog);
+       if (IS_ERR(prog))
+               return PTR_ERR(prog);
+
+       fp = kmalloc(sizeof(*fp), GFP_KERNEL);
+       if (!fp) {
+               __bpf_prog_release(prog);
+               return -ENOMEM;
+       }
+       fp->prog = prog;
+
+       atomic_set(&fp->refcnt, 0);
+
+       if (!sk_filter_charge(sk, fp)) {
+               __sk_filter_release(fp);
+               return -ENOMEM;
+       }
 
        old_fp = rcu_dereference_protected(sk->sk_filter,
                                           sock_owned_by_user(sk));
@@ -1670,7 +1154,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
        /* We're copying the filter that has been originally attached,
         * so no conversion/decode needed anymore.
         */
-       fprog = filter->orig_prog;
+       fprog = filter->prog->orig_prog;
 
        ret = fprog->len;
        if (!len)
@@ -1682,7 +1166,7 @@ int sk_get_filter(struct sock *sk, struct sock_filter __user *ubuf,
                goto out;
 
        ret = -EFAULT;
-       if (copy_to_user(ubuf, fprog->filter, sk_filter_proglen(fprog)))
+       if (copy_to_user(ubuf, fprog->filter, bpf_classic_proglen(fprog)))
                goto out;
 
        /* Instead of bytes, the API requests to return the number
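
Taken together, the filter.c changes above split the old all-in-one sk_filter into an unattached struct bpf_prog plus a thin socket-facing sk_filter wrapper, and sk_filter_charge() now returns bool so that attaching can fail cleanly when the socket's optmem budget is exhausted. A condensed sketch of the resulting ownership and accounting order in sk_attach_filter() (the helpers are static to net/core/filter.c; this is illustrative, not standalone code):

        struct sk_filter *fp;

        /* 'prog' has already been through bpf_prepare_filter() here. */
        fp = kmalloc(sizeof(*fp), GFP_KERNEL);
        if (!fp) {
                __bpf_prog_release(prog);
                return -ENOMEM;
        }
        fp->prog = prog;
        atomic_set(&fp->refcnt, 0);     /* sk_filter_charge() takes the first ref */

        if (!sk_filter_charge(sk, fp)) {
                __sk_filter_release(fp);        /* frees prog and wrapper */
                return -ENOMEM;
        }
        /* ... then publish fp via rcu_assign_pointer(sk->sk_filter, fp) ... */
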
index 107ed12..5f362c1 100644 (file)
@@ -80,6 +80,8 @@ ip:
        case htons(ETH_P_IPV6): {
                const struct ipv6hdr *iph;
                struct ipv6hdr _iph;
+               __be32 flow_label;
+
 ipv6:
                iph = skb_header_pointer(skb, nhoff, sizeof(_iph), &_iph);
                if (!iph)
@@ -89,6 +91,21 @@ ipv6:
                flow->src = (__force __be32)ipv6_addr_hash(&iph->saddr);
                flow->dst = (__force __be32)ipv6_addr_hash(&iph->daddr);
                nhoff += sizeof(struct ipv6hdr);
+
+               flow_label = ip6_flowlabel(iph);
+               if (flow_label) {
+                       /* Awesome, the IPv6 packet has a flow label, so we
+                        * can use that to represent the ports without any
+                        * further dissection.
+                        */
+                       flow->n_proto = proto;
+                       flow->ip_proto = ip_proto;
+                       flow->ports = flow_label;
+                       flow->thoff = (u16)nhoff;
+
+                       return true;
+               }
+
                break;
        }
        case htons(ETH_P_8021AD):
@@ -175,6 +192,7 @@ ipv6:
                break;
        }
 
+       flow->n_proto = proto;
        flow->ip_proto = ip_proto;
        flow->ports = skb_flow_get_ports(skb, nhoff, ip_proto);
        flow->thoff = (u16) nhoff;
@@ -195,12 +213,33 @@ static __always_inline u32 __flow_hash_3words(u32 a, u32 b, u32 c)
        return jhash_3words(a, b, c, hashrnd);
 }
 
-static __always_inline u32 __flow_hash_1word(u32 a)
+static inline u32 __flow_hash_from_keys(struct flow_keys *keys)
 {
-       __flow_hash_secret_init();
-       return jhash_1word(a, hashrnd);
+       u32 hash;
+
+       /* get a consistent hash (same value on both flow directions) */
+       if (((__force u32)keys->dst < (__force u32)keys->src) ||
+           (((__force u32)keys->dst == (__force u32)keys->src) &&
+            ((__force u16)keys->port16[1] < (__force u16)keys->port16[0]))) {
+               swap(keys->dst, keys->src);
+               swap(keys->port16[0], keys->port16[1]);
+       }
+
+       hash = __flow_hash_3words((__force u32)keys->dst,
+                                 (__force u32)keys->src,
+                                 (__force u32)keys->ports);
+       if (!hash)
+               hash = 1;
+
+       return hash;
 }
 
+u32 flow_hash_from_keys(struct flow_keys *keys)
+{
+       return __flow_hash_from_keys(keys);
+}
+EXPORT_SYMBOL(flow_hash_from_keys);
+
 /*
  * __skb_get_hash: calculate a flow hash based on src/dst addresses
  * and src/dst port numbers.  Sets hash in skb to non-zero hash value
@@ -210,7 +249,6 @@ static __always_inline u32 __flow_hash_1word(u32 a)
 void __skb_get_hash(struct sk_buff *skb)
 {
        struct flow_keys keys;
-       u32 hash;
 
        if (!skb_flow_dissect(skb, &keys))
                return;
@@ -218,21 +256,9 @@ void __skb_get_hash(struct sk_buff *skb)
        if (keys.ports)
                skb->l4_hash = 1;
 
-       /* get a consistent hash (same value on both flow directions) */
-       if (((__force u32)keys.dst < (__force u32)keys.src) ||
-           (((__force u32)keys.dst == (__force u32)keys.src) &&
-            ((__force u16)keys.port16[1] < (__force u16)keys.port16[0]))) {
-               swap(keys.dst, keys.src);
-               swap(keys.port16[0], keys.port16[1]);
-       }
-
-       hash = __flow_hash_3words((__force u32)keys.dst,
-                                 (__force u32)keys.src,
-                                 (__force u32)keys.ports);
-       if (!hash)
-               hash = 1;
+       skb->sw_hash = 1;
 
-       skb->hash = hash;
+       skb->hash = __flow_hash_from_keys(&keys);
 }
 EXPORT_SYMBOL(__skb_get_hash);
 
@@ -240,7 +266,7 @@ EXPORT_SYMBOL(__skb_get_hash);
  * Returns a Tx hash based on the given packet descriptor and the number of
  * Tx queues to be used as a distribution range.
  */
-u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
+u16 __skb_tx_hash(const struct net_device *dev, struct sk_buff *skb,
                  unsigned int num_tx_queues)
 {
        u32 hash;
@@ -260,13 +286,7 @@ u16 __skb_tx_hash(const struct net_device *dev, const struct sk_buff *skb,
                qcount = dev->tc_to_txq[tc].count;
        }
 
-       if (skb->sk && skb->sk->sk_hash)
-               hash = skb->sk->sk_hash;
-       else
-               hash = (__force u16) skb->protocol;
-       hash = __flow_hash_1word(hash);
-
-       return (u16) (((u64) hash * qcount) >> 32) + qoffset;
+       return (u16) (((u64)skb_get_hash(skb) * qcount) >> 32) + qoffset;
 }
 EXPORT_SYMBOL(__skb_tx_hash);
 
@@ -338,17 +358,10 @@ static inline int get_xps_queue(struct net_device *dev, struct sk_buff *skb)
                if (map) {
                        if (map->len == 1)
                                queue_index = map->queues[0];
-                       else {
-                               u32 hash;
-                               if (skb->sk && skb->sk->sk_hash)
-                                       hash = skb->sk->sk_hash;
-                               else
-                                       hash = (__force u16) skb->protocol ^
-                                           skb->hash;
-                               hash = __flow_hash_1word(hash);
+                       else
                                queue_index = map->queues[
-                                   ((u64)hash * map->len) >> 32];
-                       }
+                                   ((u64)skb_get_hash(skb) * map->len) >> 32];
+
                        if (unlikely(queue_index >= dev->real_num_tx_queues))
                                queue_index = -1;
                }
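
The flow_dissector.c refactor above centralizes the direction-folding step in __flow_hash_from_keys(), so a flow and its reverse hash to the same value, and lets an IPv6 flow label stand in for the ports. A self-contained userspace sketch of the canonicalization (jhash_3words replaced by a trivial mixer, purely for illustration):

        #include <stdint.h>
        #include <stdio.h>

        /* Stand-in for the kernel's jhash_3words(); any 3-word mixer
         * suffices to demonstrate the symmetry property.
         */
        static uint32_t mix3(uint32_t a, uint32_t b, uint32_t c)
        {
                return a * 0x9e3779b9u ^ b * 0x85ebca6bu ^ c;
        }

        static uint32_t flow_hash(uint32_t src, uint32_t dst,
                                  uint16_t sport, uint16_t dport)
        {
                /* Fold both directions onto one canonical order, as in
                 * __flow_hash_from_keys() above.
                 */
                if (dst < src || (dst == src && dport < sport)) {
                        uint32_t ta = src;  src = dst;   dst = ta;
                        uint16_t tp = sport; sport = dport; dport = tp;
                }
                return mix3(dst, src, ((uint32_t)dport << 16) | sport);
        }

        int main(void)
        {
                /* Both directions of the same flow print the same hash. */
                printf("%u\n", flow_hash(0x0a000001, 0x0a000002, 1234, 80));
                printf("%u\n", flow_hash(0x0a000002, 0x0a000001, 80, 1234));
                return 0;
        }
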
index b618694..e1ec45a 100644 (file)
@@ -39,7 +39,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
 {
        int size, ct, err;
 
-       if (m->msg_namelen) {
+       if (m->msg_name && m->msg_namelen) {
                if (mode == VERIFY_READ) {
                        void __user *namep;
                        namep = (void __user __force *) m->msg_name;
@@ -48,10 +48,10 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
                        if (err < 0)
                                return err;
                }
-               if (m->msg_name)
-                       m->msg_name = address;
+               m->msg_name = address;
        } else {
                m->msg_name = NULL;
+               m->msg_namelen = 0;
        }
 
        size = m->msg_iovlen * sizeof(struct iovec);
@@ -74,61 +74,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
        return err;
 }
 
-/*
- *     Copy kernel to iovec. Returns -EFAULT on error.
- */
-
-int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
-                     int offset, int len)
-{
-       int copy;
-       for (; len > 0; ++iov) {
-               /* Skip over the finished iovecs */
-               if (unlikely(offset >= iov->iov_len)) {
-                       offset -= iov->iov_len;
-                       continue;
-               }
-               copy = min_t(unsigned int, iov->iov_len - offset, len);
-               if (copy_to_user(iov->iov_base + offset, kdata, copy))
-                       return -EFAULT;
-               offset = 0;
-               kdata += copy;
-               len -= copy;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_toiovecend);
-
-/*
- *     Copy iovec to kernel. Returns -EFAULT on error.
- */
-
-int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
-                       int offset, int len)
-{
-       /* Skip over the finished iovecs */
-       while (offset >= iov->iov_len) {
-               offset -= iov->iov_len;
-               iov++;
-       }
-
-       while (len > 0) {
-               u8 __user *base = iov->iov_base + offset;
-               int copy = min_t(unsigned int, len, iov->iov_len - offset);
-
-               offset = 0;
-               if (copy_from_user(kdata, base, copy))
-                       return -EFAULT;
-               len -= copy;
-               kdata += copy;
-               iov++;
-       }
-
-       return 0;
-}
-EXPORT_SYMBOL(memcpy_fromiovecend);
-
 /*
  *     And now for the all-in-one: copy and checksum from a user iovec
  *     directly to a datagram
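
The verify_iovec() change above tightens the msg_name contract: the supplied address is used only when both msg_name and msg_namelen are set, and both fields are cleared otherwise, so later code can rely on "name != NULL iff namelen != 0". A minimal userspace sketch of that normalization rule (struct and names invented for illustration):

        struct name_pair {
                void *name;     /* stands in for msghdr.msg_name */
                int   namelen;  /* stands in for msghdr.msg_namelen */
        };

        static void normalize_name(struct name_pair *m, void *address)
        {
                if (m->name && m->namelen) {
                        m->name = address;  /* point at the copied sockaddr */
                } else {
                        m->name = NULL;
                        m->namelen = 0;
                }
        }
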
index 32d872e..ef31fef 100644 (file)
@@ -2249,7 +2249,7 @@ static int pneigh_fill_info(struct sk_buff *skb, struct pneigh_entry *pn,
        ndm->ndm_pad1    = 0;
        ndm->ndm_pad2    = 0;
        ndm->ndm_flags   = pn->flags | NTF_PROXY;
-       ndm->ndm_type    = NDA_DST;
+       ndm->ndm_type    = RTN_UNICAST;
        ndm->ndm_ifindex = pn->dev->ifindex;
        ndm->ndm_state   = NUD_NONE;
 
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
                memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
                       sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
        } else {
+               struct neigh_table *tbl = p->tbl;
                dev_name_source = "default";
-               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1);
-               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1;
-               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2;
-               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3;
+               t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
+               t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
        }
 
        if (handler) {
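
The neigh_sysctl_register() fix above replaces pointer arithmetic past the end of the neigh_parms allocation with pointers to the named gc_* fields of the owning table, so the compiler checks the types and the structure layout may change freely. A toy contrast of the two addressing styles (types invented purely for illustration):

        struct toy_parms { int data; };
        struct toy_table { int gc_interval, gc_thresh1, gc_thresh2, gc_thresh3; };

        static int *gc_thresh1_old(struct toy_parms *p)
        {
                /* Fragile: assumes four ints were co-allocated right after *p. */
                return (int *)(p + 1) + 1;
        }

        static int *gc_thresh1_new(struct toy_table *tbl)
        {
                /* Robust: the field is named; no layout assumption. */
                return &tbl->gc_thresh1;
        }
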
index 1cac29e..9dd0669 100644 (file)
@@ -43,12 +43,12 @@ static ssize_t netdev_show(const struct device *dev,
                           struct device_attribute *attr, char *buf,
                           ssize_t (*format)(const struct net_device *, char *))
 {
-       struct net_device *net = to_net_dev(dev);
+       struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;
 
        read_lock(&dev_base_lock);
-       if (dev_isalive(net))
-               ret = (*format)(net, buf);
+       if (dev_isalive(ndev))
+               ret = (*format)(ndev, buf);
        read_unlock(&dev_base_lock);
 
        return ret;
@@ -56,9 +56,9 @@ static ssize_t netdev_show(const struct device *dev,
 
 /* generate a show function for simple field */
 #define NETDEVICE_SHOW(field, format_string)                           \
-static ssize_t format_##field(const struct net_device *net, char *buf) \
+static ssize_t format_##field(const struct net_device *dev, char *buf) \
 {                                                                      \
-       return sprintf(buf, format_string, net->field);                 \
+       return sprintf(buf, format_string, dev->field);                 \
 }                                                                      \
 static ssize_t field##_show(struct device *dev,                                \
                            struct device_attribute *attr, char *buf)   \
@@ -112,16 +112,35 @@ NETDEVICE_SHOW_RO(ifindex, fmt_dec);
 NETDEVICE_SHOW_RO(type, fmt_dec);
 NETDEVICE_SHOW_RO(link_mode, fmt_dec);
 
+static ssize_t format_name_assign_type(const struct net_device *dev, char *buf)
+{
+       return sprintf(buf, fmt_dec, dev->name_assign_type);
+}
+
+static ssize_t name_assign_type_show(struct device *dev,
+                                    struct device_attribute *attr,
+                                    char *buf)
+{
+       struct net_device *ndev = to_net_dev(dev);
+       ssize_t ret = -EINVAL;
+
+       if (ndev->name_assign_type != NET_NAME_UNKNOWN)
+               ret = netdev_show(dev, attr, buf, format_name_assign_type);
+
+       return ret;
+}
+static DEVICE_ATTR_RO(name_assign_type);
+
 /* use same locking rules as GIFHWADDR ioctl's */
 static ssize_t address_show(struct device *dev, struct device_attribute *attr,
                            char *buf)
 {
-       struct net_device *net = to_net_dev(dev);
+       struct net_device *ndev = to_net_dev(dev);
        ssize_t ret = -EINVAL;
 
        read_lock(&dev_base_lock);
-       if (dev_isalive(net))
-               ret = sysfs_format_mac(buf, net->dev_addr, net->addr_len);
+       if (dev_isalive(ndev))
+               ret = sysfs_format_mac(buf, ndev->dev_addr, ndev->addr_len);
        read_unlock(&dev_base_lock);
        return ret;
 }
@@ -130,18 +149,18 @@ static DEVICE_ATTR_RO(address);
 static ssize_t broadcast_show(struct device *dev,
                              struct device_attribute *attr, char *buf)
 {
-       struct net_device *net = to_net_dev(dev);
-       if (dev_isalive(net))
-               return sysfs_format_mac(buf, net->broadcast, net->addr_len);
+       struct net_device *ndev = to_net_dev(dev);
+       if (dev_isalive(ndev))
+               return sysfs_format_mac(buf, ndev->broadcast, ndev->addr_len);
        return -EINVAL;
 }
 static DEVICE_ATTR_RO(broadcast);
 
-static int change_carrier(struct net_device *net, unsigned long new_carrier)
+static int change_carrier(struct net_device *dev, unsigned long new_carrier)
 {
-       if (!netif_running(net))
+       if (!netif_running(dev))
                return -EINVAL;
-       return dev_change_carrier(net, (bool) new_carrier);
+       return dev_change_carrier(dev, (bool) new_carrier);
 }
 
 static ssize_t carrier_store(struct device *dev, struct device_attribute *attr,
@@ -265,9 +284,9 @@ static DEVICE_ATTR_RO(carrier_changes);
 
 /* read-write attributes */
 
-static int change_mtu(struct net_device *net, unsigned long new_mtu)
+static int change_mtu(struct net_device *dev, unsigned long new_mtu)
 {
-       return dev_set_mtu(net, (int) new_mtu);
+       return dev_set_mtu(dev, (int) new_mtu);
 }
 
 static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
@@ -277,9 +296,9 @@ static ssize_t mtu_store(struct device *dev, struct device_attribute *attr,
 }
 NETDEVICE_SHOW_RW(mtu, fmt_dec);
 
-static int change_flags(struct net_device *net, unsigned long new_flags)
+static int change_flags(struct net_device *dev, unsigned long new_flags)
 {
-       return dev_change_flags(net, (unsigned int) new_flags);
+       return dev_change_flags(dev, (unsigned int) new_flags);
 }
 
 static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
@@ -289,9 +308,9 @@ static ssize_t flags_store(struct device *dev, struct device_attribute *attr,
 }
 NETDEVICE_SHOW_RW(flags, fmt_hex);
 
-static int change_tx_queue_len(struct net_device *net, unsigned long new_len)
+static int change_tx_queue_len(struct net_device *dev, unsigned long new_len)
 {
-       net->tx_queue_len = new_len;
+       dev->tx_queue_len = new_len;
        return 0;
 }
 
@@ -344,9 +363,9 @@ static ssize_t ifalias_show(struct device *dev,
 }
 static DEVICE_ATTR_RW(ifalias);
 
-static int change_group(struct net_device *net, unsigned long new_group)
+static int change_group(struct net_device *dev, unsigned long new_group)
 {
-       dev_set_group(net, (int) new_group);
+       dev_set_group(dev, (int) new_group);
        return 0;
 }
 
@@ -387,6 +406,7 @@ static struct attribute *net_class_attrs[] = {
        &dev_attr_dev_port.attr,
        &dev_attr_iflink.attr,
        &dev_attr_ifindex.attr,
+       &dev_attr_name_assign_type.attr,
        &dev_attr_addr_assign_type.attr,
        &dev_attr_addr_len.attr,
        &dev_attr_link_mode.attr,
@@ -776,20 +796,20 @@ static struct kobj_type rx_queue_ktype = {
        .namespace = rx_queue_namespace
 };
 
-static int rx_queue_add_kobject(struct net_device *net, int index)
+static int rx_queue_add_kobject(struct net_device *dev, int index)
 {
-       struct netdev_rx_queue *queue = net->_rx + index;
+       struct netdev_rx_queue *queue = dev->_rx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;
 
-       kobj->kset = net->queues_kset;
+       kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &rx_queue_ktype, NULL,
            "rx-%u", index);
        if (error)
                goto exit;
 
-       if (net->sysfs_rx_queue_group) {
-               error = sysfs_create_group(kobj, net->sysfs_rx_queue_group);
+       if (dev->sysfs_rx_queue_group) {
+               error = sysfs_create_group(kobj, dev->sysfs_rx_queue_group);
                if (error)
                        goto exit;
        }
@@ -805,18 +825,18 @@ exit:
 #endif /* CONFIG_SYSFS */
 
 int
-net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+net_rx_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 {
 #ifdef CONFIG_SYSFS
        int i;
        int error = 0;
 
 #ifndef CONFIG_RPS
-       if (!net->sysfs_rx_queue_group)
+       if (!dev->sysfs_rx_queue_group)
                return 0;
 #endif
        for (i = old_num; i < new_num; i++) {
-               error = rx_queue_add_kobject(net, i);
+               error = rx_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
@@ -824,10 +844,10 @@ net_rx_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
        }
 
        while (--i >= new_num) {
-               if (net->sysfs_rx_queue_group)
-                       sysfs_remove_group(&net->_rx[i].kobj,
-                                          net->sysfs_rx_queue_group);
-               kobject_put(&net->_rx[i].kobj);
+               if (dev->sysfs_rx_queue_group)
+                       sysfs_remove_group(&dev->_rx[i].kobj,
+                                          dev->sysfs_rx_queue_group);
+               kobject_put(&dev->_rx[i].kobj);
        }
 
        return error;
@@ -1135,13 +1155,13 @@ static struct kobj_type netdev_queue_ktype = {
        .namespace = netdev_queue_namespace,
 };
 
-static int netdev_queue_add_kobject(struct net_device *net, int index)
+static int netdev_queue_add_kobject(struct net_device *dev, int index)
 {
-       struct netdev_queue *queue = net->_tx + index;
+       struct netdev_queue *queue = dev->_tx + index;
        struct kobject *kobj = &queue->kobj;
        int error = 0;
 
-       kobj->kset = net->queues_kset;
+       kobj->kset = dev->queues_kset;
        error = kobject_init_and_add(kobj, &netdev_queue_ktype, NULL,
            "tx-%u", index);
        if (error)
@@ -1164,14 +1184,14 @@ exit:
 #endif /* CONFIG_SYSFS */
 
 int
-netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
+netdev_queue_update_kobjects(struct net_device *dev, int old_num, int new_num)
 {
 #ifdef CONFIG_SYSFS
        int i;
        int error = 0;
 
        for (i = old_num; i < new_num; i++) {
-               error = netdev_queue_add_kobject(net, i);
+               error = netdev_queue_add_kobject(dev, i);
                if (error) {
                        new_num = old_num;
                        break;
@@ -1179,7 +1199,7 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
        }
 
        while (--i >= new_num) {
-               struct netdev_queue *queue = net->_tx + i;
+               struct netdev_queue *queue = dev->_tx + i;
 
 #ifdef CONFIG_BQL
                sysfs_remove_group(&queue->kobj, &dql_group);
@@ -1193,25 +1213,25 @@ netdev_queue_update_kobjects(struct net_device *net, int old_num, int new_num)
 #endif /* CONFIG_SYSFS */
 }
 
-static int register_queue_kobjects(struct net_device *net)
+static int register_queue_kobjects(struct net_device *dev)
 {
        int error = 0, txq = 0, rxq = 0, real_rx = 0, real_tx = 0;
 
 #ifdef CONFIG_SYSFS
-       net->queues_kset = kset_create_and_add("queues",
-           NULL, &net->dev.kobj);
-       if (!net->queues_kset)
+       dev->queues_kset = kset_create_and_add("queues",
+           NULL, &dev->dev.kobj);
+       if (!dev->queues_kset)
                return -ENOMEM;
-       real_rx = net->real_num_rx_queues;
+       real_rx = dev->real_num_rx_queues;
 #endif
-       real_tx = net->real_num_tx_queues;
+       real_tx = dev->real_num_tx_queues;
 
-       error = net_rx_queue_update_kobjects(net, 0, real_rx);
+       error = net_rx_queue_update_kobjects(dev, 0, real_rx);
        if (error)
                goto error;
        rxq = real_rx;
 
-       error = netdev_queue_update_kobjects(net, 0, real_tx);
+       error = netdev_queue_update_kobjects(dev, 0, real_tx);
        if (error)
                goto error;
        txq = real_tx;
@@ -1219,24 +1239,24 @@ static int register_queue_kobjects(struct net_device *net)
        return 0;
 
 error:
-       netdev_queue_update_kobjects(net, txq, 0);
-       net_rx_queue_update_kobjects(net, rxq, 0);
+       netdev_queue_update_kobjects(dev, txq, 0);
+       net_rx_queue_update_kobjects(dev, rxq, 0);
        return error;
 }
 
-static void remove_queue_kobjects(struct net_device *net)
+static void remove_queue_kobjects(struct net_device *dev)
 {
        int real_rx = 0, real_tx = 0;
 
 #ifdef CONFIG_SYSFS
-       real_rx = net->real_num_rx_queues;
+       real_rx = dev->real_num_rx_queues;
 #endif
-       real_tx = net->real_num_tx_queues;
+       real_tx = dev->real_num_tx_queues;
 
-       net_rx_queue_update_kobjects(net, real_rx, 0);
-       netdev_queue_update_kobjects(net, real_tx, 0);
+       net_rx_queue_update_kobjects(dev, real_rx, 0);
+       netdev_queue_update_kobjects(dev, real_tx, 0);
 #ifdef CONFIG_SYSFS
-       kset_unregister(net->queues_kset);
+       kset_unregister(dev->queues_kset);
 #endif
 }
 
@@ -1329,13 +1349,13 @@ static struct class net_class = {
 /* Delete sysfs entries but hold kobject reference until after all
  * netdev references are gone.
  */
-void netdev_unregister_kobject(struct net_device * net)
+void netdev_unregister_kobject(struct net_device *ndev)
 {
-       struct device *dev = &(net->dev);
+       struct device *dev = &(ndev->dev);
 
        kobject_get(&dev->kobj);
 
-       remove_queue_kobjects(net);
+       remove_queue_kobjects(ndev);
 
        pm_runtime_set_memalloc_noio(dev, false);
 
@@ -1343,18 +1363,18 @@ void netdev_unregister_kobject(struct net_device * net)
 }
 
 /* Create sysfs entries for network device. */
-int netdev_register_kobject(struct net_device *net)
+int netdev_register_kobject(struct net_device *ndev)
 {
-       struct device *dev = &(net->dev);
-       const struct attribute_group **groups = net->sysfs_groups;
+       struct device *dev = &(ndev->dev);
+       const struct attribute_group **groups = ndev->sysfs_groups;
        int error = 0;
 
        device_initialize(dev);
        dev->class = &net_class;
-       dev->platform_data = net;
+       dev->platform_data = ndev;
        dev->groups = groups;
 
-       dev_set_name(dev, "%s", net->name);
+       dev_set_name(dev, "%s", ndev->name);
 
 #ifdef CONFIG_SYSFS
        /* Allow for a device specific group */
@@ -1364,10 +1384,10 @@ int netdev_register_kobject(struct net_device *net)
        *groups++ = &netstat_group;
 
 #if IS_ENABLED(CONFIG_WIRELESS_EXT) || IS_ENABLED(CONFIG_CFG80211)
-       if (net->ieee80211_ptr)
+       if (ndev->ieee80211_ptr)
                *groups++ = &wireless_group;
 #if IS_ENABLED(CONFIG_WIRELESS_EXT)
-       else if (net->wireless_handlers)
+       else if (ndev->wireless_handlers)
                *groups++ = &wireless_group;
 #endif
 #endif
@@ -1377,7 +1397,7 @@ int netdev_register_kobject(struct net_device *net)
        if (error)
                return error;
 
-       error = register_queue_kobjects(net);
+       error = register_queue_kobjects(ndev);
        if (error) {
                device_del(dev);
                return error;
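
Besides the net -> ndev/dev renames, the net-sysfs.c section above adds a name_assign_type attribute whose read returns -EINVAL while the device is still NET_NAME_UNKNOWN. A small userspace sketch of consuming it, assuming the usual /sys/class/net/<iface>/ layout:

        #include <stdio.h>

        /* Returns the numeric name_assign_type, or -1 if the attribute is
         * unreadable (e.g. the kernel reports EINVAL for NET_NAME_UNKNOWN).
         */
        static int read_name_assign_type(const char *ifname)
        {
                char path[128];
                int val = -1;
                FILE *f;

                snprintf(path, sizeof(path),
                         "/sys/class/net/%s/name_assign_type", ifname);
                f = fopen(path, "r");
                if (!f)
                        return -1;
                if (fscanf(f, "%d", &val) != 1)
                        val = -1;
                fclose(f);
                return val;
        }
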
index e33937f..907fb5e 100644 (file)
@@ -822,7 +822,8 @@ void __netpoll_cleanup(struct netpoll *np)
 
                RCU_INIT_POINTER(np->dev->npinfo, NULL);
                call_rcu_bh(&npinfo->rcu, rcu_cleanup_netpoll_info);
-       }
+       } else
+               RCU_INIT_POINTER(np->dev->npinfo, NULL);
 }
 EXPORT_SYMBOL_GPL(__netpoll_cleanup);
 
index fc17a9d..8b849dd 100644 (file)
@@ -69,8 +69,9 @@
  * for running devices in the if_list and sends packets until count is 0; the
  * thread also checks thread->control, which is used for inter-process
  * communication. The controlling process "posts" operations to the threads this
- * way. The if_lock should be possible to remove when add/rem_device is merged
- * into this too.
+ * way.
+ * The if_list is RCU-protected, and the if_lock remains to protect updating
+ * of if_list from "add_device", as it is invoked from userspace (via proc write).
  *
  * By design there should only be *one* "controlling" process. In practice
  * multiple write accesses gives unpredictable result. Understood by "write"
 #define T_REMDEVALL   (1<<2)   /* Remove all devs */
 #define T_REMDEV      (1<<3)   /* Remove one dev */
 
-/* If lock -- can be removed after some work */
+/* If lock -- protects updating of if_list */
 #define   if_lock(t)           spin_lock(&(t->if_lock));
 #define   if_unlock(t)           spin_unlock(&(t->if_lock));
 
@@ -241,6 +242,7 @@ struct pktgen_dev {
        struct proc_dir_entry *entry;   /* proc file */
        struct pktgen_thread *pg_thread;/* the owner */
        struct list_head list;          /* chaining in the thread's run-queue */
+       struct rcu_head  rcu;           /* freed by RCU */
 
        int running;            /* if false, the test will stop */
 
@@ -802,7 +804,6 @@ static int strn_len(const char __user * user_buffer, unsigned int maxlen)
                case '\t':
                case ' ':
                        goto done_str;
-                       break;
                default:
                        break;
                }
@@ -1737,14 +1738,14 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
 
        seq_puts(seq, "Running: ");
 
-       if_lock(t);
-       list_for_each_entry(pkt_dev, &t->if_list, list)
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
                if (pkt_dev->running)
                        seq_printf(seq, "%s ", pkt_dev->odevname);
 
        seq_puts(seq, "\nStopped: ");
 
-       list_for_each_entry(pkt_dev, &t->if_list, list)
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
                if (!pkt_dev->running)
                        seq_printf(seq, "%s ", pkt_dev->odevname);
 
@@ -1753,7 +1754,7 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        else
                seq_puts(seq, "\nResult: NA\n");
 
-       if_unlock(t);
+       rcu_read_unlock();
 
        return 0;
 }
@@ -1878,10 +1879,8 @@ static struct pktgen_dev *__pktgen_NN_threads(const struct pktgen_net *pn,
                pkt_dev = pktgen_find_dev(t, ifname, exact);
                if (pkt_dev) {
                        if (remove) {
-                               if_lock(t);
                                pkt_dev->removal_mark = 1;
                                t->control |= T_REMDEV;
-                               if_unlock(t);
                        }
                        break;
                }
@@ -1931,7 +1930,8 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
        list_for_each_entry(t, &pn->pktgen_threads, th_list) {
                struct pktgen_dev *pkt_dev;
 
-               list_for_each_entry(pkt_dev, &t->if_list, list) {
+               rcu_read_lock();
+               list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
                        if (pkt_dev->odev != dev)
                                continue;
 
@@ -1946,6 +1946,7 @@ static void pktgen_change_name(const struct pktgen_net *pn, struct net_device *d
                                       dev->name);
                        break;
                }
+               rcu_read_unlock();
        }
 }
 
@@ -2997,8 +2998,8 @@ static void pktgen_run(struct pktgen_thread *t)
 
        func_enter();
 
-       if_lock(t);
-       list_for_each_entry(pkt_dev, &t->if_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
 
                /*
                 * setup odev and create initial packet.
@@ -3007,18 +3008,18 @@ static void pktgen_run(struct pktgen_thread *t)
 
                if (pkt_dev->odev) {
                        pktgen_clear_counters(pkt_dev);
-                       pkt_dev->running = 1;   /* Cranke yeself! */
                        pkt_dev->skb = NULL;
                        pkt_dev->started_at = pkt_dev->next_tx = ktime_get();
 
                        set_pkt_overhead(pkt_dev);
 
                        strcpy(pkt_dev->result, "Starting");
+                       pkt_dev->running = 1;   /* Cranke yeself! */
                        started++;
                } else
                        strcpy(pkt_dev->result, "Error starting");
        }
-       if_unlock(t);
+       rcu_read_unlock();
        if (started)
                t->control &= ~(T_STOP);
 }
@@ -3041,27 +3042,25 @@ static int thread_is_running(const struct pktgen_thread *t)
 {
        const struct pktgen_dev *pkt_dev;
 
-       list_for_each_entry(pkt_dev, &t->if_list, list)
-               if (pkt_dev->running)
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list)
+               if (pkt_dev->running) {
+                       rcu_read_unlock();
                        return 1;
+               }
+       rcu_read_unlock();
        return 0;
 }
 
 static int pktgen_wait_thread_run(struct pktgen_thread *t)
 {
-       if_lock(t);
-
        while (thread_is_running(t)) {
 
-               if_unlock(t);
-
                msleep_interruptible(100);
 
                if (signal_pending(current))
                        goto signal;
-               if_lock(t);
        }
-       if_unlock(t);
        return 1;
 signal:
        return 0;
@@ -3166,10 +3165,10 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
                return -EINVAL;
        }
 
+       pkt_dev->running = 0;
        kfree_skb(pkt_dev->skb);
        pkt_dev->skb = NULL;
        pkt_dev->stopped_at = ktime_get();
-       pkt_dev->running = 0;
 
        show_results(pkt_dev, nr_frags);
 
@@ -3180,9 +3179,8 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
 {
        struct pktgen_dev *pkt_dev, *best = NULL;
 
-       if_lock(t);
-
-       list_for_each_entry(pkt_dev, &t->if_list, list) {
+       rcu_read_lock();
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
                if (!pkt_dev->running)
                        continue;
                if (best == NULL)
@@ -3190,7 +3188,8 @@ static struct pktgen_dev *next_to_run(struct pktgen_thread *t)
                else if (ktime_compare(pkt_dev->next_tx, best->next_tx) < 0)
                        best = pkt_dev;
        }
-       if_unlock(t);
+       rcu_read_unlock();
+
        return best;
 }
 
@@ -3200,13 +3199,13 @@ static void pktgen_stop(struct pktgen_thread *t)
 
        func_enter();
 
-       if_lock(t);
+       rcu_read_lock();
 
-       list_for_each_entry(pkt_dev, &t->if_list, list) {
+       list_for_each_entry_rcu(pkt_dev, &t->if_list, list) {
                pktgen_stop_device(pkt_dev);
        }
 
-       if_unlock(t);
+       rcu_read_unlock();
 }
 
 /*
@@ -3220,8 +3219,6 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
 
        func_enter();
 
-       if_lock(t);
-
        list_for_each_safe(q, n, &t->if_list) {
                cur = list_entry(q, struct pktgen_dev, list);
 
@@ -3235,8 +3232,6 @@ static void pktgen_rem_one_if(struct pktgen_thread *t)
 
                break;
        }
-
-       if_unlock(t);
 }
 
 static void pktgen_rem_all_ifs(struct pktgen_thread *t)
@@ -3248,8 +3243,6 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
 
        /* Remove all devices, free mem */
 
-       if_lock(t);
-
        list_for_each_safe(q, n, &t->if_list) {
                cur = list_entry(q, struct pktgen_dev, list);
 
@@ -3258,8 +3251,6 @@ static void pktgen_rem_all_ifs(struct pktgen_thread *t)
 
                pktgen_remove_device(t, cur);
        }
-
-       if_unlock(t);
 }
 
 static void pktgen_rem_thread(struct pktgen_thread *t)
@@ -3407,10 +3398,10 @@ static int pktgen_thread_worker(void *arg)
 
        pr_debug("starting pktgen/%d:  pid=%d\n", cpu, task_pid_nr(current));
 
-       set_current_state(TASK_INTERRUPTIBLE);
-
        set_freezable();
 
+       __set_current_state(TASK_RUNNING);
+
        while (!kthread_should_stop()) {
                pkt_dev = next_to_run(t);
 
@@ -3424,8 +3415,6 @@ static int pktgen_thread_worker(void *arg)
                        continue;
                }
 
-               __set_current_state(TASK_RUNNING);
-
                if (likely(pkt_dev)) {
                        pktgen_xmit(pkt_dev);
 
@@ -3456,9 +3445,8 @@ static int pktgen_thread_worker(void *arg)
                }
 
                try_to_freeze();
-
-               set_current_state(TASK_INTERRUPTIBLE);
        }
+       set_current_state(TASK_INTERRUPTIBLE);
 
        pr_debug("%s stopping all device\n", t->tsk->comm);
        pktgen_stop(t);
@@ -3485,8 +3473,8 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
        struct pktgen_dev *p, *pkt_dev = NULL;
        size_t len = strlen(ifname);
 
-       if_lock(t);
-       list_for_each_entry(p, &t->if_list, list)
+       rcu_read_lock();
+       list_for_each_entry_rcu(p, &t->if_list, list)
                if (strncmp(p->odevname, ifname, len) == 0) {
                        if (p->odevname[len]) {
                                if (exact || p->odevname[len] != '@')
@@ -3496,7 +3484,7 @@ static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
                        break;
                }
 
-       if_unlock(t);
+       rcu_read_unlock();
        pr_debug("find_dev(%s) returning %p\n", ifname, pkt_dev);
        return pkt_dev;
 }
@@ -3510,6 +3498,12 @@ static int add_dev_to_thread(struct pktgen_thread *t,
 {
        int rv = 0;
 
+       /* This function cannot be called concurrently, as it is called
+        * under the pktgen_thread_lock mutex, but it can run from
+        * userspace on a different CPU than the kthread.  The if_lock()
+        * is used here to sync with concurrent instances of
+        * _rem_dev_from_if_list() invoked via kthread, which is also
+        * updating the if_list. */
        if_lock(t);
 
        if (pkt_dev->pg_thread) {
@@ -3518,9 +3512,9 @@ static int add_dev_to_thread(struct pktgen_thread *t,
                goto out;
        }
 
-       list_add(&pkt_dev->list, &t->if_list);
-       pkt_dev->pg_thread = t;
        pkt_dev->running = 0;
+       pkt_dev->pg_thread = t;
+       list_add_rcu(&pkt_dev->list, &t->if_list);
 
 out:
        if_unlock(t);
@@ -3675,11 +3669,13 @@ static void _rem_dev_from_if_list(struct pktgen_thread *t,
        struct list_head *q, *n;
        struct pktgen_dev *p;
 
+       if_lock(t);
        list_for_each_safe(q, n, &t->if_list) {
                p = list_entry(q, struct pktgen_dev, list);
                if (p == pkt_dev)
-                       list_del(&p->list);
+                       list_del_rcu(&p->list);
        }
+       if_unlock(t);
 }
 
 static int pktgen_remove_device(struct pktgen_thread *t,
@@ -3699,20 +3695,22 @@ static int pktgen_remove_device(struct pktgen_thread *t,
                pkt_dev->odev = NULL;
        }
 
-       /* And update the thread if_list */
-
-       _rem_dev_from_if_list(t, pkt_dev);
-
+       /* Remove proc before the if_list entry, because add_device uses
+        * the list to determine if the interface already exists; this
+        * avoids a race with proc_create_data(). */
        if (pkt_dev->entry)
                proc_remove(pkt_dev->entry);
 
+       /* And update the thread if_list */
+       _rem_dev_from_if_list(t, pkt_dev);
+
 #ifdef CONFIG_XFRM
        free_SAs(pkt_dev);
 #endif
        vfree(pkt_dev->flows);
        if (pkt_dev->page)
                put_page(pkt_dev->page);
-       kfree(pkt_dev);
+       kfree_rcu(pkt_dev, rcu);
        return 0;
 }
 
@@ -3812,6 +3810,7 @@ static void __exit pg_cleanup(void)
 {
        unregister_netdevice_notifier(&pktgen_notifier_block);
        unregister_pernet_subsys(&pg_net_ops);
+       /* Don't need rcu_barrier() due to use of kfree_rcu() */
 }
 
 module_init(pg_init);
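
The pktgen conversion above is the standard RCU list recipe: readers walk the list under rcu_read_lock() with list_for_each_entry_rcu(), the writer updates it with list_add_rcu()/list_del_rcu() under if_lock, and frees are deferred past the grace period with kfree_rcu(). A compressed kernel-style sketch of that recipe (names shortened; not the actual pktgen code):

        #include <linux/rculist.h>
        #include <linux/slab.h>
        #include <linux/spinlock.h>

        struct item {
                struct list_head list;
                struct rcu_head  rcu;
                int running;
        };

        static bool any_running(struct list_head *head)
        {
                struct item *it;

                rcu_read_lock();        /* reader: no writer lock needed */
                list_for_each_entry_rcu(it, head, list)
                        if (it->running) {
                                rcu_read_unlock();
                                return true;
                        }
                rcu_read_unlock();
                return false;
        }

        static void remove_item(spinlock_t *lock, struct item *it)
        {
                spin_lock(lock);        /* writer: serialize list updates */
                list_del_rcu(&it->list);
                spin_unlock(lock);
                kfree_rcu(it, rcu);     /* freed after a grace period */
        }
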
index d3027a7..4eab4a9 100644 (file)
  * test_8021q:
  *   jneq #0x8100, test_ieee1588   ; ETH_P_8021Q ?
  *   ldh [16]                      ; load inner type
- *   jneq #0x88f7, drop_ieee1588   ; ETH_P_1588 ?
+ *   jneq #0x88f7, test_8021q_ipv4 ; ETH_P_1588 ?
  *   ldb [18]                      ; load payload
  *   and #0x8                      ; as we don't have ports here, test
  *   jneq #0x0, drop_ieee1588      ; for PTP_GEN_BIT and drop these
  *   ldh [18]                      ; reload payload
  *   and #0xf                      ; mask PTP_CLASS_VMASK
- *   or #0x40                      ; PTP_CLASS_V2_VLAN
+ *   or #0x70                      ; PTP_CLASS_VLAN|PTP_CLASS_L2
+ *   ret a                         ; return PTP class
+ *
+ * ; PTP over UDP over IPv4 over 802.1Q over Ethernet
+ * test_8021q_ipv4:
+ *   jneq #0x800, test_8021q_ipv6  ; ETH_P_IP ?
+ *   ldb [27]                      ; load proto
+ *   jneq #17, drop_8021q_ipv4     ; IPPROTO_UDP ?
+ *   ldh [24]                      ; load frag offset field
+ *   jset #0x1fff, drop_8021q_ipv4; don't allow fragments
+ *   ldxb 4*([18]&0xf)             ; load IP header len
+ *   ldh [x + 20]                  ; load UDP dst port
+ *   jneq #319, drop_8021q_ipv4    ; is port PTP_EV_PORT ?
+ *   ldh [x + 26]                  ; load payload
+ *   and #0xf                      ; mask PTP_CLASS_VMASK
+ *   or #0x50                      ; PTP_CLASS_VLAN|PTP_CLASS_IPV4
+ *   ret a                         ; return PTP class
+ *   drop_8021q_ipv4: ret #0x0     ; PTP_CLASS_NONE
+ *
+ * ; PTP over UDP over IPv6 over 802.1Q over Ethernet
+ * test_8021q_ipv6:
+ *   jneq #0x86dd, drop_8021q_ipv6 ; ETH_P_IPV6 ?
+ *   ldb [24]                      ; load proto
+ *   jneq #17, drop_8021q_ipv6           ; IPPROTO_UDP ?
+ *   ldh [60]                      ; load UDP dst port
+ *   jneq #319, drop_8021q_ipv6          ; is port PTP_EV_PORT ?
+ *   ldh [66]                      ; load payload
+ *   and #0xf                      ; mask PTP_CLASS_VMASK
+ *   or #0x60                      ; PTP_CLASS_VLAN|PTP_CLASS_IPV6
  *   ret a                         ; return PTP class
+ *   drop_8021q_ipv6: ret #0x0     ; PTP_CLASS_NONE
  *
  * ; PTP over Ethernet
  * test_ieee1588:
 #include <linux/filter.h>
 #include <linux/ptp_classify.h>
 
-static struct sk_filter *ptp_insns __read_mostly;
+static struct bpf_prog *ptp_insns __read_mostly;
 
 unsigned int ptp_classify_raw(const struct sk_buff *skb)
 {
-       return SK_RUN_FILTER(ptp_insns, skb);
+       return BPF_PROG_RUN(ptp_insns, skb);
 }
 EXPORT_SYMBOL_GPL(ptp_classify_raw);
 
@@ -113,16 +142,39 @@ void __init ptp_classifier_init(void)
                { 0x44,  0,  0, 0x00000020 },
                { 0x16,  0,  0, 0x00000000 },
                { 0x06,  0,  0, 0x00000000 },
-               { 0x15,  0,  9, 0x00008100 },
+               { 0x15,  0, 32, 0x00008100 },
                { 0x28,  0,  0, 0x00000010 },
-               { 0x15,  0, 15, 0x000088f7 },
+               { 0x15,  0,  7, 0x000088f7 },
                { 0x30,  0,  0, 0x00000012 },
                { 0x54,  0,  0, 0x00000008 },
-               { 0x15,  0, 12, 0x00000000 },
+               { 0x15,  0, 35, 0x00000000 },
                { 0x28,  0,  0, 0x00000012 },
                { 0x54,  0,  0, 0x0000000f },
-               { 0x44,  0,  0, 0x00000040 },
+               { 0x44,  0,  0, 0x00000070 },
+               { 0x16,  0,  0, 0x00000000 },
+               { 0x15,  0, 12, 0x00000800 },
+               { 0x30,  0,  0, 0x0000001b },
+               { 0x15,  0,  9, 0x00000011 },
+               { 0x28,  0,  0, 0x00000018 },
+               { 0x45,  7,  0, 0x00001fff },
+               { 0xb1,  0,  0, 0x00000012 },
+               { 0x48,  0,  0, 0x00000014 },
+               { 0x15,  0,  4, 0x0000013f },
+               { 0x48,  0,  0, 0x0000001a },
+               { 0x54,  0,  0, 0x0000000f },
+               { 0x44,  0,  0, 0x00000050 },
+               { 0x16,  0,  0, 0x00000000 },
+               { 0x06,  0,  0, 0x00000000 },
+               { 0x15,  0,  8, 0x000086dd },
+               { 0x30,  0,  0, 0x00000018 },
+               { 0x15,  0,  6, 0x00000011 },
+               { 0x28,  0,  0, 0x0000003c },
+               { 0x15,  0,  4, 0x0000013f },
+               { 0x28,  0,  0, 0x00000042 },
+               { 0x54,  0,  0, 0x0000000f },
+               { 0x44,  0,  0, 0x00000060 },
                { 0x16,  0,  0, 0x00000000 },
+               { 0x06,  0,  0, 0x00000000 },
                { 0x15,  0,  7, 0x000088f7 },
                { 0x30,  0,  0, 0x0000000e },
                { 0x54,  0,  0, 0x00000008 },
@@ -137,5 +189,5 @@ void __init ptp_classifier_init(void)
                .len = ARRAY_SIZE(ptp_filter), .filter = ptp_filter,
        };
 
-       BUG_ON(sk_unattached_filter_create(&ptp_insns, &ptp_prog));
+       BUG_ON(bpf_prog_create(&ptp_insns, &ptp_prog));
 }
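
Each { code, jt, jf, k } quadruple above is one classic BPF instruction; for example { 0x15, 0, 32, 0x00008100 } is "jeq #0x8100, jt 0, jf 32", i.e. the "jneq #0x8100, test_8021q_ipv4" line of the assembler listing. A tiny decoder sketch covering just the opcodes this filter uses (struct and mnemonics abbreviated, illustration only):

        #include <stdint.h>
        #include <stdio.h>

        struct sock_filter_like { uint16_t code; uint8_t jt, jf; uint32_t k; };

        /* Print a rough mnemonic for the opcodes the PTP filter above uses. */
        static void decode(const struct sock_filter_like *i)
        {
                switch (i->code) {
                case 0x28: printf("ldh [%u]\n", (unsigned)i->k); break;          /* BPF_LD|H|ABS */
                case 0x30: printf("ldb [%u]\n", (unsigned)i->k); break;          /* BPF_LD|B|ABS */
                case 0xb1: printf("ldxb 4*([%u]&0xf)\n", (unsigned)i->k); break; /* BPF_LDX|MSH */
                case 0x48: printf("ldh [x + %u]\n", (unsigned)i->k); break;      /* BPF_LD|H|IND */
                case 0x15: printf("jeq #0x%x, jt %d, jf %d\n",
                                  (unsigned)i->k, i->jt, i->jf); break;          /* BPF_JMP|JEQ|K */
                case 0x45: printf("jset #0x%x, jt %d, jf %d\n",
                                  (unsigned)i->k, i->jt, i->jf); break;          /* BPF_JMP|JSET|K */
                case 0x54: printf("and #0x%x\n", (unsigned)i->k); break;         /* BPF_ALU|AND|K */
                case 0x44: printf("or #0x%x\n", (unsigned)i->k); break;          /* BPF_ALU|OR|K */
                case 0x16: printf("ret a\n"); break;                             /* BPF_RET|A */
                case 0x06: printf("ret #%u\n", (unsigned)i->k); break;           /* BPF_RET|K */
                default:   printf(".word 0x%04x\n", i->code); break;
                }
        }

        int main(void)
        {
                const struct sock_filter_like jneq_8100 = { 0x15, 0, 32, 0x8100 };
                decode(&jneq_8100);     /* prints: jeq #0x8100, jt 0, jf 32 */
                return 0;
        }
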
index 467f326..04db318 100644 (file)
@@ -41,27 +41,27 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
                      unsigned int nr_table_entries)
 {
        size_t lopt_size = sizeof(struct listen_sock);
-       struct listen_sock *lopt;
+       struct listen_sock *lopt = NULL;
 
        nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
        nr_table_entries = max_t(u32, nr_table_entries, 8);
        nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
        lopt_size += nr_table_entries * sizeof(struct request_sock *);
-       if (lopt_size > PAGE_SIZE)
+
+       if (lopt_size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
+               lopt = kzalloc(lopt_size, GFP_KERNEL |
+                                         __GFP_NOWARN |
+                                         __GFP_NORETRY);
+       if (!lopt)
                lopt = vzalloc(lopt_size);
-       else
-               lopt = kzalloc(lopt_size, GFP_KERNEL);
-       if (lopt == NULL)
+       if (!lopt)
                return -ENOMEM;
 
-       for (lopt->max_qlen_log = 3;
-            (1 << lopt->max_qlen_log) < nr_table_entries;
-            lopt->max_qlen_log++);
-
        get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
        rwlock_init(&queue->syn_wait_lock);
        queue->rskq_accept_head = NULL;
        lopt->nr_table_entries = nr_table_entries;
+       lopt->max_qlen_log = ilog2(nr_table_entries);
 
        write_lock_bh(&queue->syn_wait_lock);
        queue->listen_opt = lopt;
@@ -72,22 +72,8 @@ int reqsk_queue_alloc(struct request_sock_queue *queue,
 
 void __reqsk_queue_destroy(struct request_sock_queue *queue)
 {
-       struct listen_sock *lopt;
-       size_t lopt_size;
-
-       /*
-        * this is an error recovery path only
-        * no locking needed and the lopt is not NULL
-        */
-
-       lopt = queue->listen_opt;
-       lopt_size = sizeof(struct listen_sock) +
-               lopt->nr_table_entries * sizeof(struct request_sock *);
-
-       if (lopt_size > PAGE_SIZE)
-               vfree(lopt);
-       else
-               kfree(lopt);
+       /* This is an error recovery path only, no locking needed */
+       kvfree(queue->listen_opt);
 }
 
 static inline struct listen_sock *reqsk_queue_yank_listen_sk(
@@ -107,8 +93,6 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
 {
        /* make all the listen_opt local to us */
        struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
-       size_t lopt_size = sizeof(struct listen_sock) +
-               lopt->nr_table_entries * sizeof(struct request_sock *);
 
        if (lopt->qlen != 0) {
                unsigned int i;
@@ -125,10 +109,7 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
        }
 
        WARN_ON(lopt->qlen != 0);
-       if (lopt_size > PAGE_SIZE)
-               vfree(lopt);
-       else
-               kfree(lopt);
+       kvfree(lopt);
 }
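
The request_sock_queue changes above collapse two open-coded size checks into a single pattern: try a physically contiguous zeroed allocation first (only for sizes the page allocator serves cheaply, without retries or allocation-failure warnings), fall back to vzalloc(), and free either result with kvfree(). A minimal sketch of the pattern in isolation (the helper name is illustrative, not part of this patch):

/* Sketch: opportunistic kzalloc with vzalloc fallback; pair with kvfree(). */
static void *opportunistic_zalloc(size_t size)
{
	void *p = NULL;

	/* Contiguous pages are preferred but not worth fighting for. */
	if (size <= (PAGE_SIZE << PAGE_ALLOC_COSTLY_ORDER))
		p = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
	if (!p)
		p = vzalloc(size);	/* virtually contiguous fallback */
	return p;			/* release with kvfree(p) */
}

Note also that the removed max_qlen_log loop and the new ilog2(nr_table_entries) agree: nr_table_entries has just been rounded up to a power of two of at least 16, so ilog2() returns exactly the base-2 logarithm the loop used to compute.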
 
 /*
index 1063996..8d39071 100644 (file)
@@ -299,7 +299,12 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
        if (rtnl_link_ops_get(ops->kind))
                return -EEXIST;
 
-       if (!ops->dellink)
+       /* If ops does not provide ->setup, it cannot be used to
+        * create a device; in that case do not install the default
+        * ->dellink either, which leaves rtnl_dellink() disabled
+        * for such ops.
+        */
+       if (ops->setup && !ops->dellink)
                ops->dellink = unregister_netdevice_queue;
 
        list_add_tail(&ops->list, &link_ops);
@@ -1777,7 +1782,7 @@ static int rtnl_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
                return -ENODEV;
 
        ops = dev->rtnl_link_ops;
-       if (!ops)
+       if (!ops || !ops->dellink)
                return -EOPNOTSUPP;
 
        ops->dellink(dev, &list_kill);
@@ -1805,7 +1810,8 @@ int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm)
 EXPORT_SYMBOL(rtnl_configure_link);
 
 struct net_device *rtnl_create_link(struct net *net,
-       char *ifname, const struct rtnl_link_ops *ops, struct nlattr *tb[])
+       char *ifname, unsigned char name_assign_type,
+       const struct rtnl_link_ops *ops, struct nlattr *tb[])
 {
        int err;
        struct net_device *dev;
@@ -1823,8 +1829,8 @@ struct net_device *rtnl_create_link(struct net *net,
                num_rx_queues = ops->get_num_rx_queues();
 
        err = -ENOMEM;
-       dev = alloc_netdev_mqs(ops->priv_size, ifname, ops->setup,
-                              num_tx_queues, num_rx_queues);
+       dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
+                              ops->setup, num_tx_queues, num_rx_queues);
        if (!dev)
                goto err;
 
@@ -1889,6 +1895,7 @@ static int rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh)
        char ifname[IFNAMSIZ];
        struct nlattr *tb[IFLA_MAX+1];
        struct nlattr *linkinfo[IFLA_INFO_MAX+1];
+       unsigned char name_assign_type = NET_NAME_USER;
        int err;
 
 #ifdef CONFIG_MODULES
@@ -2038,14 +2045,19 @@ replay:
                        return -EOPNOTSUPP;
                }
 
-               if (!ifname[0])
+               if (!ops->setup)
+                       return -EOPNOTSUPP;
+
+               if (!ifname[0]) {
                        snprintf(ifname, IFNAMSIZ, "%s%%d", ops->kind);
+                       name_assign_type = NET_NAME_ENUM;
+               }
 
                dest_net = rtnl_link_get_net(net, tb);
                if (IS_ERR(dest_net))
                        return PTR_ERR(dest_net);
 
-               dev = rtnl_create_link(dest_net, ifname, ops, tb);
+               dev = rtnl_create_link(dest_net, ifname, name_assign_type, ops, tb);
                if (IS_ERR(dev)) {
                        err = PTR_ERR(dev);
                        goto out;
@@ -2380,22 +2392,20 @@ int ndo_dflt_fdb_del(struct ndmsg *ndm,
                     struct net_device *dev,
                     const unsigned char *addr)
 {
-       int err = -EOPNOTSUPP;
+       int err = -EINVAL;
 
        /* If aging addresses are supported device will need to
         * implement its own handler for this.
         */
        if (!(ndm->ndm_state & NUD_PERMANENT)) {
                pr_info("%s: FDB only supports static addresses\n", dev->name);
-               return -EINVAL;
+               return err;
        }
 
        if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr))
                err = dev_uc_del(dev, addr);
        else if (is_multicast_ether_addr(addr))
                err = dev_mc_del(dev, addr);
-       else
-               err = -EINVAL;
 
        return err;
 }
@@ -2509,6 +2519,7 @@ skip:
 int ndo_dflt_fdb_dump(struct sk_buff *skb,
                      struct netlink_callback *cb,
                      struct net_device *dev,
+                     struct net_device *filter_dev,
                      int idx)
 {
        int err;
@@ -2526,28 +2537,72 @@ EXPORT_SYMBOL(ndo_dflt_fdb_dump);
 
 static int rtnl_fdb_dump(struct sk_buff *skb, struct netlink_callback *cb)
 {
-       int idx = 0;
-       struct net *net = sock_net(skb->sk);
        struct net_device *dev;
+       struct nlattr *tb[IFLA_MAX+1];
+       struct net_device *bdev = NULL;
+       struct net_device *br_dev = NULL;
+       const struct net_device_ops *ops = NULL;
+       const struct net_device_ops *cops = NULL;
+       struct ifinfomsg *ifm = nlmsg_data(cb->nlh);
+       struct net *net = sock_net(skb->sk);
+       int brport_idx = 0;
+       int br_idx = 0;
+       int idx = 0;
 
-       rcu_read_lock();
-       for_each_netdev_rcu(net, dev) {
-               if (dev->priv_flags & IFF_BRIDGE_PORT) {
-                       struct net_device *br_dev;
-                       const struct net_device_ops *ops;
+       if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX,
+                       ifla_policy) == 0) {
+               if (tb[IFLA_MASTER])
+                       br_idx = nla_get_u32(tb[IFLA_MASTER]);
+       }
+
+       brport_idx = ifm->ifi_index;
 
-                       br_dev = netdev_master_upper_dev_get(dev);
-                       ops = br_dev->netdev_ops;
-                       if (ops->ndo_fdb_dump)
-                               idx = ops->ndo_fdb_dump(skb, cb, dev, idx);
+       if (br_idx) {
+               br_dev = __dev_get_by_index(net, br_idx);
+               if (!br_dev)
+                       return -ENODEV;
+
+               ops = br_dev->netdev_ops;
+               bdev = br_dev;
+       }
+
+       for_each_netdev(net, dev) {
+               if (brport_idx && (dev->ifindex != brport_idx))
+                       continue;
+
+               if (!br_idx) { /* user did not specify a bridge */
+                       if (dev->priv_flags & IFF_BRIDGE_PORT) {
+                               br_dev = netdev_master_upper_dev_get(dev);
+                               cops = br_dev->netdev_ops;
+                       }
+
+                       bdev = dev;
+               } else {
+                       if (dev != br_dev &&
+                           !(dev->priv_flags & IFF_BRIDGE_PORT))
+                               continue;
+
+                       if (br_dev != netdev_master_upper_dev_get(dev) &&
+                           !(dev->priv_flags & IFF_EBRIDGE))
+                               continue;
+
+                       bdev = br_dev;
+                       cops = ops;
+               }
+
+               if (dev->priv_flags & IFF_BRIDGE_PORT) {
+                       if (cops && cops->ndo_fdb_dump)
+                               idx = cops->ndo_fdb_dump(skb, cb, br_dev, dev,
+                                                        idx);
                }
 
+               idx = ndo_dflt_fdb_dump(skb, cb, dev, NULL, idx);
                if (dev->netdev_ops->ndo_fdb_dump)
-                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, dev, idx);
-               else
-                       idx = ndo_dflt_fdb_dump(skb, cb, dev, idx);
+                       idx = dev->netdev_ops->ndo_fdb_dump(skb, cb, bdev, dev,
+                                                           idx);
+
+               cops = NULL;
        }
-       rcu_read_unlock();
 
        cb->args[0] = idx;
        return skb->len;
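
rtnl_fdb_dump() is a netlink dump callback, so it can be invoked several times to build one multi-part reply; the running idx is stashed in cb->args[0] so that the next invocation (and the per-device ndo_fdb_dump()/ndo_dflt_fdb_dump() helpers, note the "skip:" label in the hunk header above) can skip entries already emitted. A generic sketch of that contract, with hypothetical total_objects()/fill_one() helpers standing in for the real iteration:

/* Sketch of netlink dump resumption (total_objects/fill_one are made up). */
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx;

	for (idx = 0; idx < total_objects(); idx++) {
		if (idx < cb->args[0])
			continue;		/* sent by an earlier pass */
		if (fill_one(skb, idx) < 0)
			break;			/* skb full; resume here later */
	}
	cb->args[0] = idx;			/* progress for the next pass */
	return skb->len;			/* non-zero: netlink calls again */
}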
index 9cd5344..c1a3303 100644 (file)
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
                                                            skb_put(nskb, len),
                                                            len, 0);
                        SKB_GSO_CB(nskb)->csum_start =
-                           skb_headroom(nskb) + offset;
+                           skb_headroom(nskb) + doffset;
                        continue;
                }
 
index 026e01f..a741163 100644 (file)
@@ -491,7 +491,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
 
        skb->dev = NULL;
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
+       if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
                atomic_inc(&sk->sk_drops);
                goto discard_and_relse;
        }
@@ -862,8 +862,6 @@ set_rcvbuf:
                                               (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE));
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_SOFTWARE,
                                  val & SOF_TIMESTAMPING_SOFTWARE);
-               sock_valbool_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE,
-                                 val & SOF_TIMESTAMPING_SYS_HARDWARE);
                sock_valbool_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE,
                                  val & SOF_TIMESTAMPING_RAW_HARDWARE);
                break;
@@ -1102,8 +1100,6 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
                        v.val |= SOF_TIMESTAMPING_RX_SOFTWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE))
                        v.val |= SOF_TIMESTAMPING_SOFTWARE;
-               if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE))
-                       v.val |= SOF_TIMESTAMPING_SYS_HARDWARE;
                if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE))
                        v.val |= SOF_TIMESTAMPING_RAW_HARDWARE;
                break;
@@ -1478,6 +1474,7 @@ static void sk_update_clone(const struct sock *sk, struct sock *newsk)
 struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 {
        struct sock *newsk;
+       bool is_charged = true;
 
        newsk = sk_prot_alloc(sk->sk_prot, priority, sk->sk_family);
        if (newsk != NULL) {
@@ -1522,9 +1519,13 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
 
                filter = rcu_dereference_protected(newsk->sk_filter, 1);
                if (filter != NULL)
-                       sk_filter_charge(newsk, filter);
+                       /* Even though the new socket is empty, the charge
+                        * may fail if sysctl_optmem_max was changed between
+                        * the creation of the original socket and this clone.
+                        */
+                       is_charged = sk_filter_charge(newsk, filter);
 
-               if (unlikely(xfrm_sk_clone_policy(newsk))) {
+               if (unlikely(!is_charged || xfrm_sk_clone_policy(newsk))) {
                        /* It is still raw copy of parent, so invalidate
                         * destructor and make plain sk_free() */
                        newsk->sk_destruct = NULL;
index a4216a4..ad704c7 100644 (file)
@@ -68,8 +68,8 @@ int sock_diag_put_filterinfo(bool may_report_filterinfo, struct sock *sk,
        if (!filter)
                goto out;
 
-       fprog = filter->orig_prog;
-       flen = sk_filter_proglen(fprog);
+       fprog = filter->prog->orig_prog;
+       flen = bpf_classic_proglen(fprog);
 
        attr = nla_reserve(skb, attrtype, flen);
        if (attr == NULL) {
index 6521dfd..a877039 100644 (file)
@@ -43,31 +43,22 @@ void skb_clone_tx_timestamp(struct sk_buff *skb)
                return;
 
        type = classify(skb);
+       if (type == PTP_CLASS_NONE)
+               return;
+
+       phydev = skb->dev->phydev;
+       if (likely(phydev->drv->txtstamp)) {
+               if (!atomic_inc_not_zero(&sk->sk_refcnt))
+                       return;
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV4:
-       case PTP_CLASS_V2_IPV6:
-       case PTP_CLASS_V2_L2:
-       case PTP_CLASS_V2_VLAN:
-               phydev = skb->dev->phydev;
-               if (likely(phydev->drv->txtstamp)) {
-                       if (!atomic_inc_not_zero(&sk->sk_refcnt))
-                               return;
-
-                       clone = skb_clone(skb, GFP_ATOMIC);
-                       if (!clone) {
-                               sock_put(sk);
-                               return;
-                       }
-
-                       clone->sk = sk;
-                       phydev->drv->txtstamp(phydev, clone, type);
+               clone = skb_clone(skb, GFP_ATOMIC);
+               if (!clone) {
+                       sock_put(sk);
+                       return;
                }
-               break;
-       default:
-               break;
+
+               clone->sk = sk;
+               phydev->drv->txtstamp(phydev, clone, type);
        }
 }
 EXPORT_SYMBOL_GPL(skb_clone_tx_timestamp);
@@ -114,20 +105,12 @@ bool skb_defer_rx_timestamp(struct sk_buff *skb)
 
        __skb_pull(skb, ETH_HLEN);
 
-       switch (type) {
-       case PTP_CLASS_V1_IPV4:
-       case PTP_CLASS_V1_IPV6:
-       case PTP_CLASS_V2_IPV4:
-       case PTP_CLASS_V2_IPV6:
-       case PTP_CLASS_V2_L2:
-       case PTP_CLASS_V2_VLAN:
-               phydev = skb->dev->phydev;
-               if (likely(phydev->drv->rxtstamp))
-                       return phydev->drv->rxtstamp(phydev, skb, type);
-               break;
-       default:
-               break;
-       }
+       if (type == PTP_CLASS_NONE)
+               return false;
+
+       phydev = skb->dev->phydev;
+       if (likely(phydev->drv->rxtstamp))
+               return phydev->drv->rxtstamp(phydev, skb, type);
 
        return false;
 }
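
The two timestamping hooks above no longer enumerate every recognized PTP class: ptp_classify_raw() returns PTP_CLASS_NONE (zero) for anything it does not recognize and a non-zero class otherwise, so one comparison replaces the whole switch. Judging from the classifier comments earlier in this pull (0xf masks PTP_CLASS_VMASK; 0x50 and 0x60 are PTP_CLASS_VLAN or'ed with PTP_CLASS_IPV4/PTP_CLASS_IPV6), the class packs the PTP version in the low nibble and the transport in the higher bits, roughly as follows (a sketch of the implied encoding, not a quote from ptp_classify.h):

/* Implied class encoding (sketch, inferred from the BPF comments above): */
#define PTP_CLASS_NONE	0x00	/* not a PTP event frame */
#define PTP_CLASS_VMASK	0x0f	/* PTP version nibble */
#define PTP_CLASS_IPV4	0x10	/* transport bits */
#define PTP_CLASS_IPV6	0x20
#define PTP_CLASS_VLAN	0x40	/* 0x50/0x60 = VLAN-tagged IPv4/IPv6 */

Every recognized combination is therefore non-zero, which is all the fast-path check needs.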
index f8b98d8..ca11d28 100644 (file)
@@ -471,7 +471,11 @@ static int dcbnl_getapp(struct net_device *netdev, struct nlmsghdr *nlh,
        id = nla_get_u16(app_tb[DCB_APP_ATTR_ID]);
 
        if (netdev->dcbnl_ops->getapp) {
-               up = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+               ret = netdev->dcbnl_ops->getapp(netdev, idtype, id);
+               if (ret < 0)
+                       return ret;
+               else
+                       up = ret;
        } else {
                struct dcb_app app = {
                                        .selector = idtype,
@@ -538,6 +542,8 @@ static int dcbnl_setapp(struct net_device *netdev, struct nlmsghdr *nlh,
 
        if (netdev->dcbnl_ops->setapp) {
                ret = netdev->dcbnl_ops->setapp(netdev, idtype, id, up);
+               if (ret < 0)
+                       return ret;
        } else {
                struct dcb_app app;
                app.selector = idtype;
@@ -1770,7 +1776,7 @@ EXPORT_SYMBOL(dcb_getapp);
  *
  * Priority 0 is an invalid priority in CEE spec. This routine
  * removes applications from the app list if the priority is
- * set to zero.
+ * set to zero. Priority is expected to be an 8-bit 802.1p user priority bitmap.
  */
 int dcb_setapp(struct net_device *dev, struct dcb_app *new)
 {
@@ -1831,7 +1837,8 @@ EXPORT_SYMBOL(dcb_ieee_getapp_mask);
  *
  * This adds Application data to the list. Multiple application
  * entries may exists for the same selector and protocol as long
- * as the priorities are different.
+ * as the priorities are different. Priority is expected to be a
+ * 3-bit unsigned integer.
  */
 int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
 {
index 4db3c2a..04cb17d 100644 (file)
@@ -386,7 +386,7 @@ static int dccp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1)
                goto drop;
 
-       req = inet6_reqsk_alloc(&dccp6_request_sock_ops);
+       req = inet_reqsk_alloc(&dccp6_request_sock_ops);
        if (req == NULL)
                goto drop;
 
index c69eb9c..b50dc43 100644 (file)
@@ -55,11 +55,9 @@ void dccp_time_wait(struct sock *sk, int state, int timeo)
                const int rto = (icsk->icsk_rto << 2) - (icsk->icsk_rto >> 1);
 #if IS_ENABLED(CONFIG_IPV6)
                if (tw->tw_family == PF_INET6) {
-                       const struct ipv6_pinfo *np = inet6_sk(sk);
-
                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
-                       tw->tw_ipv6only = np->ipv6only;
+                       tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
                /* Linkage updates. */
index 9acec61..dd8696a 100644 (file)
@@ -150,7 +150,7 @@ int dns_query(const char *type, const char *name, size_t namelen,
                goto put;
 
        memcpy(*_result, upayload->data, len);
-       *_result[len] = '\0';
+       (*_result)[len] = '\0';
 
        if (_expiry)
                *_expiry = rkey->expiry;
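
The dns_query() hunk is a one-line precedence fix: _result is a char **, and array indexing binds tighter than dereference, so the old *_result[len] wrote through _result[len] (i.e. past the single pointer) instead of terminating the copied payload. A self-contained user-space illustration:

/* Sketch of the precedence bug fixed above (user-space, builds with cc). */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
	char *buf = malloc(8);
	char **_result = &buf;
	size_t len = 3;

	memcpy(*_result, "abcdef", 7);
	/* *_result[len] parses as *(_result[len]): it indexes the pointer,
	 * not the buffer. (*_result)[len] is the intended store: */
	(*_result)[len] = '\0';
	printf("%s\n", *_result);	/* prints "abc" */
	free(buf);
	return 0;
}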
index 5db37ce..0a49632 100644 (file)
@@ -351,8 +351,7 @@ static void dsa_of_free_platform_data(struct dsa_platform_data *pd)
        for (i = 0; i < pd->nr_chips; i++) {
                port_index = 0;
                while (port_index < DSA_MAX_PORTS) {
-                       if (pd->chip[i].port_names[port_index])
-                               kfree(pd->chip[i].port_names[port_index]);
+                       kfree(pd->chip[i].port_names[port_index]);
                        port_index++;
                }
                kfree(pd->chip[i].rtable);
index 64c5af0..45a1e34 100644 (file)
@@ -340,8 +340,8 @@ dsa_slave_create(struct dsa_switch *ds, struct device *parent,
        struct dsa_slave_priv *p;
        int ret;
 
-       slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv),
-                                name, ether_setup);
+       slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
+                                NET_NAME_UNKNOWN, ether_setup);
        if (slave_dev == NULL)
                return slave_dev;
 
index 5dc638c..f405e05 100644 (file)
@@ -390,7 +390,8 @@ EXPORT_SYMBOL(ether_setup);
 struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
                                      unsigned int rxqs)
 {
-       return alloc_netdev_mqs(sizeof_priv, "eth%d", ether_setup, txqs, rxqs);
+       return alloc_netdev_mqs(sizeof_priv, "eth%d", NET_NAME_UNKNOWN,
+                               ether_setup, txqs, rxqs);
 }
 EXPORT_SYMBOL(alloc_etherdev_mqs);
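
Several hunks in this pull thread a new name_assign_type argument through alloc_netdev_mqs() and rtnl_create_link(), recording how an interface got its name: NET_NAME_USER when userspace supplied one (see rtnl_newlink() above), NET_NAME_ENUM when the kernel enumerated a "%d" template, and NET_NAME_UNKNOWN for legacy callers such as alloc_etherdev_mqs(). The constants presumably live in the netdevice UAPI header along these lines (values assumed, shown only for orientation):

/* Assumed definitions (sketch; see include/uapi/linux/netdevice.h): */
#define NET_NAME_UNKNOWN	0	/* origin of the name not recorded */
#define NET_NAME_ENUM		1	/* enumerated by the kernel ("eth%d") */
#define NET_NAME_USER		3	/* name supplied by user space */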
 
index b68359f..9ae972a 100644 (file)
@@ -4,4 +4,5 @@
 
 obj-$(CONFIG_HSR)      += hsr.o
 
-hsr-y                  := hsr_main.o hsr_framereg.o hsr_device.o hsr_netlink.o
+hsr-y                  := hsr_main.o hsr_framereg.o hsr_device.o \
+                          hsr_netlink.o hsr_slave.o hsr_forward.o
index e5302b7..a138d75 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  *
  * This file contains device methods for creating, using and destroying
  * virtual HSR devices.
 #include <linux/netdevice.h>
 #include <linux/skbuff.h>
 #include <linux/etherdevice.h>
-#include <linux/if_arp.h>
 #include <linux/rtnetlink.h>
 #include <linux/pkt_sched.h>
 #include "hsr_device.h"
+#include "hsr_slave.h"
 #include "hsr_framereg.h"
 #include "hsr_main.h"
+#include "hsr_forward.h"
 
 
 static bool is_admin_up(struct net_device *dev)
@@ -45,75 +46,108 @@ static void __hsr_set_operstate(struct net_device *dev, int transition)
        }
 }
 
-void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
-                      struct net_device *slave2)
+static void hsr_set_operstate(struct hsr_port *master, bool has_carrier)
 {
-       if (!is_admin_up(hsr_dev)) {
-               __hsr_set_operstate(hsr_dev, IF_OPER_DOWN);
+       if (!is_admin_up(master->dev)) {
+               __hsr_set_operstate(master->dev, IF_OPER_DOWN);
                return;
        }
 
-       if (is_slave_up(slave1) || is_slave_up(slave2))
-               __hsr_set_operstate(hsr_dev, IF_OPER_UP);
+       if (has_carrier)
+               __hsr_set_operstate(master->dev, IF_OPER_UP);
        else
-               __hsr_set_operstate(hsr_dev, IF_OPER_LOWERLAYERDOWN);
+               __hsr_set_operstate(master->dev, IF_OPER_LOWERLAYERDOWN);
 }
 
-void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
-                    struct net_device *slave2)
+static bool hsr_check_carrier(struct hsr_port *master)
 {
-       if (is_slave_up(slave1) || is_slave_up(slave2))
-               netif_carrier_on(hsr_dev);
+       struct hsr_port *port;
+       bool has_carrier;
+
+       has_carrier = false;
+
+       rcu_read_lock();
+       hsr_for_each_port(master->hsr, port)
+               if ((port->type != HSR_PT_MASTER) && is_slave_up(port->dev)) {
+                       has_carrier = true;
+                       break;
+               }
+       rcu_read_unlock();
+
+       if (has_carrier)
+               netif_carrier_on(master->dev);
        else
-               netif_carrier_off(hsr_dev);
+               netif_carrier_off(master->dev);
+
+       return has_carrier;
 }
 
 
-void hsr_check_announce(struct net_device *hsr_dev, int old_operstate)
+static void hsr_check_announce(struct net_device *hsr_dev,
+                              unsigned char old_operstate)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
 
-       hsr_priv = netdev_priv(hsr_dev);
+       hsr = netdev_priv(hsr_dev);
 
        if ((hsr_dev->operstate == IF_OPER_UP) && (old_operstate != IF_OPER_UP)) {
                /* Went up */
-               hsr_priv->announce_count = 0;
-               hsr_priv->announce_timer.expires = jiffies +
+               hsr->announce_count = 0;
+               hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
-               add_timer(&hsr_priv->announce_timer);
+               add_timer(&hsr->announce_timer);
        }
 
        if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
                /* Went down */
-               del_timer(&hsr_priv->announce_timer);
+               del_timer(&hsr->announce_timer);
 }
 
-
-int hsr_get_max_mtu(struct hsr_priv *hsr_priv)
+void hsr_check_carrier_and_operstate(struct hsr_priv *hsr)
 {
-       int mtu_max;
-
-       if (hsr_priv->slave[0] && hsr_priv->slave[1])
-               mtu_max = min(hsr_priv->slave[0]->mtu, hsr_priv->slave[1]->mtu);
-       else if (hsr_priv->slave[0])
-               mtu_max = hsr_priv->slave[0]->mtu;
-       else if (hsr_priv->slave[1])
-               mtu_max = hsr_priv->slave[1]->mtu;
-       else
-               mtu_max = HSR_TAGLEN;
+       struct hsr_port *master;
+       unsigned char old_operstate;
+       bool has_carrier;
 
-       return mtu_max - HSR_TAGLEN;
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       /* netif_stacked_transfer_operstate() cannot be used here since
+        * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
+        */
+       old_operstate = master->dev->operstate;
+       has_carrier = hsr_check_carrier(master);
+       hsr_set_operstate(master, has_carrier);
+       hsr_check_announce(master->dev, old_operstate);
 }
 
+int hsr_get_max_mtu(struct hsr_priv *hsr)
+{
+       unsigned int mtu_max;
+       struct hsr_port *port;
+
+       mtu_max = ETH_DATA_LEN;
+       rcu_read_lock();
+       hsr_for_each_port(hsr, port)
+               if (port->type != HSR_PT_MASTER)
+                       mtu_max = min(port->dev->mtu, mtu_max);
+       rcu_read_unlock();
+
+       if (mtu_max < HSR_HLEN)
+               return 0;
+       return mtu_max - HSR_HLEN;
+}
+
+
 static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *master;
 
-       hsr_priv = netdev_priv(dev);
+       hsr = netdev_priv(dev);
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
 
-       if (new_mtu > hsr_get_max_mtu(hsr_priv)) {
-               netdev_info(hsr_priv->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
-                           HSR_TAGLEN);
+       if (new_mtu > hsr_get_max_mtu(hsr)) {
+               netdev_info(master->dev, "A HSR master's MTU cannot be greater than the smallest MTU of its slaves minus the HSR Tag length (%d octets).\n",
+                           HSR_HLEN);
                return -EINVAL;
        }
 
@@ -124,164 +158,95 @@ static int hsr_dev_change_mtu(struct net_device *dev, int new_mtu)
 
 static int hsr_dev_open(struct net_device *dev)
 {
-       struct hsr_priv *hsr_priv;
-       int i;
-       char *slave_name;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
+       char designation;
 
-       hsr_priv = netdev_priv(dev);
+       hsr = netdev_priv(dev);
+       designation = '\0';
 
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               if (hsr_priv->slave[i])
-                       slave_name = hsr_priv->slave[i]->name;
-               else
-                       slave_name = "null";
-
-               if (!is_slave_up(hsr_priv->slave[i]))
-                       netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a working HSR network\n",
-                                   'A' + i, slave_name);
+       rcu_read_lock();
+       hsr_for_each_port(hsr, port) {
+               if (port->type == HSR_PT_MASTER)
+                       continue;
+               switch (port->type) {
+               case HSR_PT_SLAVE_A:
+                       designation = 'A';
+                       break;
+               case HSR_PT_SLAVE_B:
+                       designation = 'B';
+                       break;
+               default:
+                       designation = '?';
+               }
+               if (!is_slave_up(port->dev))
+                       netdev_warn(dev, "Slave %c (%s) is not up; please bring it up to get a fully working HSR network\n",
+                                   designation, port->dev->name);
        }
+       rcu_read_unlock();
+
+       if (designation == '\0')
+               netdev_warn(dev, "No slave devices configured\n");
 
        return 0;
 }
 
+
 static int hsr_dev_close(struct net_device *dev)
 {
-       /* Nothing to do here. We could try to restore the state of the slaves
-        * to what they were before being changed by the hsr master dev's state,
-        * but they might have been changed manually in the mean time too, so
-        * taking them up or down here might be confusing and is probably not a
-        * good idea.
-        */
+       /* Nothing to do here. */
        return 0;
 }
 
 
-static void hsr_fill_tag(struct hsr_ethhdr *hsr_ethhdr, struct hsr_priv *hsr_priv)
+static netdev_features_t hsr_features_recompute(struct hsr_priv *hsr,
+                                               netdev_features_t features)
 {
-       unsigned long irqflags;
+       netdev_features_t mask;
+       struct hsr_port *port;
 
-       /* IEC 62439-1:2010, p 48, says the 4-bit "path" field can take values
-        * between 0001-1001 ("ring identifier", for regular HSR frames),
-        * or 1111 ("HSR management", supervision frames). Unfortunately, the
-        * spec writers forgot to explain what a "ring identifier" is, or
-        * how it is used. So we just set this to 0001 for regular frames,
-        * and 1111 for supervision frames.
-        */
-       set_hsr_tag_path(&hsr_ethhdr->hsr_tag, 0x1);
+       mask = features;
 
-       /* IEC 62439-1:2010, p 12: "The link service data unit in an Ethernet
-        * frame is the content of the frame located between the Length/Type
-        * field and the Frame Check Sequence."
+       /* Mask out all features that, if supported by one device, should be
+        * enabled for all devices (see NETIF_F_ONE_FOR_ALL).
         *
-        * IEC 62439-3, p 48, specifies the "original LPDU" to include the
-        * original "LT" field (what "LT" means is not explained anywhere as
-        * far as I can see - perhaps "Length/Type"?). So LSDU_size might
-        * equal original length + 2.
-        *   Also, the fact that this field is not used anywhere (might be used
-        * by a RedBox connecting HSR and PRP nets?) means I cannot test its
-        * correctness. Instead of guessing, I set this to 0 here, to make any
-        * problems immediately apparent. Anyone using this driver with PRP/HSR
-        * RedBoxes might need to fix this...
+        * Anything that's off in mask will not be enabled - so only things
+        * that were in features originally, and are also in NETIF_F_ONE_FOR_ALL,
+        * may become enabled.
         */
-       set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, 0);
-
-       spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
-       hsr_ethhdr->hsr_tag.sequence_nr = htons(hsr_priv->sequence_nr);
-       hsr_priv->sequence_nr++;
-       spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+       features &= ~NETIF_F_ONE_FOR_ALL;
+       hsr_for_each_port(hsr, port)
+               features = netdev_increment_features(features,
+                                                    port->dev->features,
+                                                    mask);
 
-       hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
-
-       hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+       return features;
 }
 
-static int slave_xmit(struct sk_buff *skb, struct hsr_priv *hsr_priv,
-                     enum hsr_dev_idx dev_idx)
+static netdev_features_t hsr_fix_features(struct net_device *dev,
+                                         netdev_features_t features)
 {
-       struct hsr_ethhdr *hsr_ethhdr;
-
-       hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
+       struct hsr_priv *hsr = netdev_priv(dev);
 
-       skb->dev = hsr_priv->slave[dev_idx];
-
-       hsr_addr_subst_dest(hsr_priv, &hsr_ethhdr->ethhdr, dev_idx);
-
-       /* Address substitution (IEC62439-3 pp 26, 50): replace mac
-        * address of outgoing frame with that of the outgoing slave's.
-        */
-       ether_addr_copy(hsr_ethhdr->ethhdr.h_source, skb->dev->dev_addr);
-
-       return dev_queue_xmit(skb);
+       return hsr_features_recompute(hsr, features);
 }
 
 
 static int hsr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
 {
-       struct hsr_priv *hsr_priv;
-       struct hsr_ethhdr *hsr_ethhdr;
-       struct sk_buff *skb2;
-       int res1, res2;
-
-       hsr_priv = netdev_priv(dev);
-       hsr_ethhdr = (struct hsr_ethhdr *) skb->data;
-
-       if ((skb->protocol != htons(ETH_P_PRP)) ||
-           (hsr_ethhdr->ethhdr.h_proto != htons(ETH_P_PRP))) {
-               hsr_fill_tag(hsr_ethhdr, hsr_priv);
-               skb->protocol = htons(ETH_P_PRP);
-       }
-
-       skb2 = pskb_copy(skb, GFP_ATOMIC);
-
-       res1 = NET_XMIT_DROP;
-       if (likely(hsr_priv->slave[HSR_DEV_SLAVE_A]))
-               res1 = slave_xmit(skb, hsr_priv, HSR_DEV_SLAVE_A);
+       struct hsr_priv *hsr = netdev_priv(dev);
+       struct hsr_port *master;
 
-       res2 = NET_XMIT_DROP;
-       if (likely(skb2 && hsr_priv->slave[HSR_DEV_SLAVE_B]))
-               res2 = slave_xmit(skb2, hsr_priv, HSR_DEV_SLAVE_B);
-
-       if (likely(res1 == NET_XMIT_SUCCESS || res1 == NET_XMIT_CN ||
-                  res2 == NET_XMIT_SUCCESS || res2 == NET_XMIT_CN)) {
-               hsr_priv->dev->stats.tx_packets++;
-               hsr_priv->dev->stats.tx_bytes += skb->len;
-       } else {
-               hsr_priv->dev->stats.tx_dropped++;
-       }
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       skb->dev = master->dev;
+       hsr_forward_skb(skb, master);
 
        return NETDEV_TX_OK;
 }
 
 
-static int hsr_header_create(struct sk_buff *skb, struct net_device *dev,
-                            unsigned short type, const void *daddr,
-                            const void *saddr, unsigned int len)
-{
-       int res;
-
-       /* Make room for the HSR tag now. We will fill it in later (in
-        * hsr_dev_xmit)
-        */
-       if (skb_headroom(skb) < HSR_TAGLEN + ETH_HLEN)
-               return -ENOBUFS;
-       skb_push(skb, HSR_TAGLEN);
-
-       /* To allow VLAN/HSR combos we should probably use
-        * res = dev_hard_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
-        * here instead. It would require other changes too, though - e.g.
-        * separate headers for each slave etc...
-        */
-       res = eth_header(skb, dev, type, daddr, saddr, len + HSR_TAGLEN);
-       if (res <= 0)
-               return res;
-       skb_reset_mac_header(skb);
-
-       return res + HSR_TAGLEN;
-}
-
-
 static const struct header_ops hsr_header_ops = {
-       .create  = hsr_header_create,
+       .create  = eth_header,
        .parse   = eth_header_parse,
 };
 
@@ -291,67 +256,63 @@ static const struct header_ops hsr_header_ops = {
  */
 static int hsr_pad(int size)
 {
-       const int min_size = ETH_ZLEN - HSR_TAGLEN - ETH_HLEN;
+       const int min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN;
 
        if (size >= min_size)
                return size;
        return min_size;
 }
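
hsr_pad() keeps the eventual frame at the Ethernet minimum once the HSR tag is accounted for. A worked example with the usual constants (assuming ETH_ZLEN = 60, ETH_HLEN = 14 and HSR_HLEN = 6, the 6-octet HSR tag):

/* Sketch: min_size = ETH_ZLEN - HSR_HLEN - ETH_HLEN = 60 - 6 - 14 = 40,
 * so a supervision payload below 40 bytes is padded up to 40, and
 * 40 (payload) + 14 (Ethernet header) + 6 (HSR tag) = 60 = ETH_ZLEN.
 */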
 
-static void send_hsr_supervision_frame(struct net_device *hsr_dev, u8 type)
+static void send_hsr_supervision_frame(struct hsr_port *master, u8 type)
 {
-       struct hsr_priv *hsr_priv;
        struct sk_buff *skb;
        int hlen, tlen;
        struct hsr_sup_tag *hsr_stag;
        struct hsr_sup_payload *hsr_sp;
        unsigned long irqflags;
 
-       hlen = LL_RESERVED_SPACE(hsr_dev);
-       tlen = hsr_dev->needed_tailroom;
+       hlen = LL_RESERVED_SPACE(master->dev);
+       tlen = master->dev->needed_tailroom;
        skb = alloc_skb(hsr_pad(sizeof(struct hsr_sup_payload)) + hlen + tlen,
                        GFP_ATOMIC);
 
        if (skb == NULL)
                return;
 
-       hsr_priv = netdev_priv(hsr_dev);
-
        skb_reserve(skb, hlen);
 
-       skb->dev = hsr_dev;
+       skb->dev = master->dev;
        skb->protocol = htons(ETH_P_PRP);
        skb->priority = TC_PRIO_CONTROL;
 
        if (dev_hard_header(skb, skb->dev, ETH_P_PRP,
-                           hsr_priv->sup_multicast_addr,
-                           skb->dev->dev_addr, skb->len) < 0)
+                           master->hsr->sup_multicast_addr,
+                           skb->dev->dev_addr, skb->len) <= 0)
                goto out;
+       skb_reset_mac_header(skb);
 
-       skb_pull(skb, sizeof(struct ethhdr));
-       hsr_stag = (typeof(hsr_stag)) skb->data;
+       hsr_stag = (typeof(hsr_stag)) skb_put(skb, sizeof(*hsr_stag));
 
        set_hsr_stag_path(hsr_stag, 0xf);
        set_hsr_stag_HSR_Ver(hsr_stag, 0);
 
-       spin_lock_irqsave(&hsr_priv->seqnr_lock, irqflags);
-       hsr_stag->sequence_nr = htons(hsr_priv->sequence_nr);
-       hsr_priv->sequence_nr++;
-       spin_unlock_irqrestore(&hsr_priv->seqnr_lock, irqflags);
+       spin_lock_irqsave(&master->hsr->seqnr_lock, irqflags);
+       hsr_stag->sequence_nr = htons(master->hsr->sequence_nr);
+       master->hsr->sequence_nr++;
+       spin_unlock_irqrestore(&master->hsr->seqnr_lock, irqflags);
 
        hsr_stag->HSR_TLV_Type = type;
        hsr_stag->HSR_TLV_Length = 12;
 
-       skb_push(skb, sizeof(struct ethhdr));
-
        /* Payload: MacAddressA */
        hsr_sp = (typeof(hsr_sp)) skb_put(skb, sizeof(*hsr_sp));
-       ether_addr_copy(hsr_sp->MacAddressA, hsr_dev->dev_addr);
+       ether_addr_copy(hsr_sp->MacAddressA, master->dev->dev_addr);
 
-       dev_queue_xmit(skb);
+       hsr_forward_skb(skb, master);
        return;
 
 out:
+       WARN_ON_ONCE("HSR: Could not send supervision frame\n");
        kfree_skb(skb);
 }
 
@@ -360,59 +321,32 @@ out:
  */
 static void hsr_announce(unsigned long data)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *master;
 
-       hsr_priv = (struct hsr_priv *) data;
+       hsr = (struct hsr_priv *) data;
 
-       if (hsr_priv->announce_count < 3) {
-               send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_ANNOUNCE);
-               hsr_priv->announce_count++;
+       rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+
+       if (hsr->announce_count < 3) {
+               send_hsr_supervision_frame(master, HSR_TLV_ANNOUNCE);
+               hsr->announce_count++;
        } else {
-               send_hsr_supervision_frame(hsr_priv->dev, HSR_TLV_LIFE_CHECK);
+               send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK);
        }
 
-       if (hsr_priv->announce_count < 3)
-               hsr_priv->announce_timer.expires = jiffies +
+       if (hsr->announce_count < 3)
+               hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
        else
-               hsr_priv->announce_timer.expires = jiffies +
+               hsr->announce_timer.expires = jiffies +
                                msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
 
-       if (is_admin_up(hsr_priv->dev))
-               add_timer(&hsr_priv->announce_timer);
-}
-
-
-static void restore_slaves(struct net_device *hsr_dev)
-{
-       struct hsr_priv *hsr_priv;
-       int i;
-       int res;
-
-       hsr_priv = netdev_priv(hsr_dev);
-
-       rtnl_lock();
-
-       /* Restore promiscuity */
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               if (!hsr_priv->slave[i])
-                       continue;
-               res = dev_set_promiscuity(hsr_priv->slave[i], -1);
-               if (res)
-                       netdev_info(hsr_dev,
-                                   "Cannot restore slave promiscuity (%s, %d)\n",
-                                   hsr_priv->slave[i]->name, res);
-       }
-
-       rtnl_unlock();
-}
-
-static void reclaim_hsr_dev(struct rcu_head *rh)
-{
-       struct hsr_priv *hsr_priv;
+       if (is_admin_up(master->dev))
+               add_timer(&hsr->announce_timer);
 
-       hsr_priv = container_of(rh, struct hsr_priv, rcu_head);
-       free_netdev(hsr_priv->dev);
+       rcu_read_unlock();
 }
 
 
@@ -421,14 +355,18 @@ static void reclaim_hsr_dev(struct rcu_head *rh)
  */
 static void hsr_dev_destroy(struct net_device *hsr_dev)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
 
-       hsr_priv = netdev_priv(hsr_dev);
+       hsr = netdev_priv(hsr_dev);
+       hsr_for_each_port(hsr, port)
+               hsr_del_port(port);
 
-       del_timer(&hsr_priv->announce_timer);
-       unregister_hsr_master(hsr_priv);    /* calls list_del_rcu on hsr_priv */
-       restore_slaves(hsr_dev);
-       call_rcu(&hsr_priv->rcu_head, reclaim_hsr_dev);   /* reclaim hsr_priv */
+       del_timer_sync(&hsr->prune_timer);
+       del_timer_sync(&hsr->announce_timer);
+
+       synchronize_rcu();
+       free_netdev(hsr_dev);
 }
 
 static const struct net_device_ops hsr_device_ops = {
@@ -436,62 +374,51 @@ static const struct net_device_ops hsr_device_ops = {
        .ndo_open = hsr_dev_open,
        .ndo_stop = hsr_dev_close,
        .ndo_start_xmit = hsr_dev_xmit,
+       .ndo_fix_features = hsr_fix_features,
 };
 
+static struct device_type hsr_type = {
+       .name = "hsr",
+};
 
 void hsr_dev_setup(struct net_device *dev)
 {
        random_ether_addr(dev->dev_addr);
 
        ether_setup(dev);
-       dev->header_ops          = &hsr_header_ops;
-       dev->netdev_ops          = &hsr_device_ops;
-       dev->tx_queue_len        = 0;
+       dev->header_ops = &hsr_header_ops;
+       dev->netdev_ops = &hsr_device_ops;
+       SET_NETDEV_DEVTYPE(dev, &hsr_type);
+       dev->tx_queue_len = 0;
 
        dev->destructor = hsr_dev_destroy;
+
+       dev->hw_features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_HIGHDMA |
+                          NETIF_F_GSO_MASK | NETIF_F_HW_CSUM |
+                          NETIF_F_HW_VLAN_CTAG_TX;
+
+       dev->features = dev->hw_features;
+
+       /* Prevent recursive tx locking */
+       dev->features |= NETIF_F_LLTX;
+       /* VLAN on top of HSR needs testing and probably some work on
+        * hsr_header_create() etc.
+        */
+       dev->features |= NETIF_F_VLAN_CHALLENGED;
+       /* Not sure about this. Taken from bridge code. netdev_features.h says
+        * it means "Does not change network namespaces".
+        */
+       dev->features |= NETIF_F_NETNS_LOCAL;
 }
 
 
 /* Return true if dev is a HSR master; return false otherwise.
  */
-bool is_hsr_master(struct net_device *dev)
+inline bool is_hsr_master(struct net_device *dev)
 {
        return (dev->netdev_ops->ndo_start_xmit == hsr_dev_xmit);
 }
 
-static int check_slave_ok(struct net_device *dev)
-{
-       /* Don't allow HSR on non-ethernet like devices */
-       if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
-           (dev->addr_len != ETH_ALEN)) {
-               netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
-               return -EINVAL;
-       }
-
-       /* Don't allow enslaving hsr devices */
-       if (is_hsr_master(dev)) {
-               netdev_info(dev, "Cannot create trees of HSR devices.\n");
-               return -EINVAL;
-       }
-
-       if (is_hsr_slave(dev)) {
-               netdev_info(dev, "This device is already a HSR slave.\n");
-               return -EINVAL;
-       }
-
-       if (dev->priv_flags & IFF_802_1Q_VLAN) {
-               netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
-               return -EINVAL;
-       }
-
-       /* HSR over bonded devices has not been tested, but I'm not sure it
-        * won't work...
-        */
-
-       return 0;
-}
-
-
 /* Default multicast address for HSR Supervision frames */
 static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
        0x01, 0x15, 0x4e, 0x00, 0x01, 0x00
@@ -500,97 +427,74 @@ static const unsigned char def_multicast_addr[ETH_ALEN] __aligned(2) = {
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec)
 {
-       struct hsr_priv *hsr_priv;
-       int i;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
        int res;
 
-       hsr_priv = netdev_priv(hsr_dev);
-       hsr_priv->dev = hsr_dev;
-       INIT_LIST_HEAD(&hsr_priv->node_db);
-       INIT_LIST_HEAD(&hsr_priv->self_node_db);
-       for (i = 0; i < HSR_MAX_SLAVE; i++)
-               hsr_priv->slave[i] = slave[i];
-
-       spin_lock_init(&hsr_priv->seqnr_lock);
-       /* Overflow soon to find bugs easier: */
-       hsr_priv->sequence_nr = USHRT_MAX - 1024;
-
-       init_timer(&hsr_priv->announce_timer);
-       hsr_priv->announce_timer.function = hsr_announce;
-       hsr_priv->announce_timer.data = (unsigned long) hsr_priv;
+       hsr = netdev_priv(hsr_dev);
+       INIT_LIST_HEAD(&hsr->ports);
+       INIT_LIST_HEAD(&hsr->node_db);
+       INIT_LIST_HEAD(&hsr->self_node_db);
 
-       ether_addr_copy(hsr_priv->sup_multicast_addr, def_multicast_addr);
-       hsr_priv->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
+       ether_addr_copy(hsr_dev->dev_addr, slave[0]->dev_addr);
 
-/* FIXME: should I modify the value of these?
- *
- * - hsr_dev->flags - i.e.
- *                     IFF_MASTER/SLAVE?
- * - hsr_dev->priv_flags - i.e.
- *                     IFF_EBRIDGE?
- *                     IFF_TX_SKB_SHARING?
- *                     IFF_HSR_MASTER/SLAVE?
- */
+       /* Make sure we recognize frames from ourselves in hsr_rcv() */
+       res = hsr_create_self_node(&hsr->self_node_db, hsr_dev->dev_addr,
+                                  slave[1]->dev_addr);
+       if (res < 0)
+               return res;
 
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               res = check_slave_ok(slave[i]);
-               if (res)
-                       return res;
-       }
+       spin_lock_init(&hsr->seqnr_lock);
+       /* Overflow soon to find bugs easier: */
+       hsr->sequence_nr = HSR_SEQNR_START;
 
-       hsr_dev->features = slave[0]->features & slave[1]->features;
-       /* Prevent recursive tx locking */
-       hsr_dev->features |= NETIF_F_LLTX;
-       /* VLAN on top of HSR needs testing and probably some work on
-        * hsr_header_create() etc.
-        */
-       hsr_dev->features |= NETIF_F_VLAN_CHALLENGED;
+       init_timer(&hsr->announce_timer);
+       hsr->announce_timer.function = hsr_announce;
+       hsr->announce_timer.data = (unsigned long) hsr;
 
-       /* Set hsr_dev's MAC address to that of mac_slave1 */
-       ether_addr_copy(hsr_dev->dev_addr, hsr_priv->slave[0]->dev_addr);
+       init_timer(&hsr->prune_timer);
+       hsr->prune_timer.function = hsr_prune_nodes;
+       hsr->prune_timer.data = (unsigned long) hsr;
 
-       /* Set required header length */
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               if (slave[i]->hard_header_len + HSR_TAGLEN >
-                                               hsr_dev->hard_header_len)
-                       hsr_dev->hard_header_len =
-                                       slave[i]->hard_header_len + HSR_TAGLEN;
-       }
+       ether_addr_copy(hsr->sup_multicast_addr, def_multicast_addr);
+       hsr->sup_multicast_addr[ETH_ALEN - 1] = multicast_spec;
 
-       /* MTU */
-       for (i = 0; i < HSR_MAX_SLAVE; i++)
-               if (slave[i]->mtu - HSR_TAGLEN < hsr_dev->mtu)
-                       hsr_dev->mtu = slave[i]->mtu - HSR_TAGLEN;
+       /* FIXME: should I modify the value of these?
+        *
+        * - hsr_dev->flags - i.e.
+        *                      IFF_MASTER/SLAVE?
+        * - hsr_dev->priv_flags - i.e.
+        *                      IFF_EBRIDGE?
+        *                      IFF_TX_SKB_SHARING?
+        *                      IFF_HSR_MASTER/SLAVE?
+        */
 
        /* Make sure the 1st call to netif_carrier_on() gets through */
        netif_carrier_off(hsr_dev);
 
-       /* Promiscuity */
-       for (i = 0; i < HSR_MAX_SLAVE; i++) {
-               res = dev_set_promiscuity(slave[i], 1);
-               if (res) {
-                       netdev_info(hsr_dev, "Cannot set slave promiscuity (%s, %d)\n",
-                                   slave[i]->name, res);
-                       goto fail;
-               }
-       }
+       res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
+       if (res)
+               return res;
 
-       /* Make sure we recognize frames from ourselves in hsr_rcv() */
-       res = hsr_create_self_node(&hsr_priv->self_node_db,
-                                       hsr_dev->dev_addr,
-                                       hsr_priv->slave[1]->dev_addr);
-       if (res < 0)
+       res = register_netdevice(hsr_dev);
+       if (res)
                goto fail;
 
-       res = register_netdevice(hsr_dev);
+       res = hsr_add_port(hsr, slave[0], HSR_PT_SLAVE_A);
+       if (res)
+               goto fail;
+       res = hsr_add_port(hsr, slave[1], HSR_PT_SLAVE_B);
        if (res)
                goto fail;
 
-       register_hsr_master(hsr_priv);
+       hsr->prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
+       add_timer(&hsr->prune_timer);
 
        return 0;
 
 fail:
-       restore_slaves(hsr_dev);
+       hsr_for_each_port(hsr, port)
+               hsr_del_port(port);
+
        return res;
 }
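
Throughout this refactor the fixed slave[0]/slave[1] pointers are replaced by a list of struct hsr_port entries (master plus slaves) walked under RCU, which is why hsr_check_carrier(), hsr_get_max_mtu() and hsr_dev_open() bracket their loops with rcu_read_lock()/rcu_read_unlock(). The iterator itself is not shown in these hunks; it is presumably a plain RCU list walk along these lines (a sketch, assuming a port_list member in struct hsr_port):

/* Assumed shape of the iterator used above (sketch): */
#define hsr_for_each_port(hsr, port) \
	list_for_each_entry_rcu((port), &(hsr)->ports, port_list)

hsr_dev_finalize() correspondingly initializes hsr->ports with INIT_LIST_HEAD() before any port is added.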
index 2c7148e..108a5d5 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
 #ifndef __HSR_DEVICE_H
 void hsr_dev_setup(struct net_device *dev);
 int hsr_dev_finalize(struct net_device *hsr_dev, struct net_device *slave[2],
                     unsigned char multicast_spec);
-void hsr_set_operstate(struct net_device *hsr_dev, struct net_device *slave1,
-                      struct net_device *slave2);
-void hsr_set_carrier(struct net_device *hsr_dev, struct net_device *slave1,
-                    struct net_device *slave2);
-void hsr_check_announce(struct net_device *hsr_dev, int old_operstate);
+void hsr_check_carrier_and_operstate(struct hsr_priv *hsr);
 bool is_hsr_master(struct net_device *dev);
-int hsr_get_max_mtu(struct hsr_priv *hsr_priv);
+int hsr_get_max_mtu(struct hsr_priv *hsr);
 
 #endif /* __HSR_DEVICE_H */
diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
new file mode 100644 (file)
index 0000000..7871ed6
--- /dev/null
@@ -0,0 +1,368 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#include "hsr_forward.h"
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include "hsr_main.h"
+#include "hsr_framereg.h"
+
+
+struct hsr_node;
+
+struct hsr_frame_info {
+       struct sk_buff *skb_std;
+       struct sk_buff *skb_hsr;
+       struct hsr_port *port_rcv;
+       struct hsr_node *node_src;
+       u16 sequence_nr;
+       bool is_supervision;
+       bool is_vlan;
+       bool is_local_dest;
+       bool is_local_exclusive;
+};
+
+
+/* The uses I can see for these HSR supervision frames are:
+ * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
+ *    22") to reset any sequence_nr counters belonging to that node. Useful if
+ *    the other node's counter has been reset for some reason.
+ *    --
+ *    Or not - resetting the counter and bridging the frame would create a
+ *    loop, unfortunately.
+ *
+ * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
+ *    frame is received from a particular node, we know something is wrong.
+ *    We just register these (as with normal frames) and throw them away.
+ *
+ * 3) Allow different MAC addresses for the two slave interfaces, using the
+ *    MacAddressA field.
+ */
+static bool is_supervision_frame(struct hsr_priv *hsr, struct sk_buff *skb)
+{
+       struct hsr_ethhdr_sp *hdr;
+
+       WARN_ON_ONCE(!skb_mac_header_was_set(skb));
+       hdr = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
+
+       if (!ether_addr_equal(hdr->ethhdr.h_dest,
+                             hsr->sup_multicast_addr))
+               return false;
+
+       if (get_hsr_stag_path(&hdr->hsr_sup) != 0x0f)
+               return false;
+       if ((hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
+           (hdr->hsr_sup.HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
+               return false;
+       if (hdr->hsr_sup.HSR_TLV_Length != 12)
+               return false;
+
+       return true;
+}
+
+
+static struct sk_buff *create_stripped_skb(struct sk_buff *skb_in,
+                                          struct hsr_frame_info *frame)
+{
+       struct sk_buff *skb;
+       int copylen;
+       unsigned char *dst, *src;
+
+       skb_pull(skb_in, HSR_HLEN);
+       skb = __pskb_copy(skb_in, skb_headroom(skb_in) - HSR_HLEN, GFP_ATOMIC);
+       skb_push(skb_in, HSR_HLEN);
+       if (skb == NULL)
+               return NULL;
+
+       skb_reset_mac_header(skb);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start -= HSR_HLEN;
+
+       copylen = 2*ETH_ALEN;
+       if (frame->is_vlan)
+               copylen += VLAN_HLEN;
+       src = skb_mac_header(skb_in);
+       dst = skb_mac_header(skb);
+       memcpy(dst, src, copylen);
+
+       skb->protocol = eth_hdr(skb)->h_proto;
+       return skb;
+}
+
+static struct sk_buff *frame_get_stripped_skb(struct hsr_frame_info *frame,
+                                             struct hsr_port *port)
+{
+       if (!frame->skb_std)
+               frame->skb_std = create_stripped_skb(frame->skb_hsr, frame);
+       return skb_clone(frame->skb_std, GFP_ATOMIC);
+}
+
+
+static void hsr_fill_tag(struct sk_buff *skb, struct hsr_frame_info *frame,
+                        struct hsr_port *port)
+{
+       struct hsr_ethhdr *hsr_ethhdr;
+       int lane_id;
+       int lsdu_size;
+
+       if (port->type == HSR_PT_SLAVE_A)
+               lane_id = 0;
+       else
+               lane_id = 1;
+
+       lsdu_size = skb->len - 14;
+       if (frame->is_vlan)
+               lsdu_size -= 4;
+
+       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+
+       set_hsr_tag_path(&hsr_ethhdr->hsr_tag, lane_id);
+       set_hsr_tag_LSDU_size(&hsr_ethhdr->hsr_tag, lsdu_size);
+       hsr_ethhdr->hsr_tag.sequence_nr = htons(frame->sequence_nr);
+       hsr_ethhdr->hsr_tag.encap_proto = hsr_ethhdr->ethhdr.h_proto;
+       hsr_ethhdr->ethhdr.h_proto = htons(ETH_P_PRP);
+}
+
+static struct sk_buff *create_tagged_skb(struct sk_buff *skb_o,
+                                        struct hsr_frame_info *frame,
+                                        struct hsr_port *port)
+{
+       int movelen;
+       unsigned char *dst, *src;
+       struct sk_buff *skb;
+
+       /* Create the new skb with enough headroom to fit the HSR tag */
+       skb = __pskb_copy(skb_o, skb_headroom(skb_o) + HSR_HLEN, GFP_ATOMIC);
+       if (skb == NULL)
+               return NULL;
+       skb_reset_mac_header(skb);
+
+       if (skb->ip_summed == CHECKSUM_PARTIAL)
+               skb->csum_start += HSR_HLEN;
+
+       movelen = ETH_HLEN;
+       if (frame->is_vlan)
+               movelen += VLAN_HLEN;
+
+       src = skb_mac_header(skb);
+       dst = skb_push(skb, HSR_HLEN);
+       memmove(dst, src, movelen);
+       skb_reset_mac_header(skb);
+
+       hsr_fill_tag(skb, frame, port);
+
+       return skb;
+}
+
+/* If the original frame was an HSR tagged frame, just clone it to be sent
+ * unchanged. Otherwise, create a private frame especially tagged for 'port'.
+ */
+static struct sk_buff *frame_get_tagged_skb(struct hsr_frame_info *frame,
+                                           struct hsr_port *port)
+{
+       if (frame->skb_hsr)
+               return skb_clone(frame->skb_hsr, GFP_ATOMIC);
+
+       if ((port->type != HSR_PT_SLAVE_A) && (port->type != HSR_PT_SLAVE_B)) {
+               WARN_ONCE(1, "HSR: Bug: trying to create a tagged frame for a non-ring port");
+               return NULL;
+       }
+
+       return create_tagged_skb(frame->skb_std, frame, port);
+}
+
+
+static void hsr_deliver_master(struct sk_buff *skb, struct net_device *dev,
+                              struct hsr_node *node_src)
+{
+       bool was_multicast_frame;
+       unsigned int len;
+       int res;
+
+       was_multicast_frame = (skb->pkt_type == PACKET_MULTICAST);
+       hsr_addr_subst_source(node_src, skb);
+       skb_pull(skb, ETH_HLEN);
+       /* netif_rx() may free the skb, so sample its length first */
+       len = skb->len;
+       res = netif_rx(skb);
+       if (res == NET_RX_DROP) {
+               dev->stats.rx_dropped++;
+       } else {
+               dev->stats.rx_packets++;
+               dev->stats.rx_bytes += len;
+               if (was_multicast_frame)
+                       dev->stats.multicast++;
+       }
+}
+
+static int hsr_xmit(struct sk_buff *skb, struct hsr_port *port,
+                   struct hsr_frame_info *frame)
+{
+       if (frame->port_rcv->type == HSR_PT_MASTER) {
+               hsr_addr_subst_dest(frame->node_src, skb, port);
+
+               /* Address substitution (IEC62439-3 pp 26, 50): replace the
+                * source MAC address of the outgoing frame with that of the
+                * outgoing slave.
+                */
+               ether_addr_copy(eth_hdr(skb)->h_source, port->dev->dev_addr);
+       }
+       return dev_queue_xmit(skb);
+}
+
+
+/* Forward the frame through all devices except:
+ * - Back through the receiving device
+ * - If it's an HSR frame: through a device where it has passed before
+ * - To the local HSR master only if the frame is directly addressed to it, or
+ *   is a non-supervision multicast or broadcast frame.
+ *
+ * HSR slave devices should insert an HSR tag into the frame, or forward the
+ * frame unchanged if it's already tagged. Interlink devices should strip HSR
+ * tags if the interlink is of the non-HSR type (but only after duplicate
+ * discard). The master device always strips HSR tags.
+ */
+static void hsr_forward_do(struct hsr_frame_info *frame)
+{
+       struct hsr_port *port;
+       struct sk_buff *skb;
+
+       hsr_for_each_port(frame->port_rcv->hsr, port) {
+               /* Don't send frame back the way it came */
+               if (port == frame->port_rcv)
+                       continue;
+
+               /* Don't deliver locally unless we should */
+               if ((port->type == HSR_PT_MASTER) && !frame->is_local_dest)
+                       continue;
+
+               /* Deliver frames directly addressed to us to master only */
+               if ((port->type != HSR_PT_MASTER) && frame->is_local_exclusive)
+                       continue;
+
+               /* Don't send frame over port where it has been sent before */
+               if (hsr_register_frame_out(port, frame->node_src,
+                                          frame->sequence_nr))
+                       continue;
+
+               if (frame->is_supervision && (port->type == HSR_PT_MASTER)) {
+                       hsr_handle_sup_frame(frame->skb_hsr,
+                                            frame->node_src,
+                                            frame->port_rcv);
+                       continue;
+               }
+
+               if (port->type != HSR_PT_MASTER)
+                       skb = frame_get_tagged_skb(frame, port);
+               else
+                       skb = frame_get_stripped_skb(frame, port);
+               if (skb == NULL) {
+                       /* FIXME: Record the dropped frame? */
+                       continue;
+               }
+
+               skb->dev = port->dev;
+               if (port->type == HSR_PT_MASTER)
+                       hsr_deliver_master(skb, port->dev, frame->node_src);
+               else
+                       hsr_xmit(skb, port, frame);
+       }
+}
+
+
+static void check_local_dest(struct hsr_priv *hsr, struct sk_buff *skb,
+                            struct hsr_frame_info *frame)
+{
+       struct net_device *master_dev;
+
+       master_dev = hsr_port_get_hsr(hsr, HSR_PT_MASTER)->dev;
+
+       if (hsr_addr_is_self(hsr, eth_hdr(skb)->h_dest)) {
+               frame->is_local_exclusive = true;
+               skb->pkt_type = PACKET_HOST;
+       } else {
+               frame->is_local_exclusive = false;
+       }
+
+       if ((skb->pkt_type == PACKET_HOST) ||
+           (skb->pkt_type == PACKET_MULTICAST) ||
+           (skb->pkt_type == PACKET_BROADCAST)) {
+               frame->is_local_dest = true;
+       } else {
+               frame->is_local_dest = false;
+       }
+}
+
+
+static int hsr_fill_frame_info(struct hsr_frame_info *frame,
+                              struct sk_buff *skb, struct hsr_port *port)
+{
+       struct ethhdr *ethhdr;
+       unsigned long irqflags;
+
+       frame->is_supervision = is_supervision_frame(port->hsr, skb);
+       frame->node_src = hsr_get_node(&port->hsr->node_db, skb,
+                                      frame->is_supervision);
+       if (frame->node_src == NULL)
+               return -1; /* Unknown node and !is_supervision, or no mem */
+
+       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       frame->is_vlan = false;
+       if (ethhdr->h_proto == htons(ETH_P_8021Q)) {
+               frame->is_vlan = true;
+               /* FIXME: */
+               WARN_ONCE(1, "HSR: VLAN not yet supported");
+       }
+       if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+               frame->skb_std = NULL;
+               frame->skb_hsr = skb;
+               frame->sequence_nr = hsr_get_skb_sequence_nr(skb);
+       } else {
+               frame->skb_std = skb;
+               frame->skb_hsr = NULL;
+               /* Sequence nr for the master node */
+               spin_lock_irqsave(&port->hsr->seqnr_lock, irqflags);
+               frame->sequence_nr = port->hsr->sequence_nr;
+               port->hsr->sequence_nr++;
+               spin_unlock_irqrestore(&port->hsr->seqnr_lock, irqflags);
+       }
+
+       frame->port_rcv = port;
+       check_local_dest(port->hsr, skb, frame);
+
+       return 0;
+}
+
+/* Must be called holding rcu read lock (because of the port parameter) */
+void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port)
+{
+       struct hsr_frame_info frame;
+
+       if (skb_mac_header(skb) != skb->data) {
+               WARN_ONCE(1, "%s:%d: Malformed frame (port_src %s)\n",
+                         __FILE__, __LINE__, port->dev->name);
+               goto out_drop;
+       }
+
+       if (hsr_fill_frame_info(&frame, skb, port) < 0)
+               goto out_drop;
+       hsr_register_frame_in(frame.node_src, port, frame.sequence_nr);
+       hsr_forward_do(&frame);
+
+       if (frame.skb_hsr != NULL)
+               kfree_skb(frame.skb_hsr);
+       if (frame.skb_std != NULL)
+               kfree_skb(frame.skb_std);
+       return;
+
+out_drop:
+       port->dev->stats.tx_dropped++;
+       kfree_skb(skb);
+}
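For context, hsr_forward_skb() is the single entry point for frames arriving on any port. A minimal sketch of how a slave port's rx_handler might call it, assuming a lookup helper such as hsr_port_get_rcu() that maps a slave net_device back to its hsr_port (the real handler lives in hsr_slave.c, which is not part of this hunk):

        static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
        {
                struct sk_buff *skb = *pskb;
                struct hsr_port *port;

                skb = skb_share_check(skb, GFP_ATOMIC);
                if (!skb)
                        return RX_HANDLER_CONSUMED;
                *pskb = skb;

                rcu_read_lock();
                port = hsr_port_get_rcu(skb->dev);      /* assumed helper */
                hsr_forward_skb(skb, port);             /* takes over the skb */
                rcu_read_unlock();

                return RX_HANDLER_CONSUMED;
        }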
diff --git a/net/hsr/hsr_forward.h b/net/hsr/hsr_forward.h
new file mode 100644 (file)
index 0000000..5c5bc4b
--- /dev/null
@@ -0,0 +1,20 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#ifndef __HSR_FORWARD_H
+#define __HSR_FORWARD_H
+
+#include <linux/netdevice.h>
+#include "hsr_main.h"
+
+void hsr_forward_skb(struct sk_buff *skb, struct hsr_port *port);
+
+#endif /* __HSR_FORWARD_H */
index 83e5844..bace124 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  *
  * The HSR spec says never to forward the same frame twice on the same
  * interface. A frame is identified by its source MAC address and its HSR
 #include "hsr_netlink.h"
 
 
-struct node_entry {
-       struct list_head mac_list;
-       unsigned char   MacAddressA[ETH_ALEN];
-       unsigned char   MacAddressB[ETH_ALEN];
-       enum hsr_dev_idx   AddrB_if;    /* The local slave through which AddrB
-                                        * frames are received from this node
-                                        */
-       unsigned long   time_in[HSR_MAX_SLAVE];
-       bool            time_in_stale[HSR_MAX_SLAVE];
-       u16             seq_out[HSR_MAX_DEV];
-       struct rcu_head rcu_head;
+struct hsr_node {
+       struct list_head        mac_list;
+       unsigned char           MacAddressA[ETH_ALEN];
+       unsigned char           MacAddressB[ETH_ALEN];
+       /* Local slave through which AddrB frames are received from this node */
+       enum hsr_port_type      AddrB_port;
+       unsigned long           time_in[HSR_PT_PORTS];
+       bool                    time_in_stale[HSR_PT_PORTS];
+       u16                     seq_out[HSR_PT_PORTS];
+       struct rcu_head         rcu_head;
 };
 
-/*     TODO: use hash lists for mac addresses (linux/jhash.h)?    */
 
+/*     TODO: use hash lists for mac addresses (linux/jhash.h)?    */
 
 
-/* Search for mac entry. Caller must hold rcu read lock.
+/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
+ * false otherwise.
  */
-static struct node_entry *find_node_by_AddrA(struct list_head *node_db,
-                                            const unsigned char addr[ETH_ALEN])
+static bool seq_nr_after(u16 a, u16 b)
 {
-       struct node_entry *node;
-
-       list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressA, addr))
-                       return node;
-       }
+       /* Remove inconsistency where
+        * seq_nr_after(a, b) == seq_nr_before(a, b)
+        */
+       if ((int) b - a == 32768)
+               return false;
 
-       return NULL;
+       return (((s16) (b - a)) < 0);
 }
+#define seq_nr_before(a, b)            seq_nr_after((b), (a))
+#define seq_nr_after_or_eq(a, b)       (!seq_nr_before((a), (b)))
+#define seq_nr_before_or_eq(a, b)      (!seq_nr_after((a), (b)))
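Worked values for this 16-bit ring comparison:

        /* seq_nr_after(5, 3):     (s16)(3 - 5) = -2 < 0 -> true
         * seq_nr_after(2, 65535): (s16)(65535 - 2) = (s16)65533 = -3 < 0
         *                         -> true (2 follows 65535 across the wrap)
         * seq_nr_after(0, 32768): (int)32768 - 0 == 32768 -> forced false,
         *                         so only seq_nr_before(0, 32768) holds.
         */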
 
 
-/* Search for mac entry. Caller must hold rcu read lock.
- */
-static struct node_entry *find_node_by_AddrB(struct list_head *node_db,
-                                            const unsigned char addr[ETH_ALEN])
+bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr)
 {
-       struct node_entry *node;
+       struct hsr_node *node;
 
-       list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressB, addr))
-                       return node;
+       node = list_first_or_null_rcu(&hsr->self_node_db, struct hsr_node,
+                                     mac_list);
+       if (!node) {
+               WARN_ONCE(1, "HSR: No self node\n");
+               return false;
        }
 
-       return NULL;
-}
+       if (ether_addr_equal(addr, node->MacAddressA))
+               return true;
+       if (ether_addr_equal(addr, node->MacAddressB))
+               return true;
 
+       return false;
+}
 
 /* Search for mac entry. Caller must hold rcu read lock.
  */
-struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb)
+static struct hsr_node *find_node_by_AddrA(struct list_head *node_db,
+                                          const unsigned char addr[ETH_ALEN])
 {
-       struct node_entry *node;
-       struct ethhdr *ethhdr;
-
-       if (!skb_mac_header_was_set(skb))
-               return NULL;
-
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+       struct hsr_node *node;
 
        list_for_each_entry_rcu(node, node_db, mac_list) {
-               if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
-                       return node;
-               if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+               if (ether_addr_equal(node->MacAddressA, addr))
                        return node;
        }
 
@@ -102,7 +99,7 @@ int hsr_create_self_node(struct list_head *self_node_db,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN])
 {
-       struct node_entry *node, *oldnode;
+       struct hsr_node *node, *oldnode;
 
        node = kmalloc(sizeof(*node), GFP_KERNEL);
        if (!node)
@@ -113,7 +110,7 @@ int hsr_create_self_node(struct list_head *self_node_db,
 
        rcu_read_lock();
        oldnode = list_first_or_null_rcu(self_node_db,
-                                               struct node_entry, mac_list);
+                                               struct hsr_node, mac_list);
        if (oldnode) {
                list_replace_rcu(&oldnode->mac_list, &node->mac_list);
                rcu_read_unlock();
@@ -128,135 +125,144 @@ int hsr_create_self_node(struct list_head *self_node_db,
 }
 
 
-/* Add/merge node to the database of nodes. 'skb' must contain an HSR
- * supervision frame.
- * - If the supervision header's MacAddressA field is not yet in the database,
- * this frame is from an hitherto unknown node - add it to the database.
- * - If the sender's MAC address is not the same as its MacAddressA address,
- * the node is using PICS_SUBS (address substitution). Record the sender's
- * address as the node's MacAddressB.
- *
- * This function needs to work even if the sender node has changed one of its
- * slaves' MAC addresses. In this case, there are four different cases described
- * by (Addr-changed, received-from) pairs as follows. Note that changing the
- * SlaveA address is equal to changing the node's own address:
- *
- * - (AddrB, SlaveB): The new AddrB will be recorded by PICS_SUBS code since
- *                   node == NULL.
- * - (AddrB, SlaveA): Will work as usual (the AddrB change won't be detected
- *                   from this frame).
- *
- * - (AddrA, SlaveB): The old node will be found. We need to detect this and
- *                   remove the node.
- * - (AddrA, SlaveA): A new node will be registered (non-PICS_SUBS at first).
- *                   The old one will be pruned after HSR_NODE_FORGET_TIME.
- *
- * We also need to detect if the sender's SlaveA and SlaveB cables have been
- * swapped.
+/* Allocate an hsr_node and add it to node_db. 'addr' is the node's MacAddressA;
+ * seq_out is used to initialize filtering of outgoing duplicate frames
+ * originating from the newly added node.
  */
-struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
-                                 struct node_entry *node,
-                                 struct sk_buff *skb,
-                                 enum hsr_dev_idx dev_idx)
+struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+                             u16 seq_out)
 {
-       struct hsr_sup_payload *hsr_sp;
-       struct hsr_ethhdr_sp *hsr_ethsup;
-       int i;
+       struct hsr_node *node;
        unsigned long now;
-
-       hsr_ethsup = (struct hsr_ethhdr_sp *) skb_mac_header(skb);
-       hsr_sp = (struct hsr_sup_payload *) skb->data;
-
-       if (node && !ether_addr_equal(node->MacAddressA, hsr_sp->MacAddressA)) {
-               /* Node has changed its AddrA, frame was received from SlaveB */
-               list_del_rcu(&node->mac_list);
-               kfree_rcu(node, rcu_head);
-               node = NULL;
-       }
-
-       if (node && (dev_idx == node->AddrB_if) &&
-           !ether_addr_equal(node->MacAddressB, hsr_ethsup->ethhdr.h_source)) {
-               /* Cables have been swapped */
-               list_del_rcu(&node->mac_list);
-               kfree_rcu(node, rcu_head);
-               node = NULL;
-       }
-
-       if (node && (dev_idx != node->AddrB_if) &&
-           (node->AddrB_if != HSR_DEV_NONE) &&
-           !ether_addr_equal(node->MacAddressA, hsr_ethsup->ethhdr.h_source)) {
-               /* Cables have been swapped */
-               list_del_rcu(&node->mac_list);
-               kfree_rcu(node, rcu_head);
-               node = NULL;
-       }
-
-       if (node)
-               return node;
-
-       node = find_node_by_AddrA(&hsr_priv->node_db, hsr_sp->MacAddressA);
-       if (node) {
-               /* Node is known, but frame was received from an unknown
-                * address. Node is PICS_SUBS capable; merge its AddrB.
-                */
-               ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
-               node->AddrB_if = dev_idx;
-               return node;
-       }
+       int i;
 
        node = kzalloc(sizeof(*node), GFP_ATOMIC);
        if (!node)
                return NULL;
 
-       ether_addr_copy(node->MacAddressA, hsr_sp->MacAddressA);
-       ether_addr_copy(node->MacAddressB, hsr_ethsup->ethhdr.h_source);
-       if (!ether_addr_equal(hsr_sp->MacAddressA, hsr_ethsup->ethhdr.h_source))
-               node->AddrB_if = dev_idx;
-       else
-               node->AddrB_if = HSR_DEV_NONE;
+       ether_addr_copy(node->MacAddressA, addr);
 
        /* We are only interested in time diffs here, so use current jiffies
         * as initialization. (0 could trigger a spurious ring error warning).
         */
        now = jiffies;
-       for (i = 0; i < HSR_MAX_SLAVE; i++)
+       for (i = 0; i < HSR_PT_PORTS; i++)
                node->time_in[i] = now;
-       for (i = 0; i < HSR_MAX_DEV; i++)
-               node->seq_out[i] = ntohs(hsr_ethsup->hsr_sup.sequence_nr) - 1;
+       for (i = 0; i < HSR_PT_PORTS; i++)
+               node->seq_out[i] = seq_out;
 
-       list_add_tail_rcu(&node->mac_list, &hsr_priv->node_db);
+       list_add_tail_rcu(&node->mac_list, node_db);
 
        return node;
 }
 
+/* Get the hsr_node from which 'skb' was sent.
+ */
+struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+                             bool is_sup)
+{
+       struct hsr_node *node;
+       struct ethhdr *ethhdr;
+       u16 seq_out;
+
+       if (!skb_mac_header_was_set(skb))
+               return NULL;
+
+       ethhdr = (struct ethhdr *) skb_mac_header(skb);
+
+       list_for_each_entry_rcu(node, node_db, mac_list) {
+               if (ether_addr_equal(node->MacAddressA, ethhdr->h_source))
+                       return node;
+               if (ether_addr_equal(node->MacAddressB, ethhdr->h_source))
+                       return node;
+       }
+
+       if (!is_sup)
+               return NULL; /* Only supervision frames may create node entries */
+
+       if (ethhdr->h_proto == htons(ETH_P_PRP)) {
+               /* Use the existing sequence_nr from the tag as starting point
+                * for filtering duplicate frames.
+                */
+               seq_out = hsr_get_skb_sequence_nr(skb) - 1;
+       } else {
+               WARN_ONCE(1, "%s: Non-HSR frame\n", __func__);
+               seq_out = 0;
+       }
+
+       return hsr_add_node(node_db, ethhdr->h_source, seq_out);
+}
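A hypothetical first contact, assuming the incoming HSR tag carries sequence_nr 100:

        /* 1. No list entry matches h_source; is_sup is true and the frame is
         *    ETH_P_PRP, so every seq_out[] slot is initialized to 100 - 1 = 99.
         * 2. hsr_register_frame_out(port, node, 100) then sees 99 < 100 and
         *    accepts the frame as new, bumping seq_out[port->type] to 100.
         */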
+
+/* Use the Supervision frame's info about a possible MacAddressB for merging
+ * nodes whose MacAddressB has previously been registered as a separate
+ * node.
+ */
+void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
+                         struct hsr_port *port_rcv)
+{
+       struct hsr_node *node_real;
+       struct hsr_sup_payload *hsr_sp;
+       struct list_head *node_db;
+       int i;
+
+       skb_pull(skb, sizeof(struct hsr_ethhdr_sp));
+       hsr_sp = (struct hsr_sup_payload *) skb->data;
+
+       if (ether_addr_equal(eth_hdr(skb)->h_source, hsr_sp->MacAddressA))
+               /* Not sent from MacAddressB of a PICS_SUBS capable node */
+               goto done;
+
+       /* Merge node_curr (registered on MacAddressB) into node_real */
+       node_db = &port_rcv->hsr->node_db;
+       node_real = find_node_by_AddrA(node_db, hsr_sp->MacAddressA);
+       if (!node_real)
+               /* No frame received from AddrA of this node yet */
+               node_real = hsr_add_node(node_db, hsr_sp->MacAddressA,
+                                        HSR_SEQNR_START - 1);
+       if (!node_real)
+               goto done; /* No mem */
+       if (node_real == node_curr)
+               /* Node has already been merged */
+               goto done;
+
+       ether_addr_copy(node_real->MacAddressB, eth_hdr(skb)->h_source);
+       for (i = 0; i < HSR_PT_PORTS; i++) {
+               if (!node_curr->time_in_stale[i] &&
+                   time_after(node_curr->time_in[i], node_real->time_in[i])) {
+                       node_real->time_in[i] = node_curr->time_in[i];
+                       node_real->time_in_stale[i] = node_curr->time_in_stale[i];
+               }
+               if (seq_nr_after(node_curr->seq_out[i], node_real->seq_out[i]))
+                       node_real->seq_out[i] = node_curr->seq_out[i];
+       }
+       node_real->AddrB_port = port_rcv->type;
+
+       list_del_rcu(&node_curr->mac_list);
+       kfree_rcu(node_curr, rcu_head);
+
+done:
+       skb_push(skb, sizeof(struct hsr_ethhdr_sp));
+}
+
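A hypothetical merge sequence: a remote node first transmits only ordinary frames from its B interface, so hsr_get_node() creates an entry keyed on MacAddressB. When one of its supervision frames arrives (the payload carries the node's MacAddressA), the flow is:

        /* 1. find_node_by_AddrA() locates (or creates) the MacAddressA entry.
         * 2. The B entry's newer time_in[] and seq_out[] values are folded
         *    into the MacAddressA entry, and AddrB_port is recorded.
         * 3. The now-redundant B entry is unlinked and freed via RCU.
         */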
 
 /* 'skb' is a frame meant for this host, that is to be passed to upper layers.
  *
- * If the frame was sent by a node's B interface, replace the sender
+ * If the frame was sent by a node's B interface, replace the source
  * address with that node's "official" address (MacAddressA) so that upper
  * layers recognize where it came from.
  */
-void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
+void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb)
 {
-       struct ethhdr *ethhdr;
-       struct node_entry *node;
-
        if (!skb_mac_header_was_set(skb)) {
                WARN_ONCE(1, "%s: Mac header not set\n", __func__);
                return;
        }
-       ethhdr = (struct ethhdr *) skb_mac_header(skb);
 
-       rcu_read_lock();
-       node = find_node_by_AddrB(&hsr_priv->node_db, ethhdr->h_source);
-       if (node)
-               ether_addr_copy(ethhdr->h_source, node->MacAddressA);
-       rcu_read_unlock();
+       memcpy(&eth_hdr(skb)->h_source, node->MacAddressA, ETH_ALEN);
 }
 
-
 /* 'skb' is a frame meant for another host.
- * 'hsr_dev_idx' is the HSR index of the outgoing device
+ * 'port' is the outgoing interface
  *
  * Substitute the target (dest) MAC address if necessary, so that it matches the
  * recipient interface MAC address, regardless of whether that is the
@@ -264,47 +270,44 @@ void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb)
  * This is needed to keep the packets flowing through switches that learn on
  * which "side" the different interfaces are.
  */
-void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
-                        enum hsr_dev_idx dev_idx)
+void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+                        struct hsr_port *port)
 {
-       struct node_entry *node;
+       struct hsr_node *node_dst;
 
-       rcu_read_lock();
-       node = find_node_by_AddrA(&hsr_priv->node_db, ethhdr->h_dest);
-       if (node && (node->AddrB_if == dev_idx))
-               ether_addr_copy(ethhdr->h_dest, node->MacAddressB);
-       rcu_read_unlock();
-}
+       if (!skb_mac_header_was_set(skb)) {
+               WARN_ONCE(1, "%s: Mac header not set\n", __func__);
+               return;
+       }
 
+       if (!is_unicast_ether_addr(eth_hdr(skb)->h_dest))
+               return;
 
-/* seq_nr_after(a, b) - return true if a is after (higher in sequence than) b,
- * false otherwise.
- */
-static bool seq_nr_after(u16 a, u16 b)
-{
-       /* Remove inconsistency where
-        * seq_nr_after(a, b) == seq_nr_before(a, b)
-        */
-       if ((int) b - a == 32768)
-               return false;
+       node_dst = find_node_by_AddrA(&port->hsr->node_db, eth_hdr(skb)->h_dest);
+       if (!node_dst) {
+               WARN_ONCE(1, "%s: Unknown node\n", __func__);
+               return;
+       }
+       if (port->type != node_dst->AddrB_port)
+               return;
 
-       return (((s16) (b - a)) < 0);
+       ether_addr_copy(eth_hdr(skb)->h_dest, node_dst->MacAddressB);
 }
-#define seq_nr_before(a, b)            seq_nr_after((b), (a))
-#define seq_nr_after_or_eq(a, b)       (!seq_nr_before((a), (b)))
-#define seq_nr_before_or_eq(a, b)      (!seq_nr_after((a), (b)))
 
 
-void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
+void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+                          u16 sequence_nr)
 {
-       if ((dev_idx < 0) || (dev_idx >= HSR_MAX_SLAVE)) {
-               WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
+       /* Don't register incoming frames without a valid sequence number. This
+        * ensures that entries of restarted nodes get pruned so that they can
+        * re-register and resume communications.
+        */
+       if (seq_nr_before(sequence_nr, node->seq_out[port->type]))
                return;
-       }
-       node->time_in[dev_idx] = jiffies;
-       node->time_in_stale[dev_idx] = false;
-}
 
+       node->time_in[port->type] = jiffies;
+       node->time_in_stale[port->type] = false;
+}
 
 /* 'skb' is a HSR Ethernet frame (with a HSR tag inserted), with a valid
  * ethhdr->h_source address and skb->mac_header set.
@@ -314,102 +317,87 @@ void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx)
  *      0 otherwise, or
  *      negative error code on error
  */
-int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
-                          struct sk_buff *skb)
+int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+                          u16 sequence_nr)
 {
-       struct hsr_ethhdr *hsr_ethhdr;
-       u16 sequence_nr;
-
-       if ((dev_idx < 0) || (dev_idx >= HSR_MAX_DEV)) {
-               WARN_ONCE(1, "%s: Invalid dev_idx (%d)\n", __func__, dev_idx);
-               return -EINVAL;
-       }
-       if (!skb_mac_header_was_set(skb)) {
-               WARN_ONCE(1, "%s: Mac header not set\n", __func__);
-               return -EINVAL;
-       }
-       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
-
-       sequence_nr = ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
-       if (seq_nr_before_or_eq(sequence_nr, node->seq_out[dev_idx]))
+       if (seq_nr_before_or_eq(sequence_nr, node->seq_out[port->type]))
                return 1;
 
-       node->seq_out[dev_idx] = sequence_nr;
+       node->seq_out[port->type] = sequence_nr;
        return 0;
 }
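A hypothetical trace of the duplicate discard, assuming seq_out[port->type] starts at 10:

        /* hsr_register_frame_out(port, node, 10) -> 1 (duplicate, drop)
         * hsr_register_frame_out(port, node, 11) -> 0 (new; seq_out = 11)
         * hsr_register_frame_out(port, node, 11) -> 1 (the ring's 2nd copy)
         */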
 
 
-
-static bool is_late(struct node_entry *node, enum hsr_dev_idx dev_idx)
+static struct hsr_port *get_late_port(struct hsr_priv *hsr,
+                                     struct hsr_node *node)
 {
-       enum hsr_dev_idx other;
-
-       if (node->time_in_stale[dev_idx])
-               return true;
-
-       if (dev_idx == HSR_DEV_SLAVE_A)
-               other = HSR_DEV_SLAVE_B;
-       else
-               other = HSR_DEV_SLAVE_A;
-
-       if (node->time_in_stale[other])
-               return false;
+       if (node->time_in_stale[HSR_PT_SLAVE_A])
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (node->time_in_stale[HSR_PT_SLAVE_B])
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+
+       if (time_after(node->time_in[HSR_PT_SLAVE_B],
+                      node->time_in[HSR_PT_SLAVE_A] +
+                                       msecs_to_jiffies(MAX_SLAVE_DIFF)))
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (time_after(node->time_in[HSR_PT_SLAVE_A],
+                      node->time_in[HSR_PT_SLAVE_B] +
+                                       msecs_to_jiffies(MAX_SLAVE_DIFF)))
+               return hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
 
-       if (time_after(node->time_in[other], node->time_in[dev_idx] +
-                      msecs_to_jiffies(MAX_SLAVE_DIFF)))
-               return true;
-
-       return false;
+       return NULL;
 }
 
 
 /* Remove stale sequence_nr records. Called by timer every
  * HSR_LIFE_CHECK_INTERVAL (two seconds or so).
  */
-void hsr_prune_nodes(struct hsr_priv *hsr_priv)
+void hsr_prune_nodes(unsigned long data)
 {
-       struct node_entry *node;
+       struct hsr_priv *hsr;
+       struct hsr_node *node;
+       struct hsr_port *port;
        unsigned long timestamp;
        unsigned long time_a, time_b;
 
+       hsr = (struct hsr_priv *) data;
+
        rcu_read_lock();
-       list_for_each_entry_rcu(node, &hsr_priv->node_db, mac_list) {
+       list_for_each_entry_rcu(node, &hsr->node_db, mac_list) {
                /* Shorthand */
-               time_a = node->time_in[HSR_DEV_SLAVE_A];
-               time_b = node->time_in[HSR_DEV_SLAVE_B];
+               time_a = node->time_in[HSR_PT_SLAVE_A];
+               time_b = node->time_in[HSR_PT_SLAVE_B];
 
                /* Check for timestamps old enough to risk wrap-around */
                if (time_after(jiffies, time_a + MAX_JIFFY_OFFSET/2))
-                       node->time_in_stale[HSR_DEV_SLAVE_A] = true;
+                       node->time_in_stale[HSR_PT_SLAVE_A] = true;
                if (time_after(jiffies, time_b + MAX_JIFFY_OFFSET/2))
-                       node->time_in_stale[HSR_DEV_SLAVE_B] = true;
+                       node->time_in_stale[HSR_PT_SLAVE_B] = true;
 
                /* Get age of newest frame from node.
                 * At least one time_in is OK here; nodes get pruned long
                 * before both time_ins can get stale
                 */
                timestamp = time_a;
-               if (node->time_in_stale[HSR_DEV_SLAVE_A] ||
-                   (!node->time_in_stale[HSR_DEV_SLAVE_B] &&
+               if (node->time_in_stale[HSR_PT_SLAVE_A] ||
+                   (!node->time_in_stale[HSR_PT_SLAVE_B] &&
                    time_after(time_b, time_a)))
                        timestamp = time_b;
 
                /* Warn of ring error only as long as we get frames at all */
                if (time_is_after_jiffies(timestamp +
                                        msecs_to_jiffies(1.5*MAX_SLAVE_DIFF))) {
-
-                       if (is_late(node, HSR_DEV_SLAVE_A))
-                               hsr_nl_ringerror(hsr_priv, node->MacAddressA,
-                                                HSR_DEV_SLAVE_A);
-                       else if (is_late(node, HSR_DEV_SLAVE_B))
-                               hsr_nl_ringerror(hsr_priv, node->MacAddressA,
-                                                HSR_DEV_SLAVE_B);
+                       rcu_read_lock();
+                       port = get_late_port(hsr, node);
+                       if (port != NULL)
+                               hsr_nl_ringerror(hsr, node->MacAddressA, port);
+                       rcu_read_unlock();
                }
 
                /* Prune old entries */
                if (time_is_before_jiffies(timestamp +
                                        msecs_to_jiffies(HSR_NODE_FORGET_TIME))) {
-                       hsr_nl_nodedown(hsr_priv, node->MacAddressA);
+                       hsr_nl_nodedown(hsr, node->MacAddressA);
                        list_del_rcu(&node->mac_list);
                        /* Note that we need to free this entry later: */
                        kfree_rcu(node, rcu_head);
@@ -419,21 +407,21 @@ void hsr_prune_nodes(struct hsr_priv *hsr_priv)
 }
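The new callback signature matches the pre-4.15 timer API, where the timer carries an opaque unsigned long. A sketch of how the owning hsr_priv would arm its new prune_timer field (the actual setup lives in hsr_device.c, which is not shown in this hunk):

        init_timer(&hsr->prune_timer);
        hsr->prune_timer.function = hsr_prune_nodes;
        hsr->prune_timer.data = (unsigned long) hsr;
        mod_timer(&hsr->prune_timer, jiffies + msecs_to_jiffies(PRUNE_PERIOD));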
 
 
-void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
                        unsigned char addr[ETH_ALEN])
 {
-       struct node_entry *node;
+       struct hsr_node *node;
 
        if (!_pos) {
-               node = list_first_or_null_rcu(&hsr_priv->node_db,
-                                               struct node_entry, mac_list);
+               node = list_first_or_null_rcu(&hsr->node_db,
+                                             struct hsr_node, mac_list);
                if (node)
                        ether_addr_copy(addr, node->MacAddressA);
                return node;
        }
 
        node = _pos;
-       list_for_each_entry_continue_rcu(node, &hsr_priv->node_db, mac_list) {
+       list_for_each_entry_continue_rcu(node, &hsr->node_db, mac_list) {
                ether_addr_copy(addr, node->MacAddressA);
                return node;
        }
@@ -442,7 +430,7 @@ void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
 }
 
 
-int hsr_get_node_data(struct hsr_priv *hsr_priv,
+int hsr_get_node_data(struct hsr_priv *hsr,
                      const unsigned char *addr,
                      unsigned char addr_b[ETH_ALEN],
                      unsigned int *addr_b_ifindex,
@@ -451,12 +439,13 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                      int *if2_age,
                      u16 *if2_seq)
 {
-       struct node_entry *node;
+       struct hsr_node *node;
+       struct hsr_port *port;
        unsigned long tdiff;
 
 
        rcu_read_lock();
-       node = find_node_by_AddrA(&hsr_priv->node_db, addr);
+       node = find_node_by_AddrA(&hsr->node_db, addr);
        if (!node) {
                rcu_read_unlock();
                return -ENOENT; /* No such entry */
@@ -464,8 +453,8 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
 
        ether_addr_copy(addr_b, node->MacAddressB);
 
-       tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_A];
-       if (node->time_in_stale[HSR_DEV_SLAVE_A])
+       tdiff = jiffies - node->time_in[HSR_PT_SLAVE_A];
+       if (node->time_in_stale[HSR_PT_SLAVE_A])
                *if1_age = INT_MAX;
 #if HZ <= MSEC_PER_SEC
        else if (tdiff > msecs_to_jiffies(INT_MAX))
@@ -474,8 +463,8 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
        else
                *if1_age = jiffies_to_msecs(tdiff);
 
-       tdiff = jiffies - node->time_in[HSR_DEV_SLAVE_B];
-       if (node->time_in_stale[HSR_DEV_SLAVE_B])
+       tdiff = jiffies - node->time_in[HSR_PT_SLAVE_B];
+       if (node->time_in_stale[HSR_PT_SLAVE_B])
                *if2_age = INT_MAX;
 #if HZ <= MSEC_PER_SEC
        else if (tdiff > msecs_to_jiffies(INT_MAX))
@@ -485,13 +474,15 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                *if2_age = jiffies_to_msecs(tdiff);
 
        /* Present sequence numbers as if they were incoming on interface */
-       *if1_seq = node->seq_out[HSR_DEV_SLAVE_B];
-       *if2_seq = node->seq_out[HSR_DEV_SLAVE_A];
+       *if1_seq = node->seq_out[HSR_PT_SLAVE_B];
+       *if2_seq = node->seq_out[HSR_PT_SLAVE_A];
 
-       if ((node->AddrB_if != HSR_DEV_NONE) && hsr_priv->slave[node->AddrB_if])
-               *addr_b_ifindex = hsr_priv->slave[node->AddrB_if]->ifindex;
-       else
+       if (node->AddrB_port != HSR_PT_NONE) {
+               port = hsr_port_get_hsr(hsr, node->AddrB_port);
+               *addr_b_ifindex = port->dev->ifindex;
+       } else {
                *addr_b_ifindex = -1;
+       }
 
        rcu_read_unlock();
 
index e6c4022..438b40f 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,42 +6,43 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
-#ifndef _HSR_FRAMEREG_H
-#define _HSR_FRAMEREG_H
+#ifndef __HSR_FRAMEREG_H
+#define __HSR_FRAMEREG_H
 
 #include "hsr_main.h"
 
-struct node_entry;
+struct hsr_node;
 
-struct node_entry *hsr_find_node(struct list_head *node_db, struct sk_buff *skb);
+struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
+                             u16 seq_out);
+struct hsr_node *hsr_get_node(struct list_head *node_db, struct sk_buff *skb,
+                             bool is_sup);
+void hsr_handle_sup_frame(struct sk_buff *skb, struct hsr_node *node_curr,
+                         struct hsr_port *port);
+bool hsr_addr_is_self(struct hsr_priv *hsr, unsigned char *addr);
 
-struct node_entry *hsr_merge_node(struct hsr_priv *hsr_priv,
-                                 struct node_entry *node,
-                                 struct sk_buff *skb,
-                                 enum hsr_dev_idx dev_idx);
+void hsr_addr_subst_source(struct hsr_node *node, struct sk_buff *skb);
+void hsr_addr_subst_dest(struct hsr_node *node_src, struct sk_buff *skb,
+                        struct hsr_port *port);
 
-void hsr_addr_subst_source(struct hsr_priv *hsr_priv, struct sk_buff *skb);
-void hsr_addr_subst_dest(struct hsr_priv *hsr_priv, struct ethhdr *ethhdr,
-                        enum hsr_dev_idx dev_idx);
+void hsr_register_frame_in(struct hsr_node *node, struct hsr_port *port,
+                          u16 sequence_nr);
+int hsr_register_frame_out(struct hsr_port *port, struct hsr_node *node,
+                          u16 sequence_nr);
 
-void hsr_register_frame_in(struct node_entry *node, enum hsr_dev_idx dev_idx);
-
-int hsr_register_frame_out(struct node_entry *node, enum hsr_dev_idx dev_idx,
-                          struct sk_buff *skb);
-
-void hsr_prune_nodes(struct hsr_priv *hsr_priv);
+void hsr_prune_nodes(unsigned long data);
 
 int hsr_create_self_node(struct list_head *self_node_db,
                         unsigned char addr_a[ETH_ALEN],
                         unsigned char addr_b[ETH_ALEN]);
 
-void *hsr_get_next_node(struct hsr_priv *hsr_priv, void *_pos,
+void *hsr_get_next_node(struct hsr_priv *hsr, void *_pos,
                        unsigned char addr[ETH_ALEN]);
 
-int hsr_get_node_data(struct hsr_priv *hsr_priv,
+int hsr_get_node_data(struct hsr_priv *hsr,
                      const unsigned char *addr,
                      unsigned char addr_b[ETH_ALEN],
                      unsigned int *addr_b_ifindex,
@@ -50,4 +51,4 @@ int hsr_get_node_data(struct hsr_priv *hsr_priv,
                      int *if2_age,
                      u16 *if2_seq);
 
-#endif /* _HSR_FRAMEREG_H */
+#endif /* __HSR_FRAMEREG_H */
index 3fee521..779d28b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,11 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
- *
- * In addition to routines for registering and unregistering HSR support, this
- * file also contains the receive routine that handles all incoming frames with
- * Ethertype (protocol) ETH_P_PRP (HSRv0), and network device event handling.
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
 #include <linux/netdevice.h>
 #include "hsr_device.h"
 #include "hsr_netlink.h"
 #include "hsr_framereg.h"
-
-
-/* List of all registered virtual HSR devices */
-static LIST_HEAD(hsr_list);
-
-void register_hsr_master(struct hsr_priv *hsr_priv)
-{
-       list_add_tail_rcu(&hsr_priv->hsr_list, &hsr_list);
-}
-
-void unregister_hsr_master(struct hsr_priv *hsr_priv)
-{
-       struct hsr_priv *hsr_priv_it;
-
-       list_for_each_entry(hsr_priv_it, &hsr_list, hsr_list)
-               if (hsr_priv_it == hsr_priv) {
-                       list_del_rcu(&hsr_priv_it->hsr_list);
-                       return;
-               }
-}
-
-bool is_hsr_slave(struct net_device *dev)
-{
-       struct hsr_priv *hsr_priv_it;
-
-       list_for_each_entry_rcu(hsr_priv_it, &hsr_list, hsr_list) {
-               if (dev == hsr_priv_it->slave[0])
-                       return true;
-               if (dev == hsr_priv_it->slave[1])
-                       return true;
-       }
-
-       return false;
-}
-
-
-/* If dev is a HSR slave device, return the virtual master device. Return NULL
- * otherwise.
- */
-static struct hsr_priv *get_hsr_master(struct net_device *dev)
-{
-       struct hsr_priv *hsr_priv;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
-               if ((dev == hsr_priv->slave[0]) ||
-                   (dev == hsr_priv->slave[1])) {
-                       rcu_read_unlock();
-                       return hsr_priv;
-               }
-
-       rcu_read_unlock();
-       return NULL;
-}
-
-
-/* If dev is a HSR slave device, return the other slave device. Return NULL
- * otherwise.
- */
-static struct net_device *get_other_slave(struct hsr_priv *hsr_priv,
-                                         struct net_device *dev)
-{
-       if (dev == hsr_priv->slave[0])
-               return hsr_priv->slave[1];
-       if (dev == hsr_priv->slave[1])
-               return hsr_priv->slave[0];
-
-       return NULL;
-}
+#include "hsr_slave.h"
 
 
 static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
                             void *ptr)
 {
-       struct net_device *slave, *other_slave;
-       struct hsr_priv *hsr_priv;
-       int old_operstate;
+       struct net_device *dev;
+       struct hsr_port *port, *master;
+       struct hsr_priv *hsr;
        int mtu_max;
        int res;
-       struct net_device *dev;
 
        dev = netdev_notifier_info_to_dev(ptr);
-
-       hsr_priv = get_hsr_master(dev);
-       if (hsr_priv) {
-               /* dev is a slave device */
-               slave = dev;
-               other_slave = get_other_slave(hsr_priv, slave);
-       } else {
+       port = hsr_port_get_rtnl(dev);
+       if (port == NULL) {
                if (!is_hsr_master(dev))
-                       return NOTIFY_DONE;
-               hsr_priv = netdev_priv(dev);
-               slave = hsr_priv->slave[0];
-               other_slave = hsr_priv->slave[1];
+                       return NOTIFY_DONE;     /* Not an HSR device */
+               hsr = netdev_priv(dev);
+               port = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       } else {
+               hsr = port->hsr;
        }
 
        switch (event) {
        case NETDEV_UP:         /* Administrative state DOWN */
        case NETDEV_DOWN:       /* Administrative state UP */
        case NETDEV_CHANGE:     /* Link (carrier) state changes */
-               old_operstate = hsr_priv->dev->operstate;
-               hsr_set_carrier(hsr_priv->dev, slave, other_slave);
-               /* netif_stacked_transfer_operstate() cannot be used here since
-                * it doesn't set IF_OPER_LOWERLAYERDOWN (?)
-                */
-               hsr_set_operstate(hsr_priv->dev, slave, other_slave);
-               hsr_check_announce(hsr_priv->dev, old_operstate);
+               hsr_check_carrier_and_operstate(hsr);
                break;
        case NETDEV_CHANGEADDR:
-
-               /* This should not happen since there's no ndo_set_mac_address()
-                * for HSR devices - i.e. not supported.
-                */
-               if (dev == hsr_priv->dev)
+               if (port->type == HSR_PT_MASTER) {
+                       /* This should not happen since there's no
+                        * ndo_set_mac_address() for HSR devices - i.e. not
+                        * supported.
+                        */
                        break;
+               }
 
-               if (dev == hsr_priv->slave[0])
-                       ether_addr_copy(hsr_priv->dev->dev_addr,
-                                       hsr_priv->slave[0]->dev_addr);
+               master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+
+               if (port->type == HSR_PT_SLAVE_A) {
+                       ether_addr_copy(master->dev->dev_addr, dev->dev_addr);
+                       call_netdevice_notifiers(NETDEV_CHANGEADDR, master->dev);
+               }
 
                /* Make sure we recognize frames from ourselves in hsr_rcv() */
-               res = hsr_create_self_node(&hsr_priv->self_node_db,
-                                          hsr_priv->dev->dev_addr,
-                                          hsr_priv->slave[1] ?
-                                               hsr_priv->slave[1]->dev_addr :
-                                               hsr_priv->dev->dev_addr);
+               port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+               res = hsr_create_self_node(&hsr->self_node_db,
+                                          master->dev->dev_addr,
+                                          port ?
+                                               port->dev->dev_addr :
+                                               master->dev->dev_addr);
                if (res)
-                       netdev_warn(hsr_priv->dev,
+                       netdev_warn(master->dev,
                                    "Could not update HSR node address.\n");
-
-               if (dev == hsr_priv->slave[0])
-                       call_netdevice_notifiers(NETDEV_CHANGEADDR, hsr_priv->dev);
                break;
        case NETDEV_CHANGEMTU:
-               if (dev == hsr_priv->dev)
+               if (port->type == HSR_PT_MASTER)
                        break; /* Handled in ndo_change_mtu() */
-               mtu_max = hsr_get_max_mtu(hsr_priv);
-               if (hsr_priv->dev->mtu > mtu_max)
-                       dev_set_mtu(hsr_priv->dev, mtu_max);
+               mtu_max = hsr_get_max_mtu(port->hsr);
+               master = hsr_port_get_hsr(port->hsr, HSR_PT_MASTER);
+               master->dev->mtu = mtu_max;
                break;
        case NETDEV_UNREGISTER:
-               if (dev == hsr_priv->slave[0])
-                       hsr_priv->slave[0] = NULL;
-               if (dev == hsr_priv->slave[1])
-                       hsr_priv->slave[1] = NULL;
-
-               /* There should really be a way to set a new slave device... */
-
+               hsr_del_port(port);
                break;
        case NETDEV_PRE_TYPE_CHANGE:
                /* HSR works only on Ethernet devices. Refuse slave to change
@@ -181,255 +94,16 @@ static int hsr_netdev_notify(struct notifier_block *nb, unsigned long event,
 }
 
 
-static struct timer_list prune_timer;
-
-static void prune_nodes_all(unsigned long data)
-{
-       struct hsr_priv *hsr_priv;
-
-       rcu_read_lock();
-       list_for_each_entry_rcu(hsr_priv, &hsr_list, hsr_list)
-               hsr_prune_nodes(hsr_priv);
-       rcu_read_unlock();
-
-       prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
-       add_timer(&prune_timer);
-}
-
-
-static struct sk_buff *hsr_pull_tag(struct sk_buff *skb)
+struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt)
 {
-       struct hsr_tag *hsr_tag;
-       struct sk_buff *skb2;
-
-       skb2 = skb_share_check(skb, GFP_ATOMIC);
-       if (unlikely(!skb2))
-               goto err_free;
-       skb = skb2;
-
-       if (unlikely(!pskb_may_pull(skb, HSR_TAGLEN)))
-               goto err_free;
+       struct hsr_port *port;
 
-       hsr_tag = (struct hsr_tag *) skb->data;
-       skb->protocol = hsr_tag->encap_proto;
-       skb_pull(skb, HSR_TAGLEN);
-
-       return skb;
-
-err_free:
-       kfree_skb(skb);
+       hsr_for_each_port(hsr, port)
+               if (port->type == pt)
+                       return port;
        return NULL;
 }
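Since hsr_for_each_port() walks an RCU list, callers of hsr_port_get_hsr() must be inside an RCU read-side section (or hold a lock that pins the port list). An illustrative call, not taken from this patch:

        rcu_read_lock();
        port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
        if (port)
                netdev_dbg(port->dev, "slave A is attached\n");
        rcu_read_unlock();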
 
-
-/* The uses I can see for these HSR supervision frames are:
- * 1) Use the frames that are sent after node initialization ("HSR_TLV.Type =
- *    22") to reset any sequence_nr counters belonging to that node. Useful if
- *    the other node's counter has been reset for some reason.
- *    --
- *    Or not - resetting the counter and bridging the frame would create a
- *    loop, unfortunately.
- *
- * 2) Use the LifeCheck frames to detect ring breaks. I.e. if no LifeCheck
- *    frame is received from a particular node, we know something is wrong.
- *    We just register these (as with normal frames) and throw them away.
- *
- * 3) Allow different MAC addresses for the two slave interfaces, using the
- *    MacAddressA field.
- */
-static bool is_supervision_frame(struct hsr_priv *hsr_priv, struct sk_buff *skb)
-{
-       struct hsr_sup_tag *hsr_stag;
-
-       if (!ether_addr_equal(eth_hdr(skb)->h_dest,
-                             hsr_priv->sup_multicast_addr))
-               return false;
-
-       hsr_stag = (struct hsr_sup_tag *) skb->data;
-       if (get_hsr_stag_path(hsr_stag) != 0x0f)
-               return false;
-       if ((hsr_stag->HSR_TLV_Type != HSR_TLV_ANNOUNCE) &&
-           (hsr_stag->HSR_TLV_Type != HSR_TLV_LIFE_CHECK))
-               return false;
-       if (hsr_stag->HSR_TLV_Length != 12)
-               return false;
-
-       return true;
-}
-
-
-/* Implementation somewhat according to IEC-62439-3, p. 43
- */
-static int hsr_rcv(struct sk_buff *skb, struct net_device *dev,
-                  struct packet_type *pt, struct net_device *orig_dev)
-{
-       struct hsr_priv *hsr_priv;
-       struct net_device *other_slave;
-       struct node_entry *node;
-       bool deliver_to_self;
-       struct sk_buff *skb_deliver;
-       enum hsr_dev_idx dev_in_idx, dev_other_idx;
-       bool dup_out;
-       int ret;
-
-       hsr_priv = get_hsr_master(dev);
-
-       if (!hsr_priv) {
-               /* Non-HSR-slave device 'dev' is connected to a HSR network */
-               kfree_skb(skb);
-               dev->stats.rx_errors++;
-               return NET_RX_SUCCESS;
-       }
-
-       if (dev == hsr_priv->slave[0]) {
-               dev_in_idx = HSR_DEV_SLAVE_A;
-               dev_other_idx = HSR_DEV_SLAVE_B;
-       } else {
-               dev_in_idx = HSR_DEV_SLAVE_B;
-               dev_other_idx = HSR_DEV_SLAVE_A;
-       }
-
-       node = hsr_find_node(&hsr_priv->self_node_db, skb);
-       if (node) {
-               /* Always kill frames sent by ourselves */
-               kfree_skb(skb);
-               return NET_RX_SUCCESS;
-       }
-
-       /* Is this frame a candidate for local reception? */
-       deliver_to_self = false;
-       if ((skb->pkt_type == PACKET_HOST) ||
-           (skb->pkt_type == PACKET_MULTICAST) ||
-           (skb->pkt_type == PACKET_BROADCAST))
-               deliver_to_self = true;
-       else if (ether_addr_equal(eth_hdr(skb)->h_dest,
-                                    hsr_priv->dev->dev_addr)) {
-               skb->pkt_type = PACKET_HOST;
-               deliver_to_self = true;
-       }
-
-
-       rcu_read_lock(); /* node_db */
-       node = hsr_find_node(&hsr_priv->node_db, skb);
-
-       if (is_supervision_frame(hsr_priv, skb)) {
-               skb_pull(skb, sizeof(struct hsr_sup_tag));
-               node = hsr_merge_node(hsr_priv, node, skb, dev_in_idx);
-               if (!node) {
-                       rcu_read_unlock(); /* node_db */
-                       kfree_skb(skb);
-                       hsr_priv->dev->stats.rx_dropped++;
-                       return NET_RX_DROP;
-               }
-               skb_push(skb, sizeof(struct hsr_sup_tag));
-               deliver_to_self = false;
-       }
-
-       if (!node) {
-               /* Source node unknown; this might be a HSR frame from
-                * another net (different multicast address). Ignore it.
-                */
-               rcu_read_unlock(); /* node_db */
-               kfree_skb(skb);
-               return NET_RX_SUCCESS;
-       }
-
-       /* Register ALL incoming frames as outgoing through the other interface.
-        * This allows us to register frames as incoming only if they are valid
-        * for the receiving interface, without using a specific counter for
-        * incoming frames.
-        */
-       dup_out = hsr_register_frame_out(node, dev_other_idx, skb);
-       if (!dup_out)
-               hsr_register_frame_in(node, dev_in_idx);
-
-       /* Forward this frame? */
-       if (!dup_out && (skb->pkt_type != PACKET_HOST))
-               other_slave = get_other_slave(hsr_priv, dev);
-       else
-               other_slave = NULL;
-
-       if (hsr_register_frame_out(node, HSR_DEV_MASTER, skb))
-               deliver_to_self = false;
-
-       rcu_read_unlock(); /* node_db */
-
-       if (!deliver_to_self && !other_slave) {
-               kfree_skb(skb);
-               /* Circulated frame; silently remove it. */
-               return NET_RX_SUCCESS;
-       }
-
-       skb_deliver = skb;
-       if (deliver_to_self && other_slave) {
-               /* skb_clone() is not enough since we will strip the hsr tag
-                * and do address substitution below
-                */
-               skb_deliver = pskb_copy(skb, GFP_ATOMIC);
-               if (!skb_deliver) {
-                       deliver_to_self = false;
-                       hsr_priv->dev->stats.rx_dropped++;
-               }
-       }
-
-       if (deliver_to_self) {
-               bool multicast_frame;
-
-               skb_deliver = hsr_pull_tag(skb_deliver);
-               if (!skb_deliver) {
-                       hsr_priv->dev->stats.rx_dropped++;
-                       goto forward;
-               }
-#if !defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
-               /* Move everything in the header that is after the HSR tag,
-                * to work around alignment problems caused by the 6-byte HSR
-                * tag. In practice, this removes/overwrites the HSR tag in
-                * the header and restores a "standard" packet.
-                */
-               memmove(skb_deliver->data - HSR_TAGLEN, skb_deliver->data,
-                       skb_headlen(skb_deliver));
-
-               /* Adjust skb members so they correspond with the move above.
-                * This cannot possibly underflow skb->data since hsr_pull_tag()
-                * above succeeded.
-                * At this point in the protocol stack, the transport and
-                * network headers have not been set yet, and we haven't touched
-                * the mac header nor the head. So we only need to adjust data
-                * and tail:
-                */
-               skb_deliver->data -= HSR_TAGLEN;
-               skb_deliver->tail -= HSR_TAGLEN;
-#endif
-               skb_deliver->dev = hsr_priv->dev;
-               hsr_addr_subst_source(hsr_priv, skb_deliver);
-               multicast_frame = (skb_deliver->pkt_type == PACKET_MULTICAST);
-               ret = netif_rx(skb_deliver);
-               if (ret == NET_RX_DROP) {
-                       hsr_priv->dev->stats.rx_dropped++;
-               } else {
-                       hsr_priv->dev->stats.rx_packets++;
-                       hsr_priv->dev->stats.rx_bytes += skb->len;
-                       if (multicast_frame)
-                               hsr_priv->dev->stats.multicast++;
-               }
-       }
-
-forward:
-       if (other_slave) {
-               skb_push(skb, ETH_HLEN);
-               skb->dev = other_slave;
-               dev_queue_xmit(skb);
-       }
-
-       return NET_RX_SUCCESS;
-}
-
-
-static struct packet_type hsr_pt __read_mostly = {
-       .type = htons(ETH_P_PRP),
-       .func = hsr_rcv,
-};
-
 static struct notifier_block hsr_nb = {
        .notifier_call = hsr_netdev_notify,     /* Slave event notifications */
 };
@@ -439,18 +113,9 @@ static int __init hsr_init(void)
 {
        int res;
 
-       BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_TAGLEN);
-
-       dev_add_pack(&hsr_pt);
-
-       init_timer(&prune_timer);
-       prune_timer.function = prune_nodes_all;
-       prune_timer.data = 0;
-       prune_timer.expires = jiffies + msecs_to_jiffies(PRUNE_PERIOD);
-       add_timer(&prune_timer);
+       BUILD_BUG_ON(sizeof(struct hsr_tag) != HSR_HLEN);
 
        register_netdevice_notifier(&hsr_nb);
-
        res = hsr_netlink_init();
 
        return res;
@@ -459,9 +124,7 @@ static int __init hsr_init(void)
 static void __exit hsr_exit(void)
 {
        unregister_netdevice_notifier(&hsr_nb);
-       del_timer_sync(&prune_timer);
        hsr_netlink_exit();
-       dev_remove_pack(&hsr_pt);
 }
 
 module_init(hsr_init);
index 56fe060..5a9c699 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,11 +6,11 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
-#ifndef _HSR_PRIVATE_H
-#define _HSR_PRIVATE_H
+#ifndef __HSR_PRIVATE_H
+#define __HSR_PRIVATE_H
 
 #include <linux/netdevice.h>
 #include <linux/list.h>
@@ -29,6 +29,7 @@
  * each node differ before we notify of communication problem?
  */
 #define MAX_SLAVE_DIFF                  3000 /* ms */
+#define HSR_SEQNR_START                        (USHRT_MAX - 1024)
 
 
 /* How often shall we check for broken ring and remove node entries older than
  * path, LSDU_size, sequence Nr }. But we let eth_header() create { h_dest,
  * h_source, h_proto = 0x88FB }, and add { path, LSDU_size, sequence Nr,
  * encapsulated protocol } instead.
+ *
+ * Field names as defined in the IEC:2010 standard for HSR.
  */
-#define HSR_TAGLEN     6
-
-/* Field names below as defined in the IEC:2010 standard for HSR. */
 struct hsr_tag {
        __be16          path_and_LSDU_size;
        __be16          sequence_nr;
        __be16          encap_proto;
 } __packed;
 
+#define HSR_HLEN       6
 
 /* The helper functions below assumes that 'path' occupies the 4 most
  * significant bits of the 16-bit field shared by 'path' and 'LSDU_size' (or
@@ -136,31 +137,47 @@ struct hsr_ethhdr_sp {
 } __packed;
 
 
-enum hsr_dev_idx {
-       HSR_DEV_NONE = -1,
-       HSR_DEV_SLAVE_A = 0,
-       HSR_DEV_SLAVE_B,
-       HSR_DEV_MASTER,
+enum hsr_port_type {
+       HSR_PT_NONE = 0,        /* Must be 0, used by framereg */
+       HSR_PT_SLAVE_A,
+       HSR_PT_SLAVE_B,
+       HSR_PT_INTERLINK,
+       HSR_PT_MASTER,
+       HSR_PT_PORTS,   /* This must be the last item in the enum */
+};
+
+struct hsr_port {
+       struct list_head        port_list;
+       struct net_device       *dev;
+       struct hsr_priv         *hsr;
+       enum hsr_port_type      type;
 };
-#define HSR_MAX_SLAVE  (HSR_DEV_SLAVE_B + 1)
-#define HSR_MAX_DEV    (HSR_DEV_MASTER + 1)
 
 struct hsr_priv {
-       struct list_head        hsr_list;       /* List of hsr devices */
        struct rcu_head         rcu_head;
-       struct net_device       *dev;
-       struct net_device       *slave[HSR_MAX_SLAVE];
-       struct list_head        node_db;        /* Other HSR nodes */
+       struct list_head        ports;
+       struct list_head        node_db;        /* Known HSR nodes */
        struct list_head        self_node_db;   /* MACs of slaves */
        struct timer_list       announce_timer; /* Supervision frame dispatch */
+       struct timer_list       prune_timer;
        int announce_count;
        u16 sequence_nr;
        spinlock_t seqnr_lock;                  /* locking for sequence_nr */
        unsigned char           sup_multicast_addr[ETH_ALEN];
 };
 
-void register_hsr_master(struct hsr_priv *hsr_priv);
-void unregister_hsr_master(struct hsr_priv *hsr_priv);
-bool is_hsr_slave(struct net_device *dev);
+#define hsr_for_each_port(hsr, port) \
+       list_for_each_entry_rcu((port), &(hsr)->ports, port_list)
+
+struct hsr_port *hsr_port_get_hsr(struct hsr_priv *hsr, enum hsr_port_type pt);
+
+/* Caller must ensure skb is a valid HSR frame */
+static inline u16 hsr_get_skb_sequence_nr(struct sk_buff *skb)
+{
+       struct hsr_ethhdr *hsr_ethhdr;
+
+       hsr_ethhdr = (struct hsr_ethhdr *) skb_mac_header(skb);
+       return ntohs(hsr_ethhdr->hsr_tag.sequence_nr);
+}
 
-#endif /*  _HSR_PRIVATE_H */
+#endif /*  __HSR_PRIVATE_H */
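
hsr_for_each_port() expands to list_for_each_entry_rcu(), so a caller must sit inside an RCU read-side critical section (or otherwise be synchronized against port removal). A minimal usage sketch, assuming a valid struct hsr_priv *hsr:

    struct hsr_port *port;

    rcu_read_lock();
    hsr_for_each_port(hsr, port) {
            if (port->type == HSR_PT_MASTER)
                    continue;
            /* e.g. inspect each slave via port->dev */
    }
    rcu_read_unlock();
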
index 01a5261..a2c7e4c 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  *
  * Routines for handling Netlink messages for HSR.
  */
@@ -37,13 +37,17 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
        struct net_device *link[2];
        unsigned char multicast_spec;
 
+       if (!data) {
+               netdev_info(dev, "HSR: No slave devices specified\n");
+               return -EINVAL;
+       }
        if (!data[IFLA_HSR_SLAVE1]) {
-               netdev_info(dev, "IFLA_HSR_SLAVE1 missing!\n");
+               netdev_info(dev, "HSR: Slave1 device not specified\n");
                return -EINVAL;
        }
        link[0] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE1]));
        if (!data[IFLA_HSR_SLAVE2]) {
-               netdev_info(dev, "IFLA_HSR_SLAVE2 missing!\n");
+               netdev_info(dev, "HSR: Slave2 device not specified\n");
                return -EINVAL;
        }
        link[1] = __dev_get_by_index(src_net, nla_get_u32(data[IFLA_HSR_SLAVE2]));
@@ -63,21 +67,33 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev,
 
 static int hsr_fill_info(struct sk_buff *skb, const struct net_device *dev)
 {
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
+       int res;
 
-       hsr_priv = netdev_priv(dev);
+       hsr = netdev_priv(dev);
 
-       if (hsr_priv->slave[0])
-               if (nla_put_u32(skb, IFLA_HSR_SLAVE1, hsr_priv->slave[0]->ifindex))
-                       goto nla_put_failure;
+       res = 0;
 
-       if (hsr_priv->slave[1])
-               if (nla_put_u32(skb, IFLA_HSR_SLAVE2, hsr_priv->slave[1]->ifindex))
-                       goto nla_put_failure;
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (port)
+               res = nla_put_u32(skb, IFLA_HSR_SLAVE1, port->dev->ifindex);
+       rcu_read_unlock();
+       if (res)
+               goto nla_put_failure;
+
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+       if (port)
+               res = nla_put_u32(skb, IFLA_HSR_SLAVE2, port->dev->ifindex);
+       rcu_read_unlock();
+       if (res)
+               goto nla_put_failure;
 
        if (nla_put(skb, IFLA_HSR_SUPERVISION_ADDR, ETH_ALEN,
-                   hsr_priv->sup_multicast_addr) ||
-           nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr_priv->sequence_nr))
+                   hsr->sup_multicast_addr) ||
+           nla_put_u16(skb, IFLA_HSR_SEQ_NR, hsr->sequence_nr))
                goto nla_put_failure;
 
        return 0;
@@ -128,13 +144,13 @@ static const struct genl_multicast_group hsr_mcgrps[] = {
  * over one of the slave interfaces. This would indicate an open network ring
  * (i.e. a link has failed somewhere).
  */
-void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
-                     enum hsr_dev_idx dev_idx)
+void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
+                     struct hsr_port *port)
 {
        struct sk_buff *skb;
        void *msg_head;
+       struct hsr_port *master;
        int res;
-       int ifindex;
 
        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
        if (!skb)
@@ -148,11 +164,7 @@ void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
        if (res < 0)
                goto nla_put_failure;
 
-       if (hsr_priv->slave[dev_idx])
-               ifindex = hsr_priv->slave[dev_idx]->ifindex;
-       else
-               ifindex = -1;
-       res = nla_put_u32(skb, HSR_A_IFINDEX, ifindex);
+       res = nla_put_u32(skb, HSR_A_IFINDEX, port->dev->ifindex);
        if (res < 0)
                goto nla_put_failure;
 
@@ -165,16 +177,20 @@ nla_put_failure:
        kfree_skb(skb);
 
 fail:
-       netdev_warn(hsr_priv->dev, "Could not send HSR ring error message\n");
+       rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       netdev_warn(master->dev, "Could not send HSR ring error message\n");
+       rcu_read_unlock();
 }
 
 /* This is called when we haven't heard from the node with MAC address addr for
  * some time (just before the node is removed from the node table/list).
  */
-void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN])
+void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN])
 {
        struct sk_buff *skb;
        void *msg_head;
+       struct hsr_port *master;
        int res;
 
        skb = genlmsg_new(NLMSG_GOODSIZE, GFP_ATOMIC);
@@ -199,7 +215,10 @@ nla_put_failure:
        kfree_skb(skb);
 
 fail:
-       netdev_warn(hsr_priv->dev, "Could not send HSR node down\n");
+       rcu_read_lock();
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       netdev_warn(master->dev, "Could not send HSR node down\n");
+       rcu_read_unlock();
 }
 
 
@@ -220,7 +239,8 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        /* For sending */
        struct sk_buff *skb_out;
        void *msg_head;
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
+       struct hsr_port *port;
        unsigned char hsr_node_addr_b[ETH_ALEN];
        int hsr_node_if1_age;
        u16 hsr_node_if1_seq;
@@ -267,8 +287,8 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        if (res < 0)
                goto nla_put_failure;
 
-       hsr_priv = netdev_priv(hsr_dev);
-       res = hsr_get_node_data(hsr_priv,
+       hsr = netdev_priv(hsr_dev);
+       res = hsr_get_node_data(hsr,
                        (unsigned char *) nla_data(info->attrs[HSR_A_NODE_ADDR]),
                        hsr_node_addr_b,
                        &addr_b_ifindex,
@@ -301,9 +321,12 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF1_SEQ, hsr_node_if1_seq);
        if (res < 0)
                goto nla_put_failure;
-       if (hsr_priv->slave[0])
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_A);
+       if (port)
                res = nla_put_u32(skb_out, HSR_A_IF1_IFINDEX,
-                                               hsr_priv->slave[0]->ifindex);
+                                 port->dev->ifindex);
+       rcu_read_unlock();
        if (res < 0)
                goto nla_put_failure;
 
@@ -313,9 +336,14 @@ static int hsr_get_node_status(struct sk_buff *skb_in, struct genl_info *info)
        res = nla_put_u16(skb_out, HSR_A_IF2_SEQ, hsr_node_if2_seq);
        if (res < 0)
                goto nla_put_failure;
-       if (hsr_priv->slave[1])
+       rcu_read_lock();
+       port = hsr_port_get_hsr(hsr, HSR_PT_SLAVE_B);
+       if (port)
                res = nla_put_u32(skb_out, HSR_A_IF2_IFINDEX,
-                                               hsr_priv->slave[1]->ifindex);
+                                 port->dev->ifindex);
+       rcu_read_unlock();
+       if (res < 0)
+               goto nla_put_failure;
 
        genlmsg_end(skb_out, msg_head);
        genlmsg_unicast(genl_info_net(info), skb_out, info->snd_portid);
@@ -334,7 +362,7 @@ fail:
        return res;
 }
 
-/* Get a list of MacAddressA of all nodes known to this node (other than self).
+/* Get a list of MacAddressA of all nodes known to this node (including self).
  */
 static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
 {
@@ -345,7 +373,7 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        /* For sending */
        struct sk_buff *skb_out;
        void *msg_head;
-       struct hsr_priv *hsr_priv;
+       struct hsr_priv *hsr;
        void *pos;
        unsigned char addr[ETH_ALEN];
        int res;
@@ -385,17 +413,17 @@ static int hsr_get_node_list(struct sk_buff *skb_in, struct genl_info *info)
        if (res < 0)
                goto nla_put_failure;
 
-       hsr_priv = netdev_priv(hsr_dev);
+       hsr = netdev_priv(hsr_dev);
 
        rcu_read_lock();
-       pos = hsr_get_next_node(hsr_priv, NULL, addr);
+       pos = hsr_get_next_node(hsr, NULL, addr);
        while (pos) {
                res = nla_put(skb_out, HSR_A_NODE_ADDR, ETH_ALEN, addr);
                if (res < 0) {
                        rcu_read_unlock();
                        goto nla_put_failure;
                }
-               pos = hsr_get_next_node(hsr_priv, pos, addr);
+               pos = hsr_get_next_node(hsr, pos, addr);
        }
        rcu_read_unlock();
 
index d4579dc..3f6b95b 100644 (file)
@@ -1,4 +1,4 @@
-/* Copyright 2011-2013 Autronica Fire and Security AS
+/* Copyright 2011-2014 Autronica Fire and Security AS
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License as published by the Free
@@ -6,7 +6,7 @@
  * any later version.
  *
  * Author(s):
- *     2011-2013 Arvid Brodin, arvid.brodin@xdin.com
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
  */
 
 #ifndef __HSR_NETLINK_H
 #include <uapi/linux/hsr_netlink.h>
 
 struct hsr_priv;
+struct hsr_port;
 
 int __init hsr_netlink_init(void);
 void __exit hsr_netlink_exit(void);
 
-void hsr_nl_ringerror(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN],
-                     int dev_idx);
-void hsr_nl_nodedown(struct hsr_priv *hsr_priv, unsigned char addr[ETH_ALEN]);
+void hsr_nl_ringerror(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN],
+                     struct hsr_port *port);
+void hsr_nl_nodedown(struct hsr_priv *hsr, unsigned char addr[ETH_ALEN]);
 void hsr_nl_framedrop(int dropcount, int dev_idx);
 void hsr_nl_linkdown(int dev_idx);
 
diff --git a/net/hsr/hsr_slave.c b/net/hsr/hsr_slave.c
new file mode 100644 (file)
index 0000000..a348dcb
--- /dev/null
@@ -0,0 +1,196 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#include "hsr_slave.h"
+#include <linux/etherdevice.h>
+#include <linux/if_arp.h>
+#include "hsr_main.h"
+#include "hsr_device.h"
+#include "hsr_forward.h"
+#include "hsr_framereg.h"
+
+
+static rx_handler_result_t hsr_handle_frame(struct sk_buff **pskb)
+{
+       struct sk_buff *skb = *pskb;
+       struct hsr_port *port;
+
+       if (!skb_mac_header_was_set(skb)) {
+               WARN_ONCE(1, "%s: skb invalid", __func__);
+               return RX_HANDLER_PASS;
+       }
+
+       rcu_read_lock(); /* hsr->node_db, hsr->ports */
+       port = hsr_port_get_rcu(skb->dev);
+
+       if (hsr_addr_is_self(port->hsr, eth_hdr(skb)->h_source)) {
+               /* Directly kill frames sent by ourselves */
+               kfree_skb(skb);
+               goto finish_consume;
+       }
+
+       if (eth_hdr(skb)->h_proto != htons(ETH_P_PRP))
+               goto finish_pass;
+
+       skb_push(skb, ETH_HLEN);
+
+       hsr_forward_skb(skb, port);
+
+finish_consume:
+       rcu_read_unlock(); /* hsr->node_db, hsr->ports */
+       return RX_HANDLER_CONSUMED;
+
+finish_pass:
+       rcu_read_unlock(); /* hsr->node_db, hsr->ports */
+       return RX_HANDLER_PASS;
+}
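
hsr_handle_frame() is an rx_handler, the same per-device hook used by bridging and bonding; it runs early in the receive path for every frame arriving on a registered slave. The contract, in a stripped-down sketch where frame_is_mine() and take_over() are hypothetical helpers:

    static rx_handler_result_t demo_handle_frame(struct sk_buff **pskb)
    {
            struct sk_buff *skb = *pskb;

            if (!frame_is_mine(skb))
                    return RX_HANDLER_PASS;     /* stack continues with skb */

            take_over(skb);                     /* we must free or forward it */
            return RX_HANDLER_CONSUMED;         /* stack must not touch skb */
    }

Note that hsr_handle_frame() returns CONSUMED both when it kills its own frames and when hsr_forward_skb() takes the skb.
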
+
+bool hsr_port_exists(const struct net_device *dev)
+{
+       return rcu_access_pointer(dev->rx_handler) == hsr_handle_frame;
+}
+
+
+static int hsr_check_dev_ok(struct net_device *dev)
+{
+       /* Don't allow HSR on non-ethernet like devices */
+       if ((dev->flags & IFF_LOOPBACK) || (dev->type != ARPHRD_ETHER) ||
+           (dev->addr_len != ETH_ALEN)) {
+               netdev_info(dev, "Cannot use loopback or non-ethernet device as HSR slave.\n");
+               return -EINVAL;
+       }
+
+       /* Don't allow enslaving hsr devices */
+       if (is_hsr_master(dev)) {
+               netdev_info(dev, "Cannot create trees of HSR devices.\n");
+               return -EINVAL;
+       }
+
+       if (hsr_port_exists(dev)) {
+               netdev_info(dev, "This device is already a HSR slave.\n");
+               return -EINVAL;
+       }
+
+       if (dev->priv_flags & IFF_802_1Q_VLAN) {
+               netdev_info(dev, "HSR on top of VLAN is not yet supported in this driver.\n");
+               return -EINVAL;
+       }
+
+       if (dev->priv_flags & IFF_DONT_BRIDGE) {
+               netdev_info(dev, "This device does not support bridging.\n");
+               return -EOPNOTSUPP;
+       }
+
+       /* HSR over bonded devices has not been tested, but I'm not sure it
+        * won't work...
+        */
+
+       return 0;
+}
+
+
+/* Setup device to be added to the HSR bridge. */
+static int hsr_portdev_setup(struct net_device *dev, struct hsr_port *port)
+{
+       int res;
+
+       dev_hold(dev);
+       res = dev_set_promiscuity(dev, 1);
+       if (res)
+               goto fail_promiscuity;
+
+       /* FIXME:
+        * What does net device "adjacency" mean? Should we do
+        * res = netdev_master_upper_dev_link(port->dev, port->hsr->dev); ?
+        */
+
+       res = netdev_rx_handler_register(dev, hsr_handle_frame, port);
+       if (res)
+               goto fail_rx_handler;
+       dev_disable_lro(dev);
+
+       return 0;
+
+fail_rx_handler:
+       dev_set_promiscuity(dev, -1);
+fail_promiscuity:
+       dev_put(dev);
+
+       return res;
+}
+
+int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
+                enum hsr_port_type type)
+{
+       struct hsr_port *port, *master;
+       int res;
+
+       if (type != HSR_PT_MASTER) {
+               res = hsr_check_dev_ok(dev);
+               if (res)
+                       return res;
+       }
+
+       port = hsr_port_get_hsr(hsr, type);
+       if (port != NULL)
+               return -EBUSY;  /* This port already exists */
+
+       port = kzalloc(sizeof(*port), GFP_KERNEL);
+       if (port == NULL)
+               return -ENOMEM;
+
+       if (type != HSR_PT_MASTER) {
+               res = hsr_portdev_setup(dev, port);
+               if (res)
+                       goto fail_dev_setup;
+       }
+
+       port->hsr = hsr;
+       port->dev = dev;
+       port->type = type;
+
+       list_add_tail_rcu(&port->port_list, &hsr->ports);
+       synchronize_rcu();
+
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       netdev_update_features(master->dev);
+       dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+
+       return 0;
+
+fail_dev_setup:
+       kfree(port);
+       return res;
+}
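
hsr_add_port() is presumably called once per role from the device setup code not shown in this excerpt; roughly, with hypothetical hsr_dev/slave1/slave2 pointers and error handling elided:

    res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
    if (!res)
            res = hsr_add_port(hsr, slave1, HSR_PT_SLAVE_A);
    if (!res)
            res = hsr_add_port(hsr, slave2, HSR_PT_SLAVE_B);

hsr_check_dev_ok() is skipped for the master, and adding the same port type twice fails with -EBUSY.
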
+
+void hsr_del_port(struct hsr_port *port)
+{
+       struct hsr_priv *hsr;
+       struct hsr_port *master;
+
+       hsr = port->hsr;
+       master = hsr_port_get_hsr(hsr, HSR_PT_MASTER);
+       list_del_rcu(&port->port_list);
+
+       if (port != master) {
+               netdev_update_features(master->dev);
+               dev_set_mtu(master->dev, hsr_get_max_mtu(hsr));
+               netdev_rx_handler_unregister(port->dev);
+               dev_set_promiscuity(port->dev, -1);
+       }
+
+       /* FIXME?
+        * netdev_upper_dev_unlink(port->dev, port->hsr->dev);
+        */
+
+       synchronize_rcu();
+       dev_put(port->dev);
+}
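
Teardown follows the usual RCU removal discipline: unlink with list_del_rcu(), let in-flight readers drain with synchronize_rcu(), and only then drop the last reference. In generic form, where obj and free_obj() are placeholders:

    list_del_rcu(&obj->list);       /* new readers can no longer find obj */
    synchronize_rcu();              /* readers already traversing finish  */
    free_obj(obj);                  /* safe now; here it is dev_put()     */
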
diff --git a/net/hsr/hsr_slave.h b/net/hsr/hsr_slave.h
new file mode 100644 (file)
index 0000000..3ccfbf7
--- /dev/null
@@ -0,0 +1,38 @@
+/* Copyright 2011-2014 Autronica Fire and Security AS
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the Free
+ * Software Foundation; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Author(s):
+ *     2011-2014 Arvid Brodin, arvid.brodin@alten.se
+ */
+
+#ifndef __HSR_SLAVE_H
+#define __HSR_SLAVE_H
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/rtnetlink.h>
+#include "hsr_main.h"
+
+int hsr_add_port(struct hsr_priv *hsr, struct net_device *dev,
+                enum hsr_port_type pt);
+void hsr_del_port(struct hsr_port *port);
+bool hsr_port_exists(const struct net_device *dev);
+
+static inline struct hsr_port *hsr_port_get_rtnl(const struct net_device *dev)
+{
+       ASSERT_RTNL();
+       return hsr_port_exists(dev) ?
+                               rtnl_dereference(dev->rx_handler_data) : NULL;
+}
+
+static inline struct hsr_port *hsr_port_get_rcu(const struct net_device *dev)
+{
+       return hsr_port_exists(dev) ?
+                               rcu_dereference(dev->rx_handler_data) : NULL;
+}
+
+#endif /* __HSR_SLAVE_H */
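
Two accessors, one per locking context: hsr_port_get_rtnl() for configuration paths that already hold RTNL, hsr_port_get_rcu() for the packet fast path. A caller sketch, with use() as a placeholder:

    /* control path, e.g. a netlink handler (RTNL held): */
    port = hsr_port_get_rtnl(slave_dev);

    /* data path: */
    rcu_read_lock();
    port = hsr_port_get_rcu(slave_dev);
    if (port)
            use(port);      /* must not escape the read section */
    rcu_read_unlock();
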
index fe6bd7a..016b77e 100644 (file)
@@ -80,14 +80,14 @@ lowpan_dev_info *lowpan_dev_info(const struct net_device *dev)
 static inline void lowpan_address_flip(u8 *src, u8 *dest)
 {
        int i;
+
        for (i = 0; i < IEEE802154_ADDR_LEN; i++)
                (dest)[IEEE802154_ADDR_LEN - i - 1] = (src)[i];
 }
 
-static int lowpan_header_create(struct sk_buff *skb,
-                          struct net_device *dev,
-                          unsigned short type, const void *_daddr,
-                          const void *_saddr, unsigned int len)
+static int lowpan_header_create(struct sk_buff *skb, struct net_device *dev,
+                               unsigned short type, const void *_daddr,
+                               const void *_saddr, unsigned int len)
 {
        const u8 *saddr = _saddr;
        const u8 *daddr = _daddr;
@@ -144,7 +144,7 @@ static int lowpan_header_create(struct sk_buff *skb,
 }
 
 static int lowpan_give_skb_to_devices(struct sk_buff *skb,
-                                       struct net_device *dev)
+                                     struct net_device *dev)
 {
        struct lowpan_dev_record *entry;
        struct sk_buff *skb_cp;
@@ -368,24 +368,28 @@ static netdev_tx_t lowpan_xmit(struct sk_buff *skb, struct net_device *dev)
 static struct wpan_phy *lowpan_get_phy(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_phy(real_dev);
 }
 
 static __le16 lowpan_get_pan_id(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_pan_id(real_dev);
 }
 
 static __le16 lowpan_get_short_addr(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_short_addr(real_dev);
 }
 
 static u8 lowpan_get_dsn(const struct net_device *dev)
 {
        struct net_device *real_dev = lowpan_dev_info(dev)->real_dev;
+
        return ieee802154_mlme_ops(real_dev)->get_dsn(real_dev);
 }
 
@@ -454,7 +458,7 @@ static int lowpan_validate(struct nlattr *tb[], struct nlattr *data[])
 }
 
 static int lowpan_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
+                     struct packet_type *pt, struct net_device *orig_dev)
 {
        struct ieee802154_hdr hdr;
        int ret;
index 351d9a9..29e0de6 100644 (file)
@@ -40,9 +40,7 @@
 
 #include "af802154.h"
 
-/*
- * Utility function for families
- */
+/* Utility function for families */
 struct net_device*
 ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
 {
@@ -87,8 +85,8 @@ ieee802154_get_dev(struct net *net, const struct ieee802154_addr *addr)
                rtnl_unlock();
                break;
        default:
-               pr_warning("Unsupported ieee802154 address type: %d\n",
-                               addr->mode);
+               pr_warn("Unsupported ieee802154 address type: %d\n",
+                       addr->mode);
                break;
        }
 
@@ -106,7 +104,7 @@ static int ieee802154_sock_release(struct socket *sock)
        return 0;
 }
 static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
-               struct msghdr *msg, size_t len)
+                                  struct msghdr *msg, size_t len)
 {
        struct sock *sk = sock->sk;
 
@@ -114,7 +112,7 @@ static int ieee802154_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
 }
 
 static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
-               int addr_len)
+                               int addr_len)
 {
        struct sock *sk = sock->sk;
 
@@ -125,7 +123,7 @@ static int ieee802154_sock_bind(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
-                       int addr_len, int flags)
+                                  int addr_len, int flags)
 {
        struct sock *sk = sock->sk;
 
@@ -139,7 +137,7 @@ static int ieee802154_sock_connect(struct socket *sock, struct sockaddr *uaddr,
 }
 
 static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
-               unsigned int cmd)
+                               unsigned int cmd)
 {
        struct ifreq ifr;
        int ret = -ENOIOCTLCMD;
@@ -167,7 +165,7 @@ static int ieee802154_dev_ioctl(struct sock *sk, struct ifreq __user *arg,
 }
 
 static int ieee802154_sock_ioctl(struct socket *sock, unsigned int cmd,
-               unsigned long arg)
+                                unsigned long arg)
 {
        struct sock *sk = sock->sk;
 
@@ -238,8 +236,7 @@ static const struct proto_ops ieee802154_dgram_ops = {
 };
 
 
-/*
- * Create a socket. Initialise the socket, blank the addresses
+/* Create a socket. Initialise the socket, blank the addresses
  * set the state.
  */
 static int ieee802154_create(struct net *net, struct socket *sock,
@@ -301,13 +298,14 @@ static const struct net_proto_family ieee802154_family_ops = {
 };
 
 static int ieee802154_rcv(struct sk_buff *skb, struct net_device *dev,
-       struct packet_type *pt, struct net_device *orig_dev)
+                         struct packet_type *pt, struct net_device *orig_dev)
 {
        if (!netif_running(dev))
                goto drop;
        pr_debug("got frame, type %d, dev %p\n", dev->type, dev);
 #ifdef DEBUG
-       print_hex_dump_bytes("ieee802154_rcv ", DUMP_PREFIX_NONE, skb->data, skb->len);
+       print_hex_dump_bytes("ieee802154_rcv ",
+                            DUMP_PREFIX_NONE, skb->data, skb->len);
 #endif
 
        if (!net_eq(dev_net(dev), &init_net))
index 4f0ed87..ef2ad8a 100644 (file)
@@ -149,8 +149,7 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
                spin_lock_bh(&sk->sk_receive_queue.lock);
                skb = skb_peek(&sk->sk_receive_queue);
                if (skb != NULL) {
-                       /*
-                        * We will only return the amount
+                       /* We will only return the amount
                         * of this packet since that is all
                         * that will be read.
                         */
@@ -161,12 +160,13 @@ static int dgram_ioctl(struct sock *sk, int cmd, unsigned long arg)
        }
 
        }
+
        return -ENOIOCTLCMD;
 }
 
 /* FIXME: autobind */
 static int dgram_connect(struct sock *sk, struct sockaddr *uaddr,
-                       int len)
+                        int len)
 {
        struct sockaddr_ieee802154 *addr = (struct sockaddr_ieee802154 *)uaddr;
        struct dgram_sock *ro = dgram_sk(sk);
@@ -205,7 +205,7 @@ static int dgram_disconnect(struct sock *sk, int flags)
 }
 
 static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
-               struct msghdr *msg, size_t size)
+                        struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -248,8 +248,8 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                       msg->msg_flags & MSG_DONTWAIT,
-                       &err);
+                                 msg->msg_flags & MSG_DONTWAIT,
+                                 &err);
        if (!skb)
                goto out_dev;
 
@@ -262,7 +262,8 @@ static int dgram_sendmsg(struct kiocb *iocb, struct sock *sk,
        cb->ackreq = ro->want_ack;
 
        if (msg->msg_name) {
-               DECLARE_SOCKADDR(struct sockaddr_ieee802154*, daddr, msg->msg_name);
+               DECLARE_SOCKADDR(struct sockaddr_ieee802154*,
+                                daddr, msg->msg_name);
 
                ieee802154_addr_from_sa(&dst_addr, &daddr->addr);
        } else {
@@ -304,8 +305,8 @@ out:
 }
 
 static int dgram_recvmsg(struct kiocb *iocb, struct sock *sk,
-               struct msghdr *msg, size_t len, int noblock, int flags,
-               int *addr_len)
+                        struct msghdr *msg, size_t len, int noblock,
+                        int flags, int *addr_len)
 {
        size_t copied = 0;
        int err = -EOPNOTSUPP;
@@ -398,6 +399,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
                                          dgram_sk(sk))) {
                        if (prev) {
                                struct sk_buff *clone;
+
                                clone = skb_clone(skb, GFP_ATOMIC);
                                if (clone)
                                        dgram_rcv_skb(prev, clone);
@@ -407,9 +409,9 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
                }
        }
 
-       if (prev)
+       if (prev) {
                dgram_rcv_skb(prev, skb);
-       else {
+       } else {
                kfree_skb(skb);
                ret = NET_RX_DROP;
        }
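
This delivery loop uses the standard one-copy fanout: every matching socket except the last receives a clone, and the last match consumes the original skb, saving one allocation. Schematically, with listeners and deliver() as illustrative names:

    prev = NULL;
    list_for_each_entry(sk, &listeners, node) {
            if (prev) {
                    struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);

                    if (clone)
                            deliver(prev, clone);
            }
            prev = sk;
    }
    if (prev)
            deliver(prev, skb);     /* last match eats the original */
    else
            kfree_skb(skb);         /* no takers */
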
@@ -419,7 +421,7 @@ int ieee802154_dgram_deliver(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int dgram_getsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int __user *optlen)
+                           char __user *optval, int __user *optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
 
@@ -463,7 +465,7 @@ static int dgram_getsockopt(struct sock *sk, int level, int optname,
 }
 
 static int dgram_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                           char __user *optval, unsigned int optlen)
 {
        struct dgram_sock *ro = dgram_sk(sk);
        struct net *net = sock_net(sk);
index 8b83a23..5d352f8 100644 (file)
@@ -43,7 +43,7 @@ struct genl_info;
 struct sk_buff *ieee802154_nl_create(int flags, u8 req);
 int ieee802154_nl_mcast(struct sk_buff *msg, unsigned int group);
 struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
-               int flags, u8 req);
+                                       int flags, u8 req);
 int ieee802154_nl_reply(struct sk_buff *msg, struct genl_info *info);
 
 extern struct genl_family nl802154_family;
index 26efcf4..9222966 100644 (file)
@@ -52,7 +52,7 @@ struct sk_buff *ieee802154_nl_create(int flags, u8 req)
 
        spin_lock_irqsave(&ieee802154_seq_lock, f);
        hdr = genlmsg_put(msg, 0, ieee802154_seq_num++,
-                       &nl802154_family, flags, req);
+                         &nl802154_family, flags, req);
        spin_unlock_irqrestore(&ieee802154_seq_lock, f);
        if (!hdr) {
                nlmsg_free(msg);
@@ -86,7 +86,7 @@ struct sk_buff *ieee802154_nl_new_reply(struct genl_info *info,
                return NULL;
 
        hdr = genlmsg_put_reply(msg, info,
-                       &nl802154_family, flags, req);
+                               &nl802154_family, flags, req);
        if (!hdr) {
                nlmsg_free(msg);
                return NULL;
index a3281b8..c6bfe22 100644 (file)
@@ -60,7 +60,8 @@ static __le16 nla_get_shortaddr(const struct nlattr *nla)
 }
 
 int ieee802154_nl_assoc_indic(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 cap)
+                             struct ieee802154_addr *addr,
+                             u8 cap)
 {
        struct sk_buff *msg;
 
@@ -93,7 +94,7 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_assoc_indic);
 
 int ieee802154_nl_assoc_confirm(struct net_device *dev, __le16 short_addr,
-               u8 status)
+                               u8 status)
 {
        struct sk_buff *msg;
 
@@ -119,7 +120,8 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_assoc_confirm);
 
 int ieee802154_nl_disassoc_indic(struct net_device *dev,
-               struct ieee802154_addr *addr, u8 reason)
+                                struct ieee802154_addr *addr,
+                                u8 reason)
 {
        struct sk_buff *msg;
 
@@ -205,8 +207,9 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_beacon_indic);
 
 int ieee802154_nl_scan_confirm(struct net_device *dev,
-               u8 status, u8 scan_type, u32 unscanned, u8 page,
-               u8 *edl/* , struct list_head *pan_desc_list */)
+                              u8 status, u8 scan_type,
+                              u32 unscanned, u8 page,
+                              u8 *edl/* , struct list_head *pan_desc_list */)
 {
        struct sk_buff *msg;
 
@@ -260,7 +263,7 @@ nla_put_failure:
 EXPORT_SYMBOL(ieee802154_nl_start_confirm);
 
 static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
-       u32 seq, int flags, struct net_device *dev)
+                                   u32 seq, int flags, struct net_device *dev)
 {
        void *hdr;
        struct wpan_phy *phy;
@@ -270,7 +273,7 @@ static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 portid,
        pr_debug("%s\n", __func__);
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
-               IEEE802154_LIST_IFACE);
+                         IEEE802154_LIST_IFACE);
        if (!hdr)
                goto out;
 
@@ -330,14 +333,16 @@ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info)
 
        if (info->attrs[IEEE802154_ATTR_DEV_NAME]) {
                char name[IFNAMSIZ + 1];
+
                nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME],
-                               sizeof(name));
+                           sizeof(name));
                dev = dev_get_by_name(&init_net, name);
-       } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX])
+       } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) {
                dev = dev_get_by_index(&init_net,
                        nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX]));
-       else
+       } else {
                return NULL;
+       }
 
        if (!dev)
                return NULL;
@@ -435,7 +440,7 @@ int ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info)
        int ret = -EOPNOTSUPP;
 
        if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] &&
-               !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
+           !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) ||
            !info->attrs[IEEE802154_ATTR_REASON])
                return -EINVAL;
 
@@ -464,8 +469,7 @@ out:
        return ret;
 }
 
-/*
- * PANid, channel, beacon_order = 15, superframe_order = 15,
+/* PANid, channel, beacon_order = 15, superframe_order = 15,
  * PAN_coordinator, battery_life_extension = 0,
  * coord_realignment = 0, security_enable = 0
 */
@@ -559,8 +563,8 @@ int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info)
                page = 0;
 
 
-       ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page,
-                       duration);
+       ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels,
+                                                page, duration);
 
 out:
        dev_put(dev);
@@ -570,7 +574,8 @@ out:
 int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
 {
        /* Request for interface name, index, type, IEEE address,
-          PAN Id, short address */
+        * PAN Id, short address
+        */
        struct sk_buff *msg;
        struct net_device *dev = NULL;
        int rc = -ENOBUFS;
@@ -586,7 +591,7 @@ int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        rc = ieee802154_nl_fill_iface(msg, info->snd_portid, info->snd_seq,
-                       0, dev);
+                                     0, dev);
        if (rc < 0)
                goto out_free;
 
@@ -598,7 +603,6 @@ out_free:
 out_dev:
        dev_put(dev);
        return rc;
-
 }
 
 int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
@@ -616,7 +620,8 @@ int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb)
                        goto cont;
 
                if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).portid,
-                       cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0)
+                                            cb->nlh->nlmsg_seq,
+                                            NLM_F_MULTI, dev) < 0)
                        break;
 cont:
                idx++;
@@ -765,6 +770,7 @@ ieee802154_llsec_parse_key_id(struct genl_info *info,
        case IEEE802154_SCF_KEY_SHORT_INDEX:
        {
                u32 source = nla_get_u32(info->attrs[IEEE802154_ATTR_LLSEC_KEY_SOURCE_SHORT]);
+
                desc->short_source = cpu_to_le32(source);
                break;
        }
@@ -842,7 +848,7 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        hdr = genlmsg_put(msg, 0, info->snd_seq, &nl802154_family, 0,
-               IEEE802154_LLSEC_GETPARAMS);
+                         IEEE802154_LLSEC_GETPARAMS);
        if (!hdr)
                goto out_free;
 
@@ -946,7 +952,7 @@ struct llsec_dump_data {
 
 static int
 ieee802154_llsec_dump_table(struct sk_buff *skb, struct netlink_callback *cb,
-                           int (*step)(struct llsec_dump_data*))
+                           int (*step)(struct llsec_dump_data *))
 {
        struct net *net = sock_net(skb->sk);
        struct net_device *dev;
index 89b265a..972baf8 100644 (file)
@@ -36,7 +36,7 @@
 #include "ieee802154.h"
 
 static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
-       u32 seq, int flags, struct wpan_phy *phy)
+                                 u32 seq, int flags, struct wpan_phy *phy)
 {
        void *hdr;
        int i, pages = 0;
@@ -48,7 +48,7 @@ static int ieee802154_nl_fill_phy(struct sk_buff *msg, u32 portid,
                return -EMSGSIZE;
 
        hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags,
-               IEEE802154_LIST_PHY);
+                         IEEE802154_LIST_PHY);
        if (!hdr)
                goto out;
 
@@ -80,7 +80,8 @@ out:
 int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
 {
        /* Request for interface name, index, type, IEEE address,
-          PAN Id, short address */
+        * PAN Id, short address
+        */
        struct sk_buff *msg;
        struct wpan_phy *phy;
        const char *name;
@@ -105,7 +106,7 @@ int ieee802154_list_phy(struct sk_buff *skb, struct genl_info *info)
                goto out_dev;
 
        rc = ieee802154_nl_fill_phy(msg, info->snd_portid, info->snd_seq,
-                       0, phy);
+                                   0, phy);
        if (rc < 0)
                goto out_free;
 
@@ -117,7 +118,6 @@ out_free:
 out_dev:
        wpan_phy_put(phy);
        return rc;
-
 }
 
 struct dump_phy_data {
@@ -137,10 +137,10 @@ static int ieee802154_dump_phy_iter(struct wpan_phy *phy, void *_data)
                return 0;
 
        rc = ieee802154_nl_fill_phy(data->skb,
-                       NETLINK_CB(data->cb->skb).portid,
-                       data->cb->nlh->nlmsg_seq,
-                       NLM_F_MULTI,
-                       phy);
+                                   NETLINK_CB(data->cb->skb).portid,
+                                   data->cb->nlh->nlmsg_seq,
+                                   NLM_F_MULTI,
+                                   phy);
 
        if (rc < 0) {
                data->idx--;
@@ -238,10 +238,9 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
 
                addr.sa_family = ARPHRD_IEEE802154;
                nla_memcpy(&addr.sa_data, info->attrs[IEEE802154_ATTR_HW_ADDR],
-                               IEEE802154_ADDR_LEN);
+                          IEEE802154_ADDR_LEN);
 
-               /*
-                * strangely enough, some callbacks (inetdev_event) from
+               /* strangely enough, some callbacks (inetdev_event) from
                 * dev_set_mac_address require RTNL_LOCK
                 */
                rtnl_lock();
index 74d54fa..9d1f648 100644 (file)
@@ -96,7 +96,7 @@ out:
 }
 
 static int raw_connect(struct sock *sk, struct sockaddr *uaddr,
-                       int addr_len)
+                      int addr_len)
 {
        return -ENOTSUPP;
 }
@@ -106,8 +106,8 @@ static int raw_disconnect(struct sock *sk, int flags)
        return 0;
 }
 
-static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
-                      size_t size)
+static int raw_sendmsg(struct kiocb *iocb, struct sock *sk,
+                      struct msghdr *msg, size_t size)
 {
        struct net_device *dev;
        unsigned int mtu;
@@ -145,7 +145,7 @@ static int raw_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        hlen = LL_RESERVED_SPACE(dev);
        tlen = dev->needed_tailroom;
        skb = sock_alloc_send_skb(sk, hlen + tlen + size,
-                       msg->msg_flags & MSG_DONTWAIT, &err);
+                                 msg->msg_flags & MSG_DONTWAIT, &err);
        if (!skb)
                goto out_dev;
 
@@ -235,7 +235,6 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
                bh_lock_sock(sk);
                if (!sk->sk_bound_dev_if ||
                    sk->sk_bound_dev_if == dev->ifindex) {
-
                        struct sk_buff *clone;
 
                        clone = skb_clone(skb, GFP_ATOMIC);
@@ -248,13 +247,13 @@ void ieee802154_raw_deliver(struct net_device *dev, struct sk_buff *skb)
 }
 
 static int raw_getsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, int __user *optlen)
+                         char __user *optval, int __user *optlen)
 {
        return -EOPNOTSUPP;
 }
 
 static int raw_setsockopt(struct sock *sk, int level, int optname,
-                   char __user *optval, unsigned int optlen)
+                         char __user *optval, unsigned int optlen)
 {
        return -EOPNOTSUPP;
 }
@@ -274,4 +273,3 @@ struct proto ieee802154_raw_prot = {
        .getsockopt     = raw_getsockopt,
        .setsockopt     = raw_setsockopt,
 };
-
index 6f1428c..ffec6ce 100644 (file)
@@ -30,6 +30,8 @@
 
 #include "reassembly.h"
 
+static const char lowpan_frags_cache_name[] = "lowpan-frags";
+
 struct lowpan_frag_info {
        __be16 d_tag;
        u16 d_size;
@@ -50,29 +52,25 @@ static unsigned int lowpan_hash_frag(__be16 tag, u16 d_size,
                                     const struct ieee802154_addr *saddr,
                                     const struct ieee802154_addr *daddr)
 {
-       u32 c;
-
        net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
-       c = jhash_3words(ieee802154_addr_hash(saddr),
-                        ieee802154_addr_hash(daddr),
-                        (__force u32)(tag + (d_size << 16)),
-                        lowpan_frags.rnd);
-
-       return c & (INETFRAGS_HASHSZ - 1);
+       return jhash_3words(ieee802154_addr_hash(saddr),
+                           ieee802154_addr_hash(daddr),
+                           (__force u32)(tag + (d_size << 16)),
+                           lowpan_frags.rnd);
 }
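
The per-protocol hash function now returns the full 32-bit jhash instead of masking with INETFRAGS_HASHSZ - 1; presumably the inet_frag core does the bucketing itself as part of this series' fragment-cache rework. The bucket derivation is the usual power-of-two mask (a sketch, assuming INETFRAGS_HASHSZ stays a power of two):

    u32 hash = lowpan_hashfn(q);                          /* full 32 bits   */
    unsigned int bucket = hash & (INETFRAGS_HASHSZ - 1);  /* low-order bits */
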
 
-static unsigned int lowpan_hashfn(struct inet_frag_queue *q)
+static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
 {
-       struct lowpan_frag_queue *fq;
+       const struct lowpan_frag_queue *fq;
 
        fq = container_of(q, struct lowpan_frag_queue, q);
        return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
 }
 
-static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
+static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
 {
-       struct lowpan_frag_queue *fq;
-       struct lowpan_create_arg *arg = a;
+       const struct lowpan_frag_queue *fq;
+       const struct lowpan_create_arg *arg = a;
 
        fq = container_of(q, struct lowpan_frag_queue, q);
        return  fq->tag == arg->tag && fq->d_size == arg->d_size &&
@@ -80,10 +78,10 @@ static bool lowpan_frag_match(struct inet_frag_queue *q, void *a)
                ieee802154_addr_equal(&fq->daddr, arg->dst);
 }
 
-static void lowpan_frag_init(struct inet_frag_queue *q, void *a)
+static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
 {
+       const struct lowpan_create_arg *arg = a;
        struct lowpan_frag_queue *fq;
-       struct lowpan_create_arg *arg = a;
 
        fq = container_of(q, struct lowpan_frag_queue, q);
 
@@ -103,7 +101,7 @@ static void lowpan_frag_expire(unsigned long data)
 
        spin_lock(&fq->q.lock);
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE)
+       if (fq->q.flags & INET_FRAG_COMPLETE)
                goto out;
 
        inet_frag_kill(&fq->q, &lowpan_frags);
@@ -128,7 +126,6 @@ fq_find(struct net *net, const struct lowpan_frag_info *frag_info,
        arg.src = src;
        arg.dst = dst;
 
-       read_lock(&lowpan_frags.lock);
        hash = lowpan_hash_frag(frag_info->d_tag, frag_info->d_size, src, dst);
 
        q = inet_frag_find(&ieee802154_lowpan->frags,
@@ -147,7 +144,7 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
        struct net_device *dev;
        int end, offset;
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE)
+       if (fq->q.flags & INET_FRAG_COMPLETE)
                goto err;
 
        offset = lowpan_cb(skb)->d_offset << 3;
@@ -159,14 +156,14 @@ static int lowpan_frag_queue(struct lowpan_frag_queue *fq,
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
-                   ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
+                   ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
-               fq->q.last_in |= INET_FRAG_LAST_IN;
+               fq->q.flags |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
-                       if (fq->q.last_in & INET_FRAG_LAST_IN)
+                       if (fq->q.flags & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
@@ -206,13 +203,13 @@ found:
        if (frag_type == LOWPAN_DISPATCH_FRAG1) {
                /* Calculate uncomp. 6lowpan header to estimate full size */
                fq->q.meat += lowpan_uncompress_size(skb, NULL);
-               fq->q.last_in |= INET_FRAG_FIRST_IN;
+               fq->q.flags |= INET_FRAG_FIRST_IN;
        } else {
                fq->q.meat += skb->len;
        }
        add_frag_mem_limit(&fq->q, skb->truesize);
 
-       if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+       if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;
@@ -223,7 +220,6 @@ found:
                return res;
        }
 
-       inet_frag_lru_move(&fq->q);
        return -1;
 err:
        kfree_skb(skb);
@@ -373,11 +369,10 @@ int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type)
        if (frag_info->d_size > ieee802154_lowpan->max_dsize)
                goto err;
 
-       inet_frag_evictor(&ieee802154_lowpan->frags, &lowpan_frags, false);
-
        fq = fq_find(net, frag_info, &source, &dest);
        if (fq != NULL) {
                int ret;
+
                spin_lock(&fq->q.lock);
                ret = lowpan_frag_queue(fq, skb, frag_type);
                spin_unlock(&fq->q.lock);
@@ -393,20 +388,25 @@ err:
 EXPORT_SYMBOL(lowpan_frag_rcv);
 
 #ifdef CONFIG_SYSCTL
+static int zero;
+
 static struct ctl_table lowpan_frags_ns_ctl_table[] = {
        {
                .procname       = "6lowpanfrag_high_thresh",
                .data           = &init_net.ieee802154_lowpan.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &init_net.ieee802154_lowpan.frags.low_thresh
        },
        {
                .procname       = "6lowpanfrag_low_thresh",
                .data           = &init_net.ieee802154_lowpan.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &init_net.ieee802154_lowpan.frags.high_thresh
        },
        {
                .procname       = "6lowpanfrag_time",
@@ -425,10 +425,12 @@ static struct ctl_table lowpan_frags_ns_ctl_table[] = {
        { }
 };
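
Moving the two thresholds from proc_dointvec to proc_dointvec_minmax ties them together: extra1/extra2 point at the opposite threshold, so a write that would leave low_thresh above high_thresh is rejected with -EINVAL instead of being silently accepted. A self-contained sketch of the handler's contract, with illustrative demo_* names:

    static int demo_value = 50;
    static int demo_min, demo_max = 100;

    static struct ctl_table demo_table[] = {
            {
                    .procname       = "demo_value",
                    .data           = &demo_value,
                    .maxlen         = sizeof(int),
                    .mode           = 0644,
                    .proc_handler   = proc_dointvec_minmax,
                    .extra1         = &demo_min,    /* writes below 0 fail   */
                    .extra2         = &demo_max,    /* writes above 100 fail */
            },
            { }
    };
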
 
+/* secret interval has been deprecated */
+static int lowpan_frags_secret_interval_unused;
 static struct ctl_table lowpan_frags_ctl_table[] = {
        {
                .procname       = "6lowpanfrag_secret_interval",
-               .data           = &lowpan_frags.secret_interval,
+               .data           = &lowpan_frags_secret_interval_unused,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
@@ -451,7 +453,10 @@ static int __net_init lowpan_frags_ns_sysctl_register(struct net *net)
                        goto err_alloc;
 
                table[0].data = &ieee802154_lowpan->frags.high_thresh;
+               table[0].extra1 = &ieee802154_lowpan->frags.low_thresh;
+               table[0].extra2 = &init_net.ieee802154_lowpan.frags.high_thresh;
                table[1].data = &ieee802154_lowpan->frags.low_thresh;
+               table[1].extra2 = &ieee802154_lowpan->frags.high_thresh;
                table[2].data = &ieee802154_lowpan->frags.timeout;
                table[3].data = &ieee802154_lowpan->max_dsize;
 
@@ -568,8 +573,10 @@ int __init lowpan_net_frag_init(void)
        lowpan_frags.qsize = sizeof(struct frag_queue);
        lowpan_frags.match = lowpan_frag_match;
        lowpan_frags.frag_expire = lowpan_frag_expire;
-       lowpan_frags.secret_interval = 10 * 60 * HZ;
-       inet_frags_init(&lowpan_frags);
+       lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+       ret = inet_frags_init(&lowpan_frags);
+       if (ret)
+               goto err_pernet;
 
        return ret;
 err_pernet:
index 8d6f670..4955e0f 100644 (file)
@@ -48,7 +48,8 @@ MASTER_SHOW(transmit_power, "%d +- 1 dB");
 MASTER_SHOW(cca_mode, "%d");
 
 static ssize_t channels_supported_show(struct device *dev,
-                           struct device_attribute *attr, char *buf)
+                                      struct device_attribute *attr,
+                                      char *buf)
 {
        struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
        int ret;
@@ -57,7 +58,7 @@ static ssize_t channels_supported_show(struct device *dev,
        mutex_lock(&phy->pib_lock);
        for (i = 0; i < 32; i++) {
                ret = snprintf(buf + len, PAGE_SIZE - len,
-                               "%#09x\n", phy->channels_supported[i]);
+                              "%#09x\n", phy->channels_supported[i]);
                if (ret < 0)
                        break;
                len += ret;
@@ -80,6 +81,7 @@ ATTRIBUTE_GROUPS(pmib);
 static void wpan_phy_release(struct device *d)
 {
        struct wpan_phy *phy = container_of(d, struct wpan_phy, dev);
+
        kfree(phy);
 }
 
@@ -121,11 +123,12 @@ static int wpan_phy_iter(struct device *dev, void *_data)
 {
        struct wpan_phy_iter_data *wpid = _data;
        struct wpan_phy *phy = container_of(dev, struct wpan_phy, dev);
+
        return wpid->fn(phy, wpid->data);
 }
 
 int wpan_phy_for_each(int (*fn)(struct wpan_phy *phy, void *data),
-               void *data)
+                     void *data)
 {
        struct wpan_phy_iter_data wpid = {
                .fn = fn,
@@ -197,6 +200,7 @@ EXPORT_SYMBOL(wpan_phy_free);
 static int __init wpan_phy_class_init(void)
 {
        int rc;
+
        rc = class_register(&wpan_phy_class);
        if (rc)
                goto err;
index 05c57f0..dbc10d8 100644 (file)
@@ -307,6 +307,10 @@ config NET_IPVTI
          the notion of a secure tunnel for IPSEC and then use routing protocol
          on top.
 
+config NET_UDP_TUNNEL
+       tristate
+       default n
+
 config INET_AH
        tristate "IP: AH transformation"
        select XFRM_ALGO
index f032688..8ee1cd4 100644 (file)
@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_IPIP) += ipip.o
 gre-y := gre_demux.o
 obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
 obj-$(CONFIG_NET_IPGRE) += ip_gre.o
+obj-$(CONFIG_NET_UDP_TUNNEL) += udp_tunnel.o
 obj-$(CONFIG_NET_IPVTI) += ip_vti.o
 obj-$(CONFIG_SYN_COOKIES) += syncookies.o
 obj-$(CONFIG_INET_AH) += ah4.o
index d5e6836..d156b3c 100644 (file)
@@ -1429,6 +1429,9 @@ static int inet_gro_complete(struct sk_buff *skb, int nhoff)
        int proto = iph->protocol;
        int err = -ENOSYS;
 
+       if (skb->encapsulation)
+               skb_set_inner_network_header(skb, nhoff);
+
        csum_replace2(&iph->check, iph->tot_len, newlen);
        iph->tot_len = newlen;
 
index a3095fd..90c0e83 100644 (file)
@@ -76,6 +76,7 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_daddr = fl4->daddr;
        inet->inet_dport = usin->sin_port;
        sk->sk_state = TCP_ESTABLISHED;
+       inet_set_txhash(sk);
        inet->inet_id = jiffies;
 
        sk_dst_set(sk, &rt->dst);
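
inet_set_txhash() is new in this cycle: at connect time it derives a flow hash from the 4-tuple and caches it in sk->sk_txhash, so transmitted skbs can carry a stable hash for queue and flow steering without re-dissecting headers. Roughly, from memory of this series (a sketch, not a quotation of the helper):

    struct flow_keys keys;

    keys.src = inet->inet_saddr;
    keys.dst = inet->inet_daddr;
    keys.port16[0] = inet->inet_sport;
    keys.port16[1] = inet->inet_dport;
    sk->sk_txhash = flow_hash_from_keys(&keys);
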
index e944937..214882e 100644 (file)
@@ -180,11 +180,12 @@ static BLOCKING_NOTIFIER_HEAD(inetaddr_chain);
 static void inet_del_ifa(struct in_device *in_dev, struct in_ifaddr **ifap,
                         int destroy);
 #ifdef CONFIG_SYSCTL
-static void devinet_sysctl_register(struct in_device *idev);
+static int devinet_sysctl_register(struct in_device *idev);
 static void devinet_sysctl_unregister(struct in_device *idev);
 #else
-static void devinet_sysctl_register(struct in_device *idev)
+static int devinet_sysctl_register(struct in_device *idev)
 {
+       return 0;
 }
 static void devinet_sysctl_unregister(struct in_device *idev)
 {
@@ -232,6 +233,7 @@ EXPORT_SYMBOL(in_dev_finish_destroy);
 static struct in_device *inetdev_init(struct net_device *dev)
 {
        struct in_device *in_dev;
+       int err = -ENOMEM;
 
        ASSERT_RTNL();
 
@@ -252,7 +254,13 @@ static struct in_device *inetdev_init(struct net_device *dev)
        /* Account for reference dev->ip_ptr (below) */
        in_dev_hold(in_dev);
 
-       devinet_sysctl_register(in_dev);
+       err = devinet_sysctl_register(in_dev);
+       if (err) {
+               in_dev->dead = 1;
+               in_dev_put(in_dev);
+               in_dev = NULL;
+               goto out;
+       }
        ip_mc_init_dev(in_dev);
        if (dev->flags & IFF_UP)
                ip_mc_up(in_dev);
@@ -260,7 +268,7 @@ static struct in_device *inetdev_init(struct net_device *dev)
        /* we can receive as soon as ip_ptr is set -- do this last */
        rcu_assign_pointer(dev->ip_ptr, in_dev);
 out:
-       return in_dev;
+       return in_dev ?: ERR_PTR(err);
 out_kfree:
        kfree(in_dev);
        in_dev = NULL;
@@ -1347,8 +1355,8 @@ static int inetdev_event(struct notifier_block *this, unsigned long event,
        if (!in_dev) {
                if (event == NETDEV_REGISTER) {
                        in_dev = inetdev_init(dev);
-                       if (!in_dev)
-                               return notifier_from_errno(-ENOMEM);
+                       if (IS_ERR(in_dev))
+                               return notifier_from_errno(PTR_ERR(in_dev));
                        if (dev->flags & IFF_LOOPBACK) {
                                IN_DEV_CONF_SET(in_dev, NOXFRM, 1);
                                IN_DEV_CONF_SET(in_dev, NOPOLICY, 1);
@@ -2182,11 +2190,21 @@ static void __devinet_sysctl_unregister(struct ipv4_devconf *cnf)
        kfree(t);
 }
 
-static void devinet_sysctl_register(struct in_device *idev)
+static int devinet_sysctl_register(struct in_device *idev)
 {
-       neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
-       __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
+       int err;
+
+       if (!sysctl_dev_name_is_allowed(idev->dev->name))
+               return -EINVAL;
+
+       err = neigh_sysctl_register(idev->dev, idev->arp_parms, NULL);
+       if (err)
+               return err;
+       err = __devinet_sysctl_register(dev_net(idev->dev), idev->dev->name,
                                        &idev->cnf);
+       if (err)
+               neigh_sysctl_unregister(idev->arp_parms);
+       return err;
 }
 
 static void devinet_sysctl_unregister(struct in_device *idev)
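/* A minimal sketch, with hypothetical foo_* names, of the ERR_PTR
 * convention the inetdev_init()/inetdev_event() changes above adopt:
 * encoding the errno in the returned pointer lets the notifier
 * forward the real error instead of guessing -ENOMEM.
 */
struct foo {
	int registered;
};

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);
	int err;

	if (!f)
		return ERR_PTR(-ENOMEM);

	err = foo_register(f);		/* hypothetical; may fail */
	if (err) {
		kfree(f);
		return ERR_PTR(err);
	}
	return f;
}

static int foo_event(void)
{
	struct foo *f = foo_create();

	if (IS_ERR(f))
		return notifier_from_errno(PTR_ERR(f));
	return NOTIFY_OK;
}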
index 4e9619b..0485bf7 100644 (file)
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
 
        skb_push(skb, hdr_len);
 
+       skb_reset_transport_header(skb);
        greh = (struct gre_base_hdr *)skb->data;
        greh->flags = tnl_flags_to_gre_flags(tpi->flags);
        greh->protocol = tpi->proto;
index eb92deb..6556263 100644 (file)
@@ -74,7 +74,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
        /* segment inner packet. */
        enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
        segs = skb_mac_gso_segment(skb, enc_features);
-       if (!segs || IS_ERR(segs)) {
+       if (IS_ERR_OR_NULL(segs)) {
                skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
                goto out;
        }
@@ -263,6 +263,9 @@ static int gre_gro_complete(struct sk_buff *skb, int nhoff)
        int err = -ENOENT;
        __be16 type;
 
+       skb->encapsulation = 1;
+       skb_shinfo(skb)->gso_type = SKB_GSO_GRE;
+
        type = greh->protocol;
        if (greh->flags & GRE_KEY)
                grehlen += GRE_HEADER_SECTION;
index 79c3d94..ea7d4af 100644 (file)
@@ -663,16 +663,16 @@ static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
        /* Check that the full IP header plus 8 bytes of protocol
         * data are present, to avoid extra checks in protocol handlers.
         */
-       if (!pskb_may_pull(skb, iph->ihl * 4 + 8))
+       if (!pskb_may_pull(skb, iph->ihl * 4 + 8)) {
+               ICMP_INC_STATS_BH(dev_net(skb->dev), ICMP_MIB_INERRORS);
                return;
+       }
 
        raw_icmp_error(skb, protocol, info);
 
-       rcu_read_lock();
        ipprot = rcu_dereference(inet_protos[protocol]);
        if (ipprot && ipprot->err_handler)
                ipprot->err_handler(skb, info);
-       rcu_read_unlock();
 }
 
 static bool icmp_tag_validation(int proto)
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
                                /* fall through */
                        case 0:
                                info = ntohs(icmph->un.frag.mtu);
-                               if (!info)
-                                       goto out;
                        }
                        break;
                case ICMP_SR_FAILED:
index 6748d42..f10eab4 100644 (file)
@@ -1321,7 +1321,7 @@ void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
        atomic_set(&im->refcnt, 1);
        spin_lock_init(&im->lock);
 #ifdef CONFIG_IP_MULTICAST
-       setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
+       setup_timer(&im->timer, igmp_timer_expire, (unsigned long)im);
        im->unsolicit_count = IGMP_Unsolicited_Report_Count;
 #endif
 
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
        rtnl_lock();
        in_dev = ip_mc_find_dev(net, imr);
+       if (!in_dev) {
+               ret = -ENODEV;
+               goto out;
+       }
        ifindex = imr->imr_ifindex;
        for (imlp = &inet->mc_list;
             (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
 
                *imlp = iml->next_rcu;
 
-               if (in_dev)
-                       ip_mc_dec_group(in_dev, group);
+               ip_mc_dec_group(in_dev, group);
                rtnl_unlock();
                /* decrease mem now to avoid the memleak warning */
                atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
                kfree_rcu(iml, rcu);
                return 0;
        }
-       if (!in_dev)
-               ret = -ENODEV;
+out:
        rtnl_unlock();
        return ret;
 }
index 3b01959..9eb89f3 100644 (file)
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
 
+#define INETFRAGS_EVICT_BUCKETS   128
+#define INETFRAGS_EVICT_MAX      512
+
+/* don't rebuild inetfrag table with new secret more often than this */
+#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
+
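/* Worked example: with HZ == 1000, INETFRAGS_MIN_REBUILD_INTERVAL is
 * 5000 jiffies, i.e. five seconds; inet_frag_may_rebuild() below then
 * allows a rebuild only when
 *	time_after(jiffies, last_rebuild_jiffies + 5 * HZ)
 * which stays correct across jiffies wrap-around.
 */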
 /* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
  * Value : 0xff if frame should be dropped.
  *         0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -46,24 +52,39 @@ const u8 ip_frag_ecn_table[16] = {
 };
 EXPORT_SYMBOL(ip_frag_ecn_table);
 
-static void inet_frag_secret_rebuild(unsigned long dummy)
+static unsigned int
+inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
+{
+       return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
+}
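/* Worked note: the mask above yields a uniform bucket index only
 * because INETFRAGS_HASHSZ is a power of two (1024 in this tree), so
 * "& (INETFRAGS_HASHSZ - 1)" keeps exactly the low 10 bits of the
 * 32-bit jhash value, e.g. 0xdeadbeef & 1023 == 0x2ef.
 */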
+
+static bool inet_frag_may_rebuild(struct inet_frags *f)
+{
+       return time_after(jiffies,
+              f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
+}
+
+static void inet_frag_secret_rebuild(struct inet_frags *f)
 {
-       struct inet_frags *f = (struct inet_frags *)dummy;
-       unsigned long now = jiffies;
        int i;
 
-       /* Per bucket lock NOT needed here, due to write lock protection */
-       write_lock(&f->lock);
+       write_seqlock_bh(&f->rnd_seqlock);
+
+       if (!inet_frag_may_rebuild(f))
+               goto out;
 
        get_random_bytes(&f->rnd, sizeof(u32));
+
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb;
                struct inet_frag_queue *q;
                struct hlist_node *n;
 
                hb = &f->hash[i];
+               spin_lock(&hb->chain_lock);
+
                hlist_for_each_entry_safe(q, n, &hb->chain, list) {
-                       unsigned int hval = f->hashfn(q);
+                       unsigned int hval = inet_frag_hashfn(f, q);
 
                        if (hval != i) {
                                struct inet_frag_bucket *hb_dest;
@@ -72,76 +93,200 @@ static void inet_frag_secret_rebuild(unsigned long dummy)
 
                                /* Relink to new hash chain. */
                                hb_dest = &f->hash[hval];
+
+                               /* This is the only place where we take
+                                * another chain_lock while already holding
+                                * one.  As this will not run concurrently,
+                                * we cannot deadlock on the hb_dest lock
+                                * below: if it is already locked, it will be
+                                * released soon, since the other caller
+                                * cannot be waiting for the hb lock we took above.
+                                */
+                               spin_lock_nested(&hb_dest->chain_lock,
+                                                SINGLE_DEPTH_NESTING);
                                hlist_add_head(&q->list, &hb_dest->chain);
+                               spin_unlock(&hb_dest->chain_lock);
                        }
                }
+               spin_unlock(&hb->chain_lock);
+       }
+
+       f->rebuild = false;
+       f->last_rebuild_jiffies = jiffies;
+out:
+       write_sequnlock_bh(&f->rnd_seqlock);
+}
+
+static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
+{
+       return q->net->low_thresh == 0 ||
+              frag_mem_limit(q->net) >= q->net->low_thresh;
+}
+
+static unsigned int
+inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
+{
+       struct inet_frag_queue *fq;
+       struct hlist_node *n;
+       unsigned int evicted = 0;
+       HLIST_HEAD(expired);
+
+evict_again:
+       spin_lock(&hb->chain_lock);
+
+       hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
+               if (!inet_fragq_should_evict(fq))
+                       continue;
+
+               if (!del_timer(&fq->timer)) {
+                       /* q is expiring right now: take a reference so it
+                        * cannot be freed under us, wait until the timer
+                        * has finished executing, then destroy it.
+                        */
+                       atomic_inc(&fq->refcnt);
+                       spin_unlock(&hb->chain_lock);
+                       del_timer_sync(&fq->timer);
+                       WARN_ON(atomic_read(&fq->refcnt) != 1);
+                       inet_frag_put(fq, f);
+                       goto evict_again;
+               }
+
+               fq->flags |= INET_FRAG_EVICTED;
+               hlist_del(&fq->list);
+               hlist_add_head(&fq->list, &expired);
+               ++evicted;
        }
-       write_unlock(&f->lock);
 
-       mod_timer(&f->secret_timer, now + f->secret_interval);
+       spin_unlock(&hb->chain_lock);
+
+       hlist_for_each_entry_safe(fq, n, &expired, list)
+               f->frag_expire((unsigned long) fq);
+
+       return evicted;
 }
 
-void inet_frags_init(struct inet_frags *f)
+static void inet_frag_worker(struct work_struct *work)
+{
+       unsigned int budget = INETFRAGS_EVICT_BUCKETS;
+       unsigned int i, evicted = 0;
+       struct inet_frags *f;
+
+       f = container_of(work, struct inet_frags, frags_work);
+
+       BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
+
+       local_bh_disable();
+
+       for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
+               evicted += inet_evict_bucket(f, &f->hash[i]);
+               i = (i + 1) & (INETFRAGS_HASHSZ - 1);
+               if (evicted > INETFRAGS_EVICT_MAX)
+                       break;
+       }
+
+       f->next_bucket = i;
+
+       local_bh_enable();
+
+       if (f->rebuild && inet_frag_may_rebuild(f))
+               inet_frag_secret_rebuild(f);
+}
+
+static void inet_frag_schedule_worker(struct inet_frags *f)
+{
+       if (unlikely(!work_pending(&f->frags_work)))
+               schedule_work(&f->frags_work);
+}
+
+int inet_frags_init(struct inet_frags *f)
 {
        int i;
 
+       INIT_WORK(&f->frags_work, inet_frag_worker);
+
        for (i = 0; i < INETFRAGS_HASHSZ; i++) {
                struct inet_frag_bucket *hb = &f->hash[i];
 
                spin_lock_init(&hb->chain_lock);
                INIT_HLIST_HEAD(&hb->chain);
        }
-       rwlock_init(&f->lock);
 
-       setup_timer(&f->secret_timer, inet_frag_secret_rebuild,
-                       (unsigned long)f);
-       f->secret_timer.expires = jiffies + f->secret_interval;
-       add_timer(&f->secret_timer);
+       seqlock_init(&f->rnd_seqlock);
+       f->last_rebuild_jiffies = 0;
+       f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
+                                           NULL);
+       if (!f->frags_cachep)
+               return -ENOMEM;
+
+       return 0;
 }
 EXPORT_SYMBOL(inet_frags_init);
 
 void inet_frags_init_net(struct netns_frags *nf)
 {
-       nf->nqueues = 0;
        init_frag_mem_limit(nf);
-       INIT_LIST_HEAD(&nf->lru_list);
-       spin_lock_init(&nf->lru_lock);
 }
 EXPORT_SYMBOL(inet_frags_init_net);
 
 void inet_frags_fini(struct inet_frags *f)
 {
-       del_timer(&f->secret_timer);
+       cancel_work_sync(&f->frags_work);
+       kmem_cache_destroy(f->frags_cachep);
 }
 EXPORT_SYMBOL(inet_frags_fini);
 
 void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
 {
-       nf->low_thresh = 0;
+       unsigned int seq;
+       int i;
 
+       nf->low_thresh = 0;
        local_bh_disable();
-       inet_frag_evictor(nf, f, true);
+
+evict_again:
+       seq = read_seqbegin(&f->rnd_seqlock);
+
+       for (i = 0; i < INETFRAGS_HASHSZ; i++)
+               inet_evict_bucket(f, &f->hash[i]);
+
+       if (read_seqretry(&f->rnd_seqlock, seq))
+               goto evict_again;
+
        local_bh_enable();
 
        percpu_counter_destroy(&nf->mem);
 }
 EXPORT_SYMBOL(inet_frags_exit_net);
 
-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+static struct inet_frag_bucket *
+get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
+__acquires(hb->chain_lock)
 {
        struct inet_frag_bucket *hb;
-       unsigned int hash;
+       unsigned int seq, hash;
+
+ restart:
+       seq = read_seqbegin(&f->rnd_seqlock);
 
-       read_lock(&f->lock);
-       hash = f->hashfn(fq);
+       hash = inet_frag_hashfn(f, fq);
        hb = &f->hash[hash];
 
        spin_lock(&hb->chain_lock);
+       if (read_seqretry(&f->rnd_seqlock, seq)) {
+               spin_unlock(&hb->chain_lock);
+               goto restart;
+       }
+
+       return hb;
+}
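/* A minimal sketch of the seqlock retry idiom used above, with a
 * hypothetical shared value: readers run lock-free and simply retry
 * if a writer (here, the secret rebuild) raced with them.
 */
static u32 example_read_stable(seqlock_t *sl, const u32 *val)
{
	unsigned int seq;
	u32 v;

	do {
		seq = read_seqbegin(sl);
		v = *val;
	} while (read_seqretry(sl, seq));

	return v;
}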
+
+static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
+{
+       struct inet_frag_bucket *hb;
+
+       hb = get_frag_bucket_locked(fq, f);
        hlist_del(&fq->list);
        spin_unlock(&hb->chain_lock);
-
-       read_unlock(&f->lock);
-       inet_frag_lru_del(fq);
 }
 
 void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
@@ -149,30 +294,29 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
        if (del_timer(&fq->timer))
                atomic_dec(&fq->refcnt);
 
-       if (!(fq->last_in & INET_FRAG_COMPLETE)) {
+       if (!(fq->flags & INET_FRAG_COMPLETE)) {
                fq_unlink(fq, f);
                atomic_dec(&fq->refcnt);
-               fq->last_in |= INET_FRAG_COMPLETE;
+               fq->flags |= INET_FRAG_COMPLETE;
        }
 }
 EXPORT_SYMBOL(inet_frag_kill);
 
 static inline void frag_kfree_skb(struct netns_frags *nf, struct inet_frags *f,
-               struct sk_buff *skb)
+                                 struct sk_buff *skb)
 {
        if (f->skb_free)
                f->skb_free(skb);
        kfree_skb(skb);
 }
 
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
-                                       int *work)
+void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
 {
        struct sk_buff *fp;
        struct netns_frags *nf;
        unsigned int sum, sum_truesize = 0;
 
-       WARN_ON(!(q->last_in & INET_FRAG_COMPLETE));
+       WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
        WARN_ON(del_timer(&q->timer) != 0);
 
        /* Release all fragment data. */
@@ -186,87 +330,32 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f,
                fp = xp;
        }
        sum = sum_truesize + f->qsize;
-       if (work)
-               *work -= sum;
        sub_frag_mem_limit(q, sum);
 
        if (f->destructor)
                f->destructor(q);
-       kfree(q);
-
+       kmem_cache_free(f->frags_cachep, q);
 }
 EXPORT_SYMBOL(inet_frag_destroy);
 
-int inet_frag_evictor(struct netns_frags *nf, struct inet_frags *f, bool force)
-{
-       struct inet_frag_queue *q;
-       int work, evicted = 0;
-
-       if (!force) {
-               if (frag_mem_limit(nf) <= nf->high_thresh)
-                       return 0;
-       }
-
-       work = frag_mem_limit(nf) - nf->low_thresh;
-       while (work > 0 || force) {
-               spin_lock(&nf->lru_lock);
-
-               if (list_empty(&nf->lru_list)) {
-                       spin_unlock(&nf->lru_lock);
-                       break;
-               }
-
-               q = list_first_entry(&nf->lru_list,
-                               struct inet_frag_queue, lru_list);
-               atomic_inc(&q->refcnt);
-               /* Remove q from list to avoid several CPUs grabbing it */
-               list_del_init(&q->lru_list);
-
-               spin_unlock(&nf->lru_lock);
-
-               spin_lock(&q->lock);
-               if (!(q->last_in & INET_FRAG_COMPLETE))
-                       inet_frag_kill(q, f);
-               spin_unlock(&q->lock);
-
-               if (atomic_dec_and_test(&q->refcnt))
-                       inet_frag_destroy(q, f, &work);
-               evicted++;
-       }
-
-       return evicted;
-}
-EXPORT_SYMBOL(inet_frag_evictor);
-
 static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
-               struct inet_frag_queue *qp_in, struct inet_frags *f,
-               void *arg)
+                                               struct inet_frag_queue *qp_in,
+                                               struct inet_frags *f,
+                                               void *arg)
 {
-       struct inet_frag_bucket *hb;
+       struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
        struct inet_frag_queue *qp;
-       unsigned int hash;
-
-       read_lock(&f->lock); /* Protects against hash rebuild */
-       /*
-        * While we stayed w/o the lock other CPU could update
-        * the rnd seed, so we need to re-calculate the hash
-        * chain. Fortunatelly the qp_in can be used to get one.
-        */
-       hash = f->hashfn(qp_in);
-       hb = &f->hash[hash];
-       spin_lock(&hb->chain_lock);
 
 #ifdef CONFIG_SMP
        /* With SMP race we have to recheck hash table, because
-        * such entry could be created on other cpu, while we
-        * released the hash bucket lock.
+        * such an entry could have been created on another cpu before
+        * we acquired the hash bucket lock.
         */
        hlist_for_each_entry(qp, &hb->chain, list) {
                if (qp->net == nf && f->match(qp, arg)) {
                        atomic_inc(&qp->refcnt);
                        spin_unlock(&hb->chain_lock);
-                       read_unlock(&f->lock);
-                       qp_in->last_in |= INET_FRAG_COMPLETE;
+                       qp_in->flags |= INET_FRAG_COMPLETE;
                        inet_frag_put(qp_in, f);
                        return qp;
                }
@@ -278,19 +367,24 @@ static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
 
        atomic_inc(&qp->refcnt);
        hlist_add_head(&qp->list, &hb->chain);
-       inet_frag_lru_add(nf, qp);
+
        spin_unlock(&hb->chain_lock);
-       read_unlock(&f->lock);
 
        return qp;
 }
 
 static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
-               struct inet_frags *f, void *arg)
+                                              struct inet_frags *f,
+                                              void *arg)
 {
        struct inet_frag_queue *q;
 
-       q = kzalloc(f->qsize, GFP_ATOMIC);
+       if (frag_mem_limit(nf) > nf->high_thresh) {
+               inet_frag_schedule_worker(f);
+               return NULL;
+       }
+
+       q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
        if (q == NULL)
                return NULL;
 
@@ -301,13 +395,13 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
        setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
        spin_lock_init(&q->lock);
        atomic_set(&q->refcnt, 1);
-       INIT_LIST_HEAD(&q->lru_list);
 
        return q;
 }
 
 static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
-               struct inet_frags *f, void *arg)
+                                               struct inet_frags *f,
+                                               void *arg)
 {
        struct inet_frag_queue *q;
 
@@ -319,13 +413,17 @@ static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
 }
 
 struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
-               struct inet_frags *f, void *key, unsigned int hash)
-       __releases(&f->lock)
+                                      struct inet_frags *f, void *key,
+                                      unsigned int hash)
 {
        struct inet_frag_bucket *hb;
        struct inet_frag_queue *q;
        int depth = 0;
 
+       if (frag_mem_limit(nf) > nf->low_thresh)
+               inet_frag_schedule_worker(f);
+
+       hash &= (INETFRAGS_HASHSZ - 1);
        hb = &f->hash[hash];
 
        spin_lock(&hb->chain_lock);
@@ -333,18 +431,22 @@ struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
                if (q->net == nf && f->match(q, key)) {
                        atomic_inc(&q->refcnt);
                        spin_unlock(&hb->chain_lock);
-                       read_unlock(&f->lock);
                        return q;
                }
                depth++;
        }
        spin_unlock(&hb->chain_lock);
-       read_unlock(&f->lock);
 
        if (depth <= INETFRAGS_MAXDEPTH)
                return inet_frag_create(nf, f, key);
-       else
-               return ERR_PTR(-ENOBUFS);
+
+       if (inet_frag_may_rebuild(f)) {
+               if (!f->rebuild)
+                       f->rebuild = true;
+               inet_frag_schedule_worker(f);
+       }
+
+       return ERR_PTR(-ENOBUFS);
 }
 EXPORT_SYMBOL(inet_frag_find);
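/* A hedged usage sketch (caller name hypothetical): with the LRU
 * evictor gone, a lookup now has three outcomes - a live queue, NULL
 * when allocation failed or the memory limit was hit, or
 * ERR_PTR(-ENOBUFS) when a hash chain grew too deep.
 */
static struct inet_frag_queue *example_find(struct netns_frags *nf,
					    struct inet_frags *f,
					    void *key, unsigned int hash)
{
	struct inet_frag_queue *q = inet_frag_find(nf, f, key, hash);

	if (IS_ERR_OR_NULL(q))
		return NULL;	/* drop the fragment; the worker reclaims */

	return q;
}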
 
index ed32313..15f0e2b 100644 (file)
@@ -55,6 +55,7 @@
  */
 
 static int sysctl_ipfrag_max_dist __read_mostly = 64;
+static const char ip_frag_cache_name[] = "ip4-frags";
 
 struct ipfrag_skb_cb
 {
@@ -86,11 +87,6 @@ static inline u8 ip4_frag_ecn(u8 tos)
 
 static struct inet_frags ip4_frags;
 
-int ip_frag_nqueues(struct net *net)
-{
-       return net->ipv4.frags.nqueues;
-}
-
 int ip_frag_mem(struct net *net)
 {
        return sum_frag_mem_limit(&net->ipv4.frags);
@@ -109,21 +105,21 @@ static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
        net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
        return jhash_3words((__force u32)id << 16 | prot,
                            (__force u32)saddr, (__force u32)daddr,
-                           ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1);
+                           ip4_frags.rnd);
 }
 
-static unsigned int ip4_hashfn(struct inet_frag_queue *q)
+static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
 {
-       struct ipq *ipq;
+       const struct ipq *ipq;
 
        ipq = container_of(q, struct ipq, q);
        return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
 }
 
-static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
+static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
 {
-       struct ipq *qp;
-       struct ip4_create_arg *arg = a;
+       const struct ipq *qp;
+       const struct ip4_create_arg *arg = a;
 
        qp = container_of(q, struct ipq, q);
        return  qp->id == arg->iph->id &&
@@ -133,14 +129,14 @@ static bool ip4_frag_match(struct inet_frag_queue *q, void *a)
                qp->user == arg->user;
 }
 
-static void ip4_frag_init(struct inet_frag_queue *q, void *a)
+static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
 {
        struct ipq *qp = container_of(q, struct ipq, q);
        struct netns_ipv4 *ipv4 = container_of(q->net, struct netns_ipv4,
                                               frags);
        struct net *net = container_of(ipv4, struct net, ipv4);
 
-       struct ip4_create_arg *arg = a;
+       const struct ip4_create_arg *arg = a;
 
        qp->protocol = arg->iph->protocol;
        qp->id = arg->iph->id;
@@ -177,18 +173,6 @@ static void ipq_kill(struct ipq *ipq)
        inet_frag_kill(&ipq->q, &ip4_frags);
 }
 
-/* Memory limiting on fragments.  Evictor trashes the oldest
- * fragment queue until we are back under the threshold.
- */
-static void ip_evictor(struct net *net)
-{
-       int evicted;
-
-       evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags, false);
-       if (evicted)
-               IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
-}
-
 /*
  * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
  */
@@ -202,19 +186,22 @@ static void ip_expire(unsigned long arg)
 
        spin_lock(&qp->q.lock);
 
-       if (qp->q.last_in & INET_FRAG_COMPLETE)
+       if (qp->q.flags & INET_FRAG_COMPLETE)
                goto out;
 
        ipq_kill(qp);
-
-       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
 
-       if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
+       if (!(qp->q.flags & INET_FRAG_EVICTED)) {
                struct sk_buff *head = qp->q.fragments;
                const struct iphdr *iph;
                int err;
 
+               IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
+
+               if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+                       goto out;
+
                rcu_read_lock();
                head->dev = dev_get_by_index_rcu(net, qp->iif);
                if (!head->dev)
@@ -227,8 +214,7 @@ static void ip_expire(unsigned long arg)
                if (err)
                        goto out_rcu_unlock;
 
-               /*
-                * Only an end host needs to send an ICMP
+               /* Only an end host needs to send an ICMP
                 * "Fragment Reassembly Timeout" message, per RFC792.
                 */
                if (qp->user == IP_DEFRAG_AF_PACKET ||
@@ -237,7 +223,6 @@ static void ip_expire(unsigned long arg)
                     (skb_rtable(head)->rt_type != RTN_LOCAL)))
                        goto out_rcu_unlock;
 
-
                /* Send an ICMP "Fragment Reassembly Timeout" message. */
                icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
 out_rcu_unlock:
@@ -260,7 +245,6 @@ static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
        arg.iph = iph;
        arg.user = user;
 
-       read_lock(&ip4_frags.lock);
        hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
 
        q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
@@ -319,7 +303,7 @@ static int ip_frag_reinit(struct ipq *qp)
        } while (fp);
        sub_frag_mem_limit(&qp->q, sum_truesize);
 
-       qp->q.last_in = 0;
+       qp->q.flags = 0;
        qp->q.len = 0;
        qp->q.meat = 0;
        qp->q.fragments = NULL;
@@ -340,7 +324,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
        int err = -ENOENT;
        u8 ecn;
 
-       if (qp->q.last_in & INET_FRAG_COMPLETE)
+       if (qp->q.flags & INET_FRAG_COMPLETE)
                goto err;
 
        if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
@@ -367,9 +351,9 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                 * or have different end, the segment is corrupted.
                 */
                if (end < qp->q.len ||
-                   ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
+                   ((qp->q.flags & INET_FRAG_LAST_IN) && end != qp->q.len))
                        goto err;
-               qp->q.last_in |= INET_FRAG_LAST_IN;
+               qp->q.flags |= INET_FRAG_LAST_IN;
                qp->q.len = end;
        } else {
                if (end&7) {
@@ -379,7 +363,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
                }
                if (end > qp->q.len) {
                        /* Some bits beyond end -> corruption. */
-                       if (qp->q.last_in & INET_FRAG_LAST_IN)
+                       if (qp->q.flags & INET_FRAG_LAST_IN)
                                goto err;
                        qp->q.len = end;
                }
@@ -488,13 +472,13 @@ found:
        qp->ecn |= ecn;
        add_frag_mem_limit(&qp->q, skb->truesize);
        if (offset == 0)
-               qp->q.last_in |= INET_FRAG_FIRST_IN;
+               qp->q.flags |= INET_FRAG_FIRST_IN;
 
        if (ip_hdr(skb)->frag_off & htons(IP_DF) &&
            skb->len + ihl > qp->q.max_size)
                qp->q.max_size = skb->len + ihl;
 
-       if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+       if (qp->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            qp->q.meat == qp->q.len) {
                unsigned long orefdst = skb->_skb_refdst;
 
@@ -505,7 +489,6 @@ found:
        }
 
        skb_dst_drop(skb);
-       inet_frag_lru_move(&qp->q);
        return -EINPROGRESS;
 
 err:
@@ -655,9 +638,6 @@ int ip_defrag(struct sk_buff *skb, u32 user)
        net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
        IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);
 
-       /* Start by cleaning up the memory. */
-       ip_evictor(net);
-
        /* Lookup (or create) queue header */
        if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
                int ret;
@@ -721,14 +701,17 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
                .data           = &init_net.ipv4.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &init_net.ipv4.frags.low_thresh
        },
        {
                .procname       = "ipfrag_low_thresh",
                .data           = &init_net.ipv4.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &init_net.ipv4.frags.high_thresh
        },
        {
                .procname       = "ipfrag_time",
@@ -740,10 +723,12 @@ static struct ctl_table ip4_frags_ns_ctl_table[] = {
        { }
 };
 
+/* secret interval has been deprecated */
+static int ip4_frags_secret_interval_unused;
 static struct ctl_table ip4_frags_ctl_table[] = {
        {
                .procname       = "ipfrag_secret_interval",
-               .data           = &ip4_frags.secret_interval,
+               .data           = &ip4_frags_secret_interval_unused,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
@@ -771,7 +756,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
                        goto err_alloc;
 
                table[0].data = &net->ipv4.frags.high_thresh;
+               table[0].extra1 = &net->ipv4.frags.low_thresh;
+               table[0].extra2 = &init_net.ipv4.frags.high_thresh;
                table[1].data = &net->ipv4.frags.low_thresh;
+               table[1].extra2 = &net->ipv4.frags.high_thresh;
                table[2].data = &net->ipv4.frags.timeout;
 
                /* Don't export sysctls to unprivileged users */
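/* A minimal sketch, all names hypothetical, of how extra1/extra2 work
 * with proc_dointvec_minmax: writes outside [*extra1, *extra2] are
 * rejected with -EINVAL, which is how the table above keeps
 * ipfrag_low_thresh <= ipfrag_high_thresh at all times.
 */
static int example_value;
static int example_min;
static int example_max = 4 * 1024 * 1024;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_thresh",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,	/* lower bound */
		.extra2		= &example_max,	/* upper bound */
	},
	{ }
};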
@@ -873,6 +861,7 @@ void __init ipfrag_init(void)
        ip4_frags.qsize = sizeof(struct ipq);
        ip4_frags.match = ip4_frag_match;
        ip4_frags.frag_expire = ip_expire;
-       ip4_frags.secret_interval = 10 * 60 * HZ;
-       inet_frags_init(&ip4_frags);
+       ip4_frags.frags_cache_name = ip_frag_cache_name;
+       if (inet_frags_init(&ip4_frags))
+               panic("IP: failed to allocate ip4_frags cache\n");
 }
index 5e7aece..ad38249 100644 (file)
@@ -288,6 +288,10 @@ int ip_options_compile(struct net *net,
                        optptr++;
                        continue;
                }
+               if (unlikely(l < 2)) {
+                       pp_ptr = optptr;
+                       goto error;
+               }
                optlen = optptr[1];
                if (optlen < 2 || optlen > l) {
                        pp_ptr = optptr;
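/* Worked note: IP options other than END/NOOP are TLV-encoded as
 * {type, length, data...}, so reading optlen = optptr[1] is only safe
 * when at least two bytes remain.  Without the l < 2 check above,
 * l == 1 would read one byte past the option area before the existing
 * "optlen < 2 || optlen > l" test could reject the packet.
 */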
index 8d3b6b0..b165568 100644 (file)
@@ -962,10 +962,6 @@ alloc_new_skb:
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
-                               else
-                                       /* only the initial fragment is
-                                          time stamped */
-                                       cork->tx_flags = 0;
                        }
                        if (skb == NULL)
                                goto error;
@@ -976,7 +972,10 @@ alloc_new_skb:
                        skb->ip_summed = csummode;
                        skb->csum = 0;
                        skb_reserve(skb, hh_len);
+
+                       /* only the initial fragment is time stamped */
                        skb_shinfo(skb)->tx_flags = cork->tx_flags;
+                       cork->tx_flags = 0;
 
                        /*
                         *      Find where to start putting bytes.
index 64741b9..5cb830c 100644 (file)
@@ -1319,7 +1319,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
                if (sk->sk_type != SOCK_STREAM)
                        return -ENOPROTOOPT;
 
-               msg.msg_control = optval;
+               msg.msg_control = (__force void *) optval;
                msg.msg_controllen = len;
                msg.msg_flags = flags;
 
index 097b3e7..dd8c8c7 100644 (file)
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
 {
        struct dst_entry *old_dst;
 
-       if (dst) {
-               if (dst->flags & DST_NOCACHE)
-                       dst = NULL;
-               else
-                       dst_clone(dst);
-       }
+       dst_clone(dst);
        old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
        dst_release(old_dst);
 }
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
 
        rcu_read_lock();
        dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
+       if (dst && !atomic_inc_not_zero(&dst->__refcnt))
+               dst = NULL;
        if (dst) {
                if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
-                       rcu_read_unlock();
                        tunnel_dst_reset(t);
-                       return NULL;
+                       dst_release(dst);
+                       dst = NULL;
                }
-               dst_hold(dst);
        }
        rcu_read_unlock();
        return (struct rtable *)dst;
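/* A minimal sketch (names hypothetical) of the RCU plus
 * atomic_inc_not_zero pattern adopted above: the reference is taken
 * only while the refcount is still non-zero, closing the window where
 * an unconditional dst_hold() could revive an entry already being
 * freed.
 */
struct example_obj {
	atomic_t refcnt;
};

static struct example_obj *example_get(struct example_obj __rcu **slot)
{
	struct example_obj *obj;

	rcu_read_lock();
	obj = rcu_dereference(*slot);
	if (obj && !atomic_inc_not_zero(&obj->refcnt))
		obj = NULL;	/* refcount already zero: treat as a miss */
	rcu_read_unlock();

	return obj;
}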
@@ -173,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (remote != t->parms.iph.daddr ||
+                   t->parms.iph.saddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
@@ -189,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
        head = &itn->tunnels[hash];
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
-               if ((local != t->parms.iph.saddr &&
-                    (local != t->parms.iph.daddr ||
-                     !ipv4_is_multicast(local))) ||
-                   !(t->dev->flags & IFF_UP))
+               if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
+                   (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
+                       continue;
+
+               if (!(t->dev->flags & IFF_UP))
                        continue;
 
                if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -209,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
 
        hlist_for_each_entry_rcu(t, head, hash_node) {
                if (t->parms.i_key != key ||
+                   t->parms.iph.saddr != 0 ||
+                   t->parms.iph.daddr != 0 ||
                    !(t->dev->flags & IFF_UP))
                        continue;
 
@@ -305,7 +305,7 @@ static struct net_device *__ip_tunnel_create(struct net *net,
        }
 
        ASSERT_RTNL();
-       dev = alloc_netdev(ops->priv_size, name, ops->setup);
+       dev = alloc_netdev(ops->priv_size, name, NET_NAME_UNKNOWN, ops->setup);
        if (!dev) {
                err = -ENOMEM;
                goto failed;
index b8960f3..e453cb7 100644 (file)
@@ -534,40 +534,28 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
 
 static int __init vti_init(void)
 {
+       const char *msg;
        int err;
 
-       pr_info("IPv4 over IPSec tunneling driver\n");
+       pr_info("IPv4 over IPsec tunneling driver\n");
 
+       msg = "tunnel device";
        err = register_pernet_device(&vti_net_ops);
        if (err < 0)
-               return err;
-       err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
-       if (err < 0) {
-               unregister_pernet_device(&vti_net_ops);
-               pr_info("vti init: can't register tunnel\n");
-
-               return err;
-       }
+               goto pernet_dev_failed;
 
+       msg = "tunnel protocols";
+       err = xfrm4_protocol_register(&vti_esp4_protocol, IPPROTO_ESP);
+       if (err < 0)
+               goto xfrm_proto_esp_failed;
        err = xfrm4_protocol_register(&vti_ah4_protocol, IPPROTO_AH);
-       if (err < 0) {
-               xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti_net_ops);
-               pr_info("vti init: can't register tunnel\n");
-
-               return err;
-       }
-
+       if (err < 0)
+               goto xfrm_proto_ah_failed;
        err = xfrm4_protocol_register(&vti_ipcomp4_protocol, IPPROTO_COMP);
-       if (err < 0) {
-               xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
-               xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
-               unregister_pernet_device(&vti_net_ops);
-               pr_info("vti init: can't register tunnel\n");
-
-               return err;
-       }
+       if (err < 0)
+               goto xfrm_proto_comp_failed;
 
+       msg = "netlink interface";
        err = rtnl_link_register(&vti_link_ops);
        if (err < 0)
                goto rtnl_link_failed;
@@ -576,23 +564,23 @@ static int __init vti_init(void)
 
 rtnl_link_failed:
        xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_proto_comp_failed:
        xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+xfrm_proto_ah_failed:
        xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
+xfrm_proto_esp_failed:
        unregister_pernet_device(&vti_net_ops);
+pernet_dev_failed:
+       pr_err("vti init: failed to register %s\n", msg);
        return err;
 }
 
 static void __exit vti_fini(void)
 {
        rtnl_link_unregister(&vti_link_ops);
-       if (xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP))
-               pr_info("vti close: can't deregister tunnel\n");
-       if (xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH))
-               pr_info("vti close: can't deregister tunnel\n");
-       if (xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP))
-               pr_info("vti close: can't deregister tunnel\n");
-
-
+       xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+       xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
+       xfrm4_protocol_deregister(&vti_esp4_protocol, IPPROTO_ESP);
        unregister_pernet_device(&vti_net_ops);
 }
 
index b3e86ea..5bbef4f 100644 (file)
@@ -143,8 +143,6 @@ __be32 ic_servaddr = NONE;  /* Boot server IP address */
 __be32 root_server_addr = NONE;        /* Address of NFS server */
 u8 root_server_path[256] = { 0, };     /* Path to mount as root */
 
-__be32 ic_dev_xid;             /* Device under configuration */
-
 /* vendor class identifier */
 static char vendor_class_identifier[253] __initdata;
 
@@ -654,6 +652,7 @@ static struct packet_type bootp_packet_type __initdata = {
        .func = ic_bootp_recv,
 };
 
+static __be32 ic_dev_xid;              /* Device under configuration */
 
 /*
  *  Initialize DHCP/BOOTP extension fields in the request.
@@ -1218,10 +1217,10 @@ static int __init ic_dynamic(void)
        get_random_bytes(&timeout, sizeof(timeout));
        timeout = CONF_BASE_TIMEOUT + (timeout % (unsigned int) CONF_TIMEOUT_RANDOM);
        for (;;) {
+#ifdef IPCONFIG_BOOTP
                /* Track the device we are configuring */
                ic_dev_xid = d->xid;
 
-#ifdef IPCONFIG_BOOTP
                if (do_bootp && (d->able & IC_BOOTP))
                        ic_bootp_send_if(d, jiffies - start_jiffies);
 #endif
index 65bcaa7..c803458 100644 (file)
@@ -500,7 +500,7 @@ static struct net_device *ipmr_reg_vif(struct net *net, struct mr_table *mrt)
        else
                sprintf(name, "pimreg%u", mrt->id);
 
-       dev = alloc_netdev(0, name, reg_vif_setup);
+       dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
 
        if (dev == NULL)
                return NULL;
index a26ce03..fb17312 100644 (file)
@@ -36,6 +36,16 @@ config NF_CONNTRACK_PROC_COMPAT
 
          If unsure, say Y.
 
+config NF_LOG_ARP
+       tristate "ARP packet logging"
+       default m if NETFILTER_ADVANCED=n
+       select NF_LOG_COMMON
+
+config NF_LOG_IPV4
+       tristate "IPv4 packet logging"
+       default m if NETFILTER_ADVANCED=n
+       select NF_LOG_COMMON
+
 config NF_TABLES_IPV4
        depends on NF_TABLES
        tristate "IPv4 nf_tables support"
@@ -159,25 +169,6 @@ config IP_NF_TARGET_SYNPROXY
 
          To compile it as a module, choose M here. If unsure, say N.
 
-config IP_NF_TARGET_ULOG
-       tristate "ULOG target support (obsolete)"
-       default m if NETFILTER_ADVANCED=n
-       ---help---
-
-         This option enables the old IPv4-only "ipt_ULOG" implementation
-         which has been obsoleted by the new "nfnetlink_log" code (see
-         CONFIG_NETFILTER_NETLINK_LOG).
-
-         This option adds a `ULOG' target, which allows you to create rules in
-         any iptables table. The packet is passed to a userspace logging
-         daemon using netlink multicast sockets; unlike the LOG target
-         which can only be viewed through syslog.
-
-         The appropriate userspace logging daemon (ulogd) may be obtained from
-         <http://www.netfilter.org/projects/ulogd/index.html>
-
-         To compile it as a module, choose M here.  If unsure, say N.
-
 # NAT + specific targets: nf_conntrack
 config NF_NAT_IPV4
        tristate "IPv4 NAT"
index 90b8240..3300162 100644 (file)
@@ -19,6 +19,10 @@ obj-$(CONFIG_NF_NAT_IPV4) += nf_nat_ipv4.o
 # defrag
 obj-$(CONFIG_NF_DEFRAG_IPV4) += nf_defrag_ipv4.o
 
+# logging
+obj-$(CONFIG_NF_LOG_ARP) += nf_log_arp.o
+obj-$(CONFIG_NF_LOG_IPV4) += nf_log_ipv4.o
+
 # NAT helpers (nf_conntrack)
 obj-$(CONFIG_NF_NAT_H323) += nf_nat_h323.o
 obj-$(CONFIG_NF_NAT_PPTP) += nf_nat_pptp.o
@@ -53,7 +57,6 @@ obj-$(CONFIG_IP_NF_TARGET_ECN) += ipt_ECN.o
 obj-$(CONFIG_IP_NF_TARGET_MASQUERADE) += ipt_MASQUERADE.o
 obj-$(CONFIG_IP_NF_TARGET_REJECT) += ipt_REJECT.o
 obj-$(CONFIG_IP_NF_TARGET_SYNPROXY) += ipt_SYNPROXY.o
-obj-$(CONFIG_IP_NF_TARGET_ULOG) += ipt_ULOG.o
 
 # generic ARP tables
 obj-$(CONFIG_IP_NF_ARPTABLES) += arp_tables.o
diff --git a/net/ipv4/netfilter/ipt_ULOG.c b/net/ipv4/netfilter/ipt_ULOG.c
deleted file mode 100644 (file)
index 9cb993c..0000000
+++ /dev/null
@@ -1,498 +0,0 @@
-/*
- * netfilter module for userspace packet logging daemons
- *
- * (C) 2000-2004 by Harald Welte <laforge@netfilter.org>
- * (C) 1999-2001 Paul `Rusty' Russell
- * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
- * (C) 2005-2007 Patrick McHardy <kaber@trash.net>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This module accepts two parameters:
- *
- * nlbufsiz:
- *   The parameter specifies how big the buffer for each netlink multicast
- * group is. e.g. If you say nlbufsiz=8192, up to eight kb of packets will
- * get accumulated in the kernel until they are sent to userspace. It is
- * NOT possible to allocate more than 128kB, and it is strongly discouraged,
- * because atomically allocating 128kB inside the network rx softirq is not
- * reliable. Please also keep in mind that this buffer size is allocated for
- * each nlgroup you are using, so the total kernel memory usage increases
- * by that factor.
- *
- * Actually you should use nlbufsiz a bit smaller than PAGE_SIZE, since
- * nlbufsiz is used with alloc_skb, which adds another
- * sizeof(struct skb_shared_info).  Use NLMSG_GOODSIZE instead.
- *
- * flushtimeout:
- *   Specify, after how many hundredths of a second the queue should be
- *   flushed even if it is not full yet.
- */
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-#include <linux/module.h>
-#include <linux/spinlock.h>
-#include <linux/socket.h>
-#include <linux/slab.h>
-#include <linux/skbuff.h>
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <net/netlink.h>
-#include <linux/netdevice.h>
-#include <linux/mm.h>
-#include <linux/moduleparam.h>
-#include <linux/netfilter.h>
-#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv4/ipt_ULOG.h>
-#include <net/netfilter/nf_log.h>
-#include <net/netns/generic.h>
-#include <net/sock.h>
-#include <linux/bitops.h>
-#include <asm/unaligned.h>
-
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Harald Welte <laforge@gnumonks.org>");
-MODULE_DESCRIPTION("Xtables: packet logging to netlink using ULOG");
-MODULE_ALIAS_NET_PF_PROTO(PF_NETLINK, NETLINK_NFLOG);
-
-#define ULOG_NL_EVENT          111             /* Harald's favorite number */
-#define ULOG_MAXNLGROUPS       32              /* numer of nlgroups */
-
-static unsigned int nlbufsiz = NLMSG_GOODSIZE;
-module_param(nlbufsiz, uint, 0400);
-MODULE_PARM_DESC(nlbufsiz, "netlink buffer size");
-
-static unsigned int flushtimeout = 10;
-module_param(flushtimeout, uint, 0600);
-MODULE_PARM_DESC(flushtimeout, "buffer flush timeout (hundredths of a second)");
-
-static bool nflog = true;
-module_param(nflog, bool, 0400);
-MODULE_PARM_DESC(nflog, "register as internal netfilter logging module");
-
-/* global data structures */
-
-typedef struct {
-       unsigned int qlen;              /* number of nlmsgs' in the skb */
-       struct nlmsghdr *lastnlh;       /* netlink header of last msg in skb */
-       struct sk_buff *skb;            /* the pre-allocated skb */
-       struct timer_list timer;        /* the timer function */
-} ulog_buff_t;
-
-static int ulog_net_id __read_mostly;
-struct ulog_net {
-       unsigned int nlgroup[ULOG_MAXNLGROUPS];
-       ulog_buff_t ulog_buffers[ULOG_MAXNLGROUPS];
-       struct sock *nflognl;
-       spinlock_t lock;
-};
-
-static struct ulog_net *ulog_pernet(struct net *net)
-{
-       return net_generic(net, ulog_net_id);
-}
-
-/* send one ulog_buff_t to userspace */
-static void ulog_send(struct ulog_net *ulog, unsigned int nlgroupnum)
-{
-       ulog_buff_t *ub = &ulog->ulog_buffers[nlgroupnum];
-
-       pr_debug("ulog_send: timer is deleting\n");
-       del_timer(&ub->timer);
-
-       if (!ub->skb) {
-               pr_debug("ulog_send: nothing to send\n");
-               return;
-       }
-
-       /* last nlmsg needs NLMSG_DONE */
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_type = NLMSG_DONE;
-
-       NETLINK_CB(ub->skb).dst_group = nlgroupnum + 1;
-       pr_debug("throwing %d packets to netlink group %u\n",
-                ub->qlen, nlgroupnum + 1);
-       netlink_broadcast(ulog->nflognl, ub->skb, 0, nlgroupnum + 1,
-                         GFP_ATOMIC);
-
-       ub->qlen = 0;
-       ub->skb = NULL;
-       ub->lastnlh = NULL;
-}
-
-
-/* timer function to flush queue in flushtimeout time */
-static void ulog_timer(unsigned long data)
-{
-       unsigned int groupnum = *((unsigned int *)data);
-       struct ulog_net *ulog = container_of((void *)data,
-                                            struct ulog_net,
-                                            nlgroup[groupnum]);
-       pr_debug("timer function called, calling ulog_send\n");
-
-       /* lock to protect against somebody modifying our structure
-        * from ipt_ulog_target at the same time */
-       spin_lock_bh(&ulog->lock);
-       ulog_send(ulog, groupnum);
-       spin_unlock_bh(&ulog->lock);
-}
-
-static struct sk_buff *ulog_alloc_skb(unsigned int size)
-{
-       struct sk_buff *skb;
-       unsigned int n;
-
-       /* alloc skb which should be big enough for a whole
-        * multipart message. WARNING: has to be <= 131000
-        * due to slab allocator restrictions */
-
-       n = max(size, nlbufsiz);
-       skb = alloc_skb(n, GFP_ATOMIC | __GFP_NOWARN);
-       if (!skb) {
-               if (n > size) {
-                       /* try to allocate only as much as we need for
-                        * current packet */
-
-                       skb = alloc_skb(size, GFP_ATOMIC);
-                       if (!skb)
-                               pr_debug("cannot even allocate %ub\n", size);
-               }
-       }
-
-       return skb;
-}
-
-static void ipt_ulog_packet(struct net *net,
-                           unsigned int hooknum,
-                           const struct sk_buff *skb,
-                           const struct net_device *in,
-                           const struct net_device *out,
-                           const struct ipt_ulog_info *loginfo,
-                           const char *prefix)
-{
-       ulog_buff_t *ub;
-       ulog_packet_msg_t *pm;
-       size_t size, copy_len;
-       struct nlmsghdr *nlh;
-       struct timeval tv;
-       struct ulog_net *ulog = ulog_pernet(net);
-
-       /* ffs == find first bit set, necessary because userspace
-        * is already shifting groupnumber, but we need unshifted.
-        * ffs() returns [1..32], we need [0..31] */
-       unsigned int groupnum = ffs(loginfo->nl_group) - 1;
-
-       /* calculate the size of the skb needed */
-       if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
-               copy_len = skb->len;
-       else
-               copy_len = loginfo->copy_range;
-
-       size = nlmsg_total_size(sizeof(*pm) + copy_len);
-
-       ub = &ulog->ulog_buffers[groupnum];
-
-       spin_lock_bh(&ulog->lock);
-
-       if (!ub->skb) {
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto alloc_failure;
-       } else if (ub->qlen >= loginfo->qthreshold ||
-                  size > skb_tailroom(ub->skb)) {
-               /* either the queue len is too high or we don't have
-                * enough room in nlskb left. send it to userspace. */
-
-               ulog_send(ulog, groupnum);
-
-               if (!(ub->skb = ulog_alloc_skb(size)))
-                       goto alloc_failure;
-       }
-
-       pr_debug("qlen %d, qthreshold %Zu\n", ub->qlen, loginfo->qthreshold);
-
-       nlh = nlmsg_put(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
-                       sizeof(*pm)+copy_len, 0);
-       if (!nlh) {
-               pr_debug("error during nlmsg_put\n");
-               goto out_unlock;
-       }
-       ub->qlen++;
-
-       pm = nlmsg_data(nlh);
-       memset(pm, 0, sizeof(*pm));
-
-       /* We might not have a timestamp, get one */
-       if (skb->tstamp.tv64 == 0)
-               __net_timestamp((struct sk_buff *)skb);
-
-       /* copy hook, prefix, timestamp, payload, etc. */
-       pm->data_len = copy_len;
-       tv = ktime_to_timeval(skb->tstamp);
-       put_unaligned(tv.tv_sec, &pm->timestamp_sec);
-       put_unaligned(tv.tv_usec, &pm->timestamp_usec);
-       put_unaligned(skb->mark, &pm->mark);
-       pm->hook = hooknum;
-       if (prefix != NULL) {
-               strncpy(pm->prefix, prefix, sizeof(pm->prefix) - 1);
-               pm->prefix[sizeof(pm->prefix) - 1] = '\0';
-       }
-       else if (loginfo->prefix[0] != '\0')
-               strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
-
-       if (in && in->hard_header_len > 0 &&
-           skb->mac_header != skb->network_header &&
-           in->hard_header_len <= ULOG_MAC_LEN) {
-               memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
-               pm->mac_len = in->hard_header_len;
-       } else
-               pm->mac_len = 0;
-
-       if (in)
-               strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
-
-       if (out)
-               strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
-
-       /* copy_len <= skb->len, so can't fail. */
-       if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
-               BUG();
-
-       /* check if we are building multi-part messages */
-       if (ub->qlen > 1)
-               ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
-
-       ub->lastnlh = nlh;
-
-       /* if timer isn't already running, start it */
-       if (!timer_pending(&ub->timer)) {
-               ub->timer.expires = jiffies + flushtimeout * HZ / 100;
-               add_timer(&ub->timer);
-       }
-
-       /* if threshold is reached, send message to userspace */
-       if (ub->qlen >= loginfo->qthreshold) {
-               if (loginfo->qthreshold > 1)
-                       nlh->nlmsg_type = NLMSG_DONE;
-               ulog_send(ulog, groupnum);
-       }
-out_unlock:
-       spin_unlock_bh(&ulog->lock);
-
-       return;
-
-alloc_failure:
-       pr_debug("Error building netlink message\n");
-       spin_unlock_bh(&ulog->lock);
-}
-
-static unsigned int
-ulog_tg(struct sk_buff *skb, const struct xt_action_param *par)
-{
-       struct net *net = dev_net(par->in ? par->in : par->out);
-
-       ipt_ulog_packet(net, par->hooknum, skb, par->in, par->out,
-                       par->targinfo, NULL);
-       return XT_CONTINUE;
-}
-
-static void ipt_logfn(struct net *net,
-                     u_int8_t pf,
-                     unsigned int hooknum,
-                     const struct sk_buff *skb,
-                     const struct net_device *in,
-                     const struct net_device *out,
-                     const struct nf_loginfo *li,
-                     const char *prefix)
-{
-       struct ipt_ulog_info loginfo;
-
-       if (!li || li->type != NF_LOG_TYPE_ULOG) {
-               loginfo.nl_group = ULOG_DEFAULT_NLGROUP;
-               loginfo.copy_range = 0;
-               loginfo.qthreshold = ULOG_DEFAULT_QTHRESHOLD;
-               loginfo.prefix[0] = '\0';
-       } else {
-               loginfo.nl_group = li->u.ulog.group;
-               loginfo.copy_range = li->u.ulog.copy_len;
-               loginfo.qthreshold = li->u.ulog.qthreshold;
-               strlcpy(loginfo.prefix, prefix, sizeof(loginfo.prefix));
-       }
-
-       ipt_ulog_packet(net, hooknum, skb, in, out, &loginfo, prefix);
-}
-
-static int ulog_tg_check(const struct xt_tgchk_param *par)
-{
-       const struct ipt_ulog_info *loginfo = par->targinfo;
-
-       if (!par->net->xt.ulog_warn_deprecated) {
-               pr_info("ULOG is deprecated and it will be removed soon, "
-                       "use NFLOG instead\n");
-               par->net->xt.ulog_warn_deprecated = true;
-       }
-
-       if (loginfo->prefix[sizeof(loginfo->prefix) - 1] != '\0') {
-               pr_debug("prefix not null-terminated\n");
-               return -EINVAL;
-       }
-       if (loginfo->qthreshold > ULOG_MAX_QLEN) {
-               pr_debug("queue threshold %Zu > MAX_QLEN\n",
-                        loginfo->qthreshold);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-#ifdef CONFIG_COMPAT
-struct compat_ipt_ulog_info {
-       compat_uint_t   nl_group;
-       compat_size_t   copy_range;
-       compat_size_t   qthreshold;
-       char            prefix[ULOG_PREFIX_LEN];
-};
-
-static void ulog_tg_compat_from_user(void *dst, const void *src)
-{
-       const struct compat_ipt_ulog_info *cl = src;
-       struct ipt_ulog_info l = {
-               .nl_group       = cl->nl_group,
-               .copy_range     = cl->copy_range,
-               .qthreshold     = cl->qthreshold,
-       };
-
-       memcpy(l.prefix, cl->prefix, sizeof(l.prefix));
-       memcpy(dst, &l, sizeof(l));
-}
-
-static int ulog_tg_compat_to_user(void __user *dst, const void *src)
-{
-       const struct ipt_ulog_info *l = src;
-       struct compat_ipt_ulog_info cl = {
-               .nl_group       = l->nl_group,
-               .copy_range     = l->copy_range,
-               .qthreshold     = l->qthreshold,
-       };
-
-       memcpy(cl.prefix, l->prefix, sizeof(cl.prefix));
-       return copy_to_user(dst, &cl, sizeof(cl)) ? -EFAULT : 0;
-}
-#endif /* CONFIG_COMPAT */
-
-static struct xt_target ulog_tg_reg __read_mostly = {
-       .name           = "ULOG",
-       .family         = NFPROTO_IPV4,
-       .target         = ulog_tg,
-       .targetsize     = sizeof(struct ipt_ulog_info),
-       .checkentry     = ulog_tg_check,
-#ifdef CONFIG_COMPAT
-       .compatsize     = sizeof(struct compat_ipt_ulog_info),
-       .compat_from_user = ulog_tg_compat_from_user,
-       .compat_to_user = ulog_tg_compat_to_user,
-#endif
-       .me             = THIS_MODULE,
-};
-
-static struct nf_logger ipt_ulog_logger __read_mostly = {
-       .name           = "ipt_ULOG",
-       .logfn          = ipt_logfn,
-       .me             = THIS_MODULE,
-};
-
-static int __net_init ulog_tg_net_init(struct net *net)
-{
-       int i;
-       struct ulog_net *ulog = ulog_pernet(net);
-       struct netlink_kernel_cfg cfg = {
-               .groups = ULOG_MAXNLGROUPS,
-       };
-
-       spin_lock_init(&ulog->lock);
-       /* initialize ulog_buffers */
-       for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
-               ulog->nlgroup[i] = i;
-               setup_timer(&ulog->ulog_buffers[i].timer, ulog_timer,
-                           (unsigned long)&ulog->nlgroup[i]);
-       }
-
-       ulog->nflognl = netlink_kernel_create(net, NETLINK_NFLOG, &cfg);
-       if (!ulog->nflognl)
-               return -ENOMEM;
-
-       if (nflog)
-               nf_log_set(net, NFPROTO_IPV4, &ipt_ulog_logger);
-
-       return 0;
-}
-
-static void __net_exit ulog_tg_net_exit(struct net *net)
-{
-       ulog_buff_t *ub;
-       int i;
-       struct ulog_net *ulog = ulog_pernet(net);
-
-       if (nflog)
-               nf_log_unset(net, &ipt_ulog_logger);
-
-       netlink_kernel_release(ulog->nflognl);
-
-       /* remove pending timers and free allocated skb's */
-       for (i = 0; i < ULOG_MAXNLGROUPS; i++) {
-               ub = &ulog->ulog_buffers[i];
-               pr_debug("timer is deleting\n");
-               del_timer(&ub->timer);
-
-               if (ub->skb) {
-                       kfree_skb(ub->skb);
-                       ub->skb = NULL;
-               }
-       }
-}
-
-static struct pernet_operations ulog_tg_net_ops = {
-       .init = ulog_tg_net_init,
-       .exit = ulog_tg_net_exit,
-       .id   = &ulog_net_id,
-       .size = sizeof(struct ulog_net),
-};
-
-static int __init ulog_tg_init(void)
-{
-       int ret;
-       pr_debug("init module\n");
-
-       if (nlbufsiz > 128*1024) {
-               pr_warn("Netlink buffer has to be <= 128kB\n");
-               return -EINVAL;
-       }
-
-       ret = register_pernet_subsys(&ulog_tg_net_ops);
-       if (ret)
-               goto out_pernet;
-
-       ret = xt_register_target(&ulog_tg_reg);
-       if (ret < 0)
-               goto out_target;
-
-       if (nflog)
-               nf_log_register(NFPROTO_IPV4, &ipt_ulog_logger);
-
-       return 0;
-
-out_target:
-       unregister_pernet_subsys(&ulog_tg_net_ops);
-out_pernet:
-       return ret;
-}
-
-static void __exit ulog_tg_exit(void)
-{
-       pr_debug("cleanup_module\n");
-       if (nflog)
-               nf_log_unregister(&ipt_ulog_logger);
-       xt_unregister_target(&ulog_tg_reg);
-       unregister_pernet_subsys(&ulog_tg_net_ops);
-}
-
-module_init(ulog_tg_init);
-module_exit(ulog_tg_exit);
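
The deleted ipt_ULOG target has been superseded by NFLOG, as the deprecation warning in ulog_tg_check() above spells out. For readers migrating userspace consumers: NFLOG delivers packets over nfnetlink rather than the old NETLINK_NFLOG socket, and is normally consumed through libnetfilter_log. A minimal listener sketch follows (hedged: the group number, buffer size, and error handling are illustrative; the function names are from the public libnetfilter_log API):

/* Pair with a rule such as: iptables -A OUTPUT -j NFLOG --nflog-group 1 */
#include <stdio.h>
#include <sys/socket.h>
#include <libnetfilter_log/libnetfilter_log.h>

static int cb(struct nflog_g_handle *gh, struct nfgenmsg *nfmsg,
	      struct nflog_data *nfa, void *data)
{
	char *prefix = nflog_get_prefix(nfa);
	char *payload;
	int len = nflog_get_payload(nfa, &payload);

	printf("prefix='%s' payload=%d bytes\n", prefix ? prefix : "", len);
	return 0;
}

int main(void)
{
	struct nflog_handle *h = nflog_open();
	struct nflog_g_handle *gh;
	char buf[4096];
	int rv;

	nflog_bind_pf(h, AF_INET);
	gh = nflog_bind_group(h, 1);			/* --nflog-group 1 */
	nflog_set_mode(gh, NFULNL_COPY_PACKET, 0xffff);	/* copy whole packet */
	nflog_callback_register(gh, &cb, NULL);

	while ((rv = recv(nflog_fd(h), buf, sizeof(buf), 0)) >= 0)
		nflog_handle_packet(h, buf, rv);

	nflog_unbind_group(gh);
	nflog_close(h);
	return 0;
}
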
index 8127dc8..a054fe0 100644 (file)
@@ -314,7 +314,7 @@ getorigdst(struct sock *sk, int optval, void __user *user, int *len)
        return -ENOENT;
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -358,7 +358,7 @@ static struct nf_sockopt_ops so_getorigdst = {
        .pf             = PF_INET,
        .get_optmin     = SO_ORIGINAL_DST,
        .get_optmax     = SO_ORIGINAL_DST+1,
-       .get            = &getorigdst,
+       .get            = getorigdst,
        .owner          = THIS_MODULE,
 };
 
@@ -388,7 +388,7 @@ struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 __read_mostly = {
        .invert_tuple    = ipv4_invert_tuple,
        .print_tuple     = ipv4_print_tuple,
        .get_l4proto     = ipv4_get_l4proto,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr = ipv4_tuple_to_nlattr,
        .nlattr_tuple_size = ipv4_nlattr_tuple_size,
        .nlattr_to_tuple = ipv4_nlattr_to_tuple,
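
The recurring defined(CONFIG_X) || defined(CONFIG_X_MODULE) tests in this and the following hunks collapse into IS_ENABLED(CONFIG_X), which is true whether the option is built in (=y) or modular (=m). A simplified sketch of the preprocessor trick behind it, assuming it mirrors include/linux/kconfig.h of this era (the helper names here are illustrative):

/* Kconfig defines CONFIG_FOO as 1 for =y and CONFIG_FOO_MODULE as 1 for =m. */
#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)

/* 1 if the option is =y or =m, 0 otherwise; usable in #if and in C code */
#define IS_ENABLED(option) (__is_defined(option) || __is_defined(option##_MODULE))
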
index a338dad..b91b264 100644 (file)
@@ -226,7 +226,7 @@ icmp_error(struct net *net, struct nf_conn *tmpl,
        return icmp_error_message(net, tmpl, skb, ctinfo, hooknum);
 }
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -408,7 +408,7 @@ struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp __read_mostly =
        .error                  = icmp_error,
        .destroy                = NULL,
        .me                     = NULL,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .tuple_to_nlattr        = icmp_tuple_to_nlattr,
        .nlattr_tuple_size      = icmp_nlattr_tuple_size,
        .nlattr_to_tuple        = icmp_nlattr_to_tuple,
index b8f6381..76bd1ae 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/netfilter_bridge.h>
 #include <linux/netfilter_ipv4.h>
 #include <net/netfilter/ipv4/nf_defrag_ipv4.h>
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
 #include <net/netfilter/nf_conntrack.h>
 #endif
 #include <net/netfilter/nf_conntrack_zones.h>
@@ -45,7 +45,7 @@ static enum ip_defrag_users nf_ct_defrag_user(unsigned int hooknum,
 {
        u16 zone = NF_CT_DEFAULT_ZONE;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
        if (skb->nfct)
                zone = nf_ct_zone((struct nf_conn *)skb->nfct);
 #endif
@@ -74,8 +74,8 @@ static unsigned int ipv4_conntrack_defrag(const struct nf_hook_ops *ops,
            inet->nodefrag)
                return NF_ACCEPT;
 
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-#if !defined(CONFIG_NF_NAT) && !defined(CONFIG_NF_NAT_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#if !IS_ENABLED(CONFIG_NF_NAT)
        /* Previously seen (loopback)?  Ignore.  Do this before
           fragment check. */
        if (skb->nfct && !nf_ct_is_template((struct nf_conn *)skb->nfct))
diff --git a/net/ipv4/netfilter/nf_log_arp.c b/net/ipv4/netfilter/nf_log_arp.c
new file mode 100644 (file)
index 0000000..ccfc78d
--- /dev/null
@@ -0,0 +1,149 @@
+/*
+ * (C) 2014 by Pablo Neira Ayuso <pablo@netfilter.org>
+ *
+ * Based on code from ebt_log from:
+ *
+ * Bart De Schuymer <bdschuym@pandora.be>
+ * Harald Welte <laforge@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+static struct nf_loginfo default_loginfo = {
+       .type   = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level    = 5,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+struct arppayload {
+       unsigned char mac_src[ETH_ALEN];
+       unsigned char ip_src[4];
+       unsigned char mac_dst[ETH_ALEN];
+       unsigned char ip_dst[4];
+};
+
+static void dump_arp_packet(struct nf_log_buf *m,
+                           const struct nf_loginfo *info,
+                           const struct sk_buff *skb, unsigned int nhoff)
+{
+       const struct arphdr *ah;
+       struct arphdr _arph;
+       const struct arppayload *ap;
+       struct arppayload _arpp;
+
+       ah = skb_header_pointer(skb, 0, sizeof(_arph), &_arph);
+       if (ah == NULL) {
+               nf_log_buf_add(m, "TRUNCATED");
+               return;
+       }
+       nf_log_buf_add(m, "ARP HTYPE=%d PTYPE=0x%04x OPCODE=%d",
+                      ntohs(ah->ar_hrd), ntohs(ah->ar_pro), ntohs(ah->ar_op));
+
+       /* If it's for Ethernet and the lengths are OK, then log the ARP
+        * payload.
+        */
+       if (ah->ar_hrd != htons(1) ||
+           ah->ar_hln != ETH_ALEN ||
+           ah->ar_pln != sizeof(__be32))
+               return;
+
+       ap = skb_header_pointer(skb, sizeof(_arph), sizeof(_arpp), &_arpp);
+       if (ap == NULL) {
+               nf_log_buf_add(m, " INCOMPLETE [%Zu bytes]",
+                              skb->len - sizeof(_arph));
+               return;
+       }
+       nf_log_buf_add(m, " MACSRC=%pM IPSRC=%pI4 MACDST=%pM IPDST=%pI4",
+                      ap->mac_src, ap->ip_src, ap->mac_dst, ap->ip_dst);
+}
+
+void nf_log_arp_packet(struct net *net, u_int8_t pf,
+                     unsigned int hooknum, const struct sk_buff *skb,
+                     const struct net_device *in,
+                     const struct net_device *out,
+                     const struct nf_loginfo *loginfo,
+                     const char *prefix)
+{
+       struct nf_log_buf *m;
+
+       /* FIXME: Disabled from containers until syslog ns is supported */
+       if (!net_eq(net, &init_net))
+               return;
+
+       m = nf_log_buf_open();
+
+       if (!loginfo)
+               loginfo = &default_loginfo;
+
+       nf_log_dump_packet_common(m, pf, hooknum, skb, in, out, loginfo,
+                                 prefix);
+       dump_arp_packet(m, loginfo, skb, 0);
+
+       nf_log_buf_close(m);
+}
+
+static struct nf_logger nf_arp_logger __read_mostly = {
+       .name           = "nf_log_arp",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_arp_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_arp_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_ARP, &nf_arp_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_arp_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_arp_logger);
+}
+
+static struct pernet_operations nf_log_arp_net_ops = {
+       .init = nf_log_arp_net_init,
+       .exit = nf_log_arp_net_exit,
+};
+
+static int __init nf_log_arp_init(void)
+{
+       int ret;
+
+       ret = register_pernet_subsys(&nf_log_arp_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_ARP, &nf_arp_logger);
+       return 0;
+}
+
+static void __exit nf_log_arp_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_arp_net_ops);
+       nf_log_unregister(&nf_arp_logger);
+}
+
+module_init(nf_log_arp_init);
+module_exit(nf_log_arp_exit);
+
+MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter ARP packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(3, 0);
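
MODULE_ALIAS_NF_LOGGER(3, 0) registers a module alias keyed on address family and logger type: 3 is the numeric value of NFPROTO_ARP and 0 is NF_LOG_TYPE_LOG. A hedged sketch of how this is presumably wired up, mirroring the nf_log infrastructure this series introduces:

#include <linux/module.h>
#include <linux/stringify.h>

/* Assumed expansion of the alias macro (see include/net/netfilter/nf_log.h): */
#define MODULE_ALIAS_NF_LOGGER(family, type) \
	MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))

/* So MODULE_ALIAS_NF_LOGGER(3, 0) becomes MODULE_ALIAS("nf-logger-3-0"), and
 * the nf_log core can demand-load the module with something like
 * request_module("nf-logger-%u-%u", pf, type) when a logger is requested. */
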
diff --git a/net/ipv4/netfilter/nf_log_ipv4.c b/net/ipv4/netfilter/nf_log_ipv4.c
new file mode 100644 (file)
index 0000000..078bdca
--- /dev/null
@@ -0,0 +1,385 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+static struct nf_loginfo default_loginfo = {
+       .type   = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level    = 5,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+/* One level of recursion won't kill us */
+static void dump_ipv4_packet(struct nf_log_buf *m,
+                            const struct nf_loginfo *info,
+                            const struct sk_buff *skb, unsigned int iphoff)
+{
+       struct iphdr _iph;
+       const struct iphdr *ih;
+       unsigned int logflags;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+       else
+               logflags = NF_LOG_MASK;
+
+       ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
+       if (ih == NULL) {
+               nf_log_buf_add(m, "TRUNCATED");
+               return;
+       }
+
+       /* Important fields:
+        * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
+       /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
+       nf_log_buf_add(m, "SRC=%pI4 DST=%pI4 ", &ih->saddr, &ih->daddr);
+
+       /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
+       nf_log_buf_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
+                      ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
+                      ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
+
+       /* Max length: 6 "CE DF MF " */
+       if (ntohs(ih->frag_off) & IP_CE)
+               nf_log_buf_add(m, "CE ");
+       if (ntohs(ih->frag_off) & IP_DF)
+               nf_log_buf_add(m, "DF ");
+       if (ntohs(ih->frag_off) & IP_MF)
+               nf_log_buf_add(m, "MF ");
+
+       /* Max length: 11 "FRAG:65535 " */
+       if (ntohs(ih->frag_off) & IP_OFFSET)
+               nf_log_buf_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
+
+       if ((logflags & XT_LOG_IPOPT) &&
+           ih->ihl * 4 > sizeof(struct iphdr)) {
+               const unsigned char *op;
+               unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
+               unsigned int i, optsize;
+
+               optsize = ih->ihl * 4 - sizeof(struct iphdr);
+               op = skb_header_pointer(skb, iphoff+sizeof(_iph),
+                                       optsize, _opt);
+               if (op == NULL) {
+                       nf_log_buf_add(m, "TRUNCATED");
+                       return;
+               }
+
+               /* Max length: 127 "OPT (" 15*4*2chars ") " */
+               nf_log_buf_add(m, "OPT (");
+               for (i = 0; i < optsize; i++)
+                       nf_log_buf_add(m, "%02X", op[i]);
+               nf_log_buf_add(m, ") ");
+       }
+
+       switch (ih->protocol) {
+       case IPPROTO_TCP:
+               if (nf_log_dump_tcp_header(m, skb, ih->protocol,
+                                          ntohs(ih->frag_off) & IP_OFFSET,
+                                          iphoff+ih->ihl*4, logflags))
+                       return;
+               break;
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE:
+               if (nf_log_dump_udp_header(m, skb, ih->protocol,
+                                          ntohs(ih->frag_off) & IP_OFFSET,
+                                          iphoff+ih->ihl*4))
+                       return;
+               break;
+       case IPPROTO_ICMP: {
+               struct icmphdr _icmph;
+               const struct icmphdr *ich;
+               static const size_t required_len[NR_ICMP_TYPES+1]
+                       = { [ICMP_ECHOREPLY] = 4,
+                           [ICMP_DEST_UNREACH]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_SOURCE_QUENCH]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_REDIRECT]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_ECHO] = 4,
+                           [ICMP_TIME_EXCEEDED]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_PARAMETERPROB]
+                           = 8 + sizeof(struct iphdr),
+                           [ICMP_TIMESTAMP] = 20,
+                           [ICMP_TIMESTAMPREPLY] = 20,
+                           [ICMP_ADDRESS] = 12,
+                           [ICMP_ADDRESSREPLY] = 12 };
+
+               /* Max length: 11 "PROTO=ICMP " */
+               nf_log_buf_add(m, "PROTO=ICMP ");
+
+               if (ntohs(ih->frag_off) & IP_OFFSET)
+                       break;
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
+                                        sizeof(_icmph), &_icmph);
+               if (ich == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               /* Max length: 18 "TYPE=255 CODE=255 " */
+               nf_log_buf_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               if (ich->type <= NR_ICMP_TYPES &&
+                   required_len[ich->type] &&
+                   skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               switch (ich->type) {
+               case ICMP_ECHOREPLY:
+               case ICMP_ECHO:
+                       /* Max length: 19 "ID=65535 SEQ=65535 " */
+                       nf_log_buf_add(m, "ID=%u SEQ=%u ",
+                                      ntohs(ich->un.echo.id),
+                                      ntohs(ich->un.echo.sequence));
+                       break;
+
+               case ICMP_PARAMETERPROB:
+                       /* Max length: 14 "PARAMETER=255 " */
+                       nf_log_buf_add(m, "PARAMETER=%u ",
+                                      ntohl(ich->un.gateway) >> 24);
+                       break;
+               case ICMP_REDIRECT:
+                       /* Max length: 24 "GATEWAY=255.255.255.255 " */
+                       nf_log_buf_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
+                       /* Fall through */
+               case ICMP_DEST_UNREACH:
+               case ICMP_SOURCE_QUENCH:
+               case ICMP_TIME_EXCEEDED:
+                       /* Max length: 3+maxlen */
+                       if (!iphoff) { /* Only recurse once. */
+                               nf_log_buf_add(m, "[");
+                               dump_ipv4_packet(m, info, skb,
+                                           iphoff + ih->ihl*4+sizeof(_icmph));
+                               nf_log_buf_add(m, "] ");
+                       }
+
+                       /* Max length: 10 "MTU=65535 " */
+                       if (ich->type == ICMP_DEST_UNREACH &&
+                           ich->code == ICMP_FRAG_NEEDED) {
+                               nf_log_buf_add(m, "MTU=%u ",
+                                              ntohs(ich->un.frag.mtu));
+                       }
+               }
+               break;
+       }
+       /* Max Length */
+       case IPPROTO_AH: {
+               struct ip_auth_hdr _ahdr;
+               const struct ip_auth_hdr *ah;
+
+               if (ntohs(ih->frag_off) & IP_OFFSET)
+                       break;
+
+               /* Max length: 9 "PROTO=AH " */
+               nf_log_buf_add(m, "PROTO=AH ");
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
+                                       sizeof(_ahdr), &_ahdr);
+               if (ah == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               /* Length: 15 "SPI=0xF1234567 " */
+               nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
+               break;
+       }
+       case IPPROTO_ESP: {
+               struct ip_esp_hdr _esph;
+               const struct ip_esp_hdr *eh;
+
+               /* Max length: 10 "PROTO=ESP " */
+               nf_log_buf_add(m, "PROTO=ESP ");
+
+               if (ntohs(ih->frag_off) & IP_OFFSET)
+                       break;
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
+                                       sizeof(_esph), &_esph);
+               if (eh == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - iphoff - ih->ihl*4);
+                       break;
+               }
+
+               /* Length: 15 "SPI=0xF1234567 " */
+               nf_log_buf_add(m, "SPI=0x%x ", ntohl(eh->spi));
+               break;
+       }
+       /* Max length: 10 "PROTO 255 " */
+       default:
+               nf_log_buf_add(m, "PROTO=%u ", ih->protocol);
+       }
+
+       /* Max length: 15 "UID=4294967295 " */
+       if ((logflags & XT_LOG_UID) && !iphoff)
+               nf_log_dump_sk_uid_gid(m, skb->sk);
+
+       /* Max length: 16 "MARK=0xFFFFFFFF " */
+       if (!iphoff && skb->mark)
+               nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
+
+       /* Proto    Max log string length */
+       /* IP:      40+46+6+11+127 = 230 */
+       /* TCP:     10+max(25,20+30+13+9+32+11+127) = 252 */
+       /* UDP:     10+max(25,20) = 35 */
+       /* UDPLITE: 14+max(25,20) = 39 */
+       /* ICMP:    11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
+       /* ESP:     10+max(25)+15 = 50 */
+       /* AH:      9+max(25)+15 = 49 */
+       /* unknown: 10 */
+
+       /* (ICMP allows recursion one level deep) */
+       /* maxlen =  IP + ICMP +  IP + max(TCP,UDP,ICMP,unknown) */
+       /* maxlen = 230+   91  + 230 + 252 = 803 */
+}
+
+static void dump_ipv4_mac_header(struct nf_log_buf *m,
+                           const struct nf_loginfo *info,
+                           const struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       unsigned int logflags = 0;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+
+       if (!(logflags & XT_LOG_MACDECODE))
+               goto fallback;
+
+       switch (dev->type) {
+       case ARPHRD_ETHER:
+               nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+                              eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+                              ntohs(eth_hdr(skb)->h_proto));
+               return;
+       default:
+               break;
+       }
+
+fallback:
+       nf_log_buf_add(m, "MAC=");
+       if (dev->hard_header_len &&
+           skb->mac_header != skb->network_header) {
+               const unsigned char *p = skb_mac_header(skb);
+               unsigned int i;
+
+               nf_log_buf_add(m, "%02x", *p++);
+               for (i = 1; i < dev->hard_header_len; i++, p++)
+                       nf_log_buf_add(m, ":%02x", *p);
+       }
+       nf_log_buf_add(m, " ");
+}
+
+static void nf_log_ip_packet(struct net *net, u_int8_t pf,
+                            unsigned int hooknum, const struct sk_buff *skb,
+                            const struct net_device *in,
+                            const struct net_device *out,
+                            const struct nf_loginfo *loginfo,
+                            const char *prefix)
+{
+       struct nf_log_buf *m;
+
+       /* FIXME: Disabled from containers until syslog ns is supported */
+       if (!net_eq(net, &init_net))
+               return;
+
+       m = nf_log_buf_open();
+
+       if (!loginfo)
+               loginfo = &default_loginfo;
+
+       nf_log_dump_packet_common(m, pf, hooknum, skb, in,
+                                 out, loginfo, prefix);
+
+       if (in != NULL)
+               dump_ipv4_mac_header(m, loginfo, skb);
+
+       dump_ipv4_packet(m, loginfo, skb, 0);
+
+       nf_log_buf_close(m);
+}
+
+static struct nf_logger nf_ip_logger __read_mostly = {
+       .name           = "nf_log_ipv4",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_ip_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_ipv4_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_IPV4, &nf_ip_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_ipv4_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_ip_logger);
+}
+
+static struct pernet_operations nf_log_ipv4_net_ops = {
+       .init = nf_log_ipv4_net_init,
+       .exit = nf_log_ipv4_net_exit,
+};
+
+static int __init nf_log_ipv4_init(void)
+{
+       int ret;
+
+       ret = register_pernet_subsys(&nf_log_ipv4_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_IPV4, &nf_ip_logger);
+       return 0;
+}
+
+static void __exit nf_log_ipv4_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_ipv4_net_ops);
+       nf_log_unregister(&nf_ip_logger);
+}
+
+module_init(nf_log_ipv4_init);
+module_exit(nf_log_ipv4_exit);
+
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter IPv4 packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(AF_INET, 0);
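
Pieced together from the format strings above plus the shared helpers they call (nf_log_dump_packet_common() and nf_log_dump_udp_header(), which are not part of this hunk), a logged UDP datagram would come out as a single line roughly like the following; every field value here is invented for illustration:

  IN=eth0 OUT= MAC=52:54:00:12:34:56:52:54:00:65:43:21:08:00 SRC=192.0.2.1 DST=192.0.2.2 LEN=84 TOS=0x00 PREC=0x00 TTL=64 ID=13205 DF PROTO=UDP SPT=5000 DPT=5001 LEN=64
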
index d8b2e14..14f5ccd 100644 (file)
@@ -154,6 +154,7 @@ static void nf_nat_ipv4_csum_recalc(struct sk_buff *skb,
                                         htons(oldlen), htons(datalen), 1);
 }
 
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
                                       struct nf_nat_range *range)
 {
@@ -169,6 +170,7 @@ static int nf_nat_ipv4_nlattr_to_range(struct nlattr *tb[],
 
        return 0;
 }
+#endif
 
 static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
        .l3proto                = NFPROTO_IPV4,
@@ -177,7 +179,9 @@ static const struct nf_nat_l3proto nf_nat_l3proto_ipv4 = {
        .manip_pkt              = nf_nat_ipv4_manip_pkt,
        .csum_update            = nf_nat_ipv4_csum_update,
        .csum_recalc            = nf_nat_ipv4_csum_recalc,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_ipv4_nlattr_to_range,
+#endif
 #ifdef CONFIG_XFRM
        .decode_session         = nf_nat_ipv4_decode_session,
 #endif
index 690d890..9414923 100644 (file)
@@ -124,7 +124,7 @@ static const struct nf_nat_l4proto gre = {
        .manip_pkt              = gre_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = gre_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index eb30347..4557b4a 100644 (file)
@@ -77,7 +77,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_icmp = {
        .manip_pkt              = icmp_manip_pkt,
        .in_range               = icmp_in_range,
        .unique_tuple           = icmp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 044a0dd..a3c59a0 100644 (file)
@@ -911,7 +911,7 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                                sin6->sin6_flowinfo = ip6_flowinfo(ip6);
                        sin6->sin6_scope_id =
                                ipv6_iface_scope_id(&sin6->sin6_addr,
-                                                   IP6CB(skb)->iif);
+                                                   inet6_iif(skb));
                        *addr_len = sizeof(*sin6);
                }
 
index ae0af93..8e3eb39 100644 (file)
@@ -52,6 +52,7 @@
 static int sockstat_seq_show(struct seq_file *seq, void *v)
 {
        struct net *net = seq->private;
+       unsigned int frag_mem;
        int orphans, sockets;
 
        local_bh_disable();
@@ -71,8 +72,8 @@ static int sockstat_seq_show(struct seq_file *seq, void *v)
                   sock_prot_inuse_get(net, &udplite_prot));
        seq_printf(seq, "RAW: inuse %d\n",
                   sock_prot_inuse_get(net, &raw_prot));
-       seq_printf(seq,  "FRAG: inuse %d memory %d\n",
-                       ip_frag_nqueues(net), ip_frag_mem(net));
+       frag_mem = ip_frag_mem(net);
+       seq_printf(seq,  "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
        return 0;
 }
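
With per-netns fragment queue counting gone, the proc line can no longer print a queue count; instead it reports whether any fragment memory is in use at all, which is what the double negation achieves. A trivial userspace illustration of the !! idiom (the value is made up):

#include <stdio.h>

int main(void)
{
	unsigned int frag_mem = 3072;	/* hypothetical ip_frag_mem() result */

	/* !!x normalizes any non-zero value to 1 */
	printf("FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
	return 0;	/* prints: FRAG: inuse 1 memory 3072 */
}
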
 
index 2c65160..739db31 100644 (file)
@@ -58,6 +58,7 @@
 #include <linux/in_route.h>
 #include <linux/route.h>
 #include <linux/skbuff.h>
+#include <linux/igmp.h>
 #include <net/net_namespace.h>
 #include <net/dst.h>
 #include <net/sock.h>
@@ -174,7 +175,9 @@ static int raw_v4_input(struct sk_buff *skb, const struct iphdr *iph, int hash)
 
        while (sk) {
                delivered = 1;
-               if (iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) {
+               if ((iph->protocol != IPPROTO_ICMP || !icmp_filter(sk, skb)) &&
+                   ip_mc_sf_allow(sk, iph->daddr, iph->saddr,
+                                  skb->dev->ifindex)) {
                        struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
 
                        /* Not releasing hash table! */
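
With raw_v4_input() now consulting ip_mc_sf_allow(), raw sockets honor per-socket multicast source filters instead of receiving every group member's traffic. A userspace sketch of installing such a filter through the standard IP_ADD_SOURCE_MEMBERSHIP option (addresses are placeholders; error handling is minimal):

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);    /* needs CAP_NET_RAW */
	struct ip_mreq_source mreq;

	memset(&mreq, 0, sizeof(mreq));
	mreq.imr_multiaddr.s_addr  = inet_addr("232.1.1.1"); /* group */
	mreq.imr_sourceaddr.s_addr = inet_addr("192.0.2.1"); /* allowed source */
	mreq.imr_interface.s_addr  = htonl(INADDR_ANY);

	/* After this, only packets from 192.0.2.1 to 232.1.1.1 are delivered. */
	if (setsockopt(fd, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
		       &mreq, sizeof(mreq)) < 0)
		perror("IP_ADD_SOURCE_MEMBERSHIP");
	return 0;
}
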
@@ -365,6 +368,8 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
 
        skb->ip_summed = CHECKSUM_NONE;
 
+       sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
+
        skb->transport_header = skb->network_header;
        err = -EFAULT;
        if (memcpy_fromiovecend((void *)iph, from, 0, length))
@@ -606,6 +611,8 @@ back_from_confirm:
                                      &rt, msg->msg_flags);
 
         else {
+               sock_tx_timestamp(sk, &ipc.tx_flags);
+
                if (!ipc.addr)
                        ipc.addr = fl4.daddr;
                lock_sock(sk);
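
The two sock_tx_timestamp() calls above let raw sockets participate in transmit timestamping. A userspace sketch of requesting software TX timestamps (constants from linux/net_tstamp.h; SO_TIMESTAMPING availability depends on libc headers):

#include <stdio.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_RAW, IPPROTO_ICMP);   /* needs CAP_NET_RAW */
	int val = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE;

	if (setsockopt(fd, SOL_SOCKET, SO_TIMESTAMPING, &val, sizeof(val)) < 0)
		perror("SO_TIMESTAMPING");
	/* Completed timestamps are then read from the socket error queue via
	 * recvmsg(fd, &msg, MSG_ERRQUEUE) as SCM_TIMESTAMPING control messages. */
	return 0;
}
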
index 082239f..1901998 100644 (file)
@@ -457,8 +457,31 @@ static struct neighbour *ipv4_neigh_lookup(const struct dst_entry *dst,
        return neigh_create(&arp_tbl, pkey, dev);
 }
 
-atomic_t *ip_idents __read_mostly;
-EXPORT_SYMBOL(ip_idents);
+#define IP_IDENTS_SZ 2048u
+struct ip_ident_bucket {
+       atomic_t        id;
+       u32             stamp32;
+};
+
+static struct ip_ident_bucket *ip_idents __read_mostly;
+
+/* In order to protect privacy, we add a perturbation to identifiers
+ * if one generator is seldom used. This makes it hard for an attacker
+ * to infer how many packets were sent between two points in time.
+ */
+u32 ip_idents_reserve(u32 hash, int segs)
+{
+       struct ip_ident_bucket *bucket = ip_idents + hash % IP_IDENTS_SZ;
+       u32 old = ACCESS_ONCE(bucket->stamp32);
+       u32 now = (u32)jiffies;
+       u32 delta = 0;
+
+       if (old != now && cmpxchg(&bucket->stamp32, old, now) == old)
+               delta = prandom_u32_max(now - old);
+
+       return atomic_add_return(segs + delta, &bucket->id) - segs;
+}
+EXPORT_SYMBOL(ip_idents_reserve);
 
 void __ip_select_ident(struct iphdr *iph, int segs)
 {
@@ -467,7 +490,10 @@ void __ip_select_ident(struct iphdr *iph, int segs)
 
        net_get_random_once(&ip_idents_hashrnd, sizeof(ip_idents_hashrnd));
 
-       hash = jhash_1word((__force u32)iph->daddr, ip_idents_hashrnd);
+       hash = jhash_3words((__force u32)iph->daddr,
+                           (__force u32)iph->saddr,
+                           iph->protocol,
+                           ip_idents_hashrnd);
        id = ip_idents_reserve(hash, segs);
        iph->id = htons(id);
 }
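
The bucketed generator above still advances monotonically, but adds a random increment bounded by the bucket's idle time on top of the segment count, so probing the ID field no longer reveals how many packets a host sent between two observations. A userspace simulation of the idea (an editorial sketch, not kernel code; "now" is an abstract monotonic tick):

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct bucket { uint32_t id; uint32_t stamp; };

/* Mirrors the shape of ip_idents_reserve(): reserve segs ids, plus a random
 * perturbation proportional to how long the bucket sat idle. */
static uint32_t idents_reserve(struct bucket *b, uint32_t now, int segs)
{
	uint32_t delta = 0;

	if (b->stamp != now) {
		delta = (uint32_t)rand() % (now - b->stamp);
		b->stamp = now;
	}
	b->id += segs + delta;
	return b->id - segs;	/* first id of the reserved range */
}

int main(void)
{
	struct bucket b = { 0, 0 };

	printf("%u\n", idents_reserve(&b, 5, 1));   /* idle bucket: jumps ahead */
	printf("%u\n", idents_reserve(&b, 5, 1));   /* same tick: advances by 1 */
	return 0;
}
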
@@ -1010,7 +1036,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        const struct iphdr *iph = (const struct iphdr *) skb->data;
        struct flowi4 fl4;
        struct rtable *rt;
-       struct dst_entry *dst;
+       struct dst_entry *odst = NULL;
        bool new = false;
 
        bh_lock_sock(sk);
@@ -1018,16 +1044,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        if (!ip_sk_accept_pmtu(sk))
                goto out;
 
-       rt = (struct rtable *) __sk_dst_get(sk);
+       odst = sk_dst_get(sk);
 
-       if (sock_owned_by_user(sk) || !rt) {
+       if (sock_owned_by_user(sk) || !odst) {
                __ipv4_sk_update_pmtu(skb, sk, mtu);
                goto out;
        }
 
        __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
 
-       if (!__sk_dst_check(sk, 0)) {
+       rt = (struct rtable *)odst;
+       if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
                rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
                if (IS_ERR(rt))
                        goto out;
@@ -1037,8 +1064,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
 
        __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
 
-       dst = dst_check(&rt->dst, 0);
-       if (!dst) {
+       if (!dst_check(&rt->dst, 0)) {
                if (new)
                        dst_release(&rt->dst);
 
@@ -1050,10 +1076,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
        }
 
        if (new)
-               __sk_dst_set(sk, &rt->dst);
+               sk_dst_set(sk, &rt->dst);
 
 out:
        bh_unlock_sock(sk);
+       dst_release(odst);
 }
 EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
 
index c86624b..c0c7568 100644 (file)
@@ -170,7 +170,8 @@ u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
 }
 EXPORT_SYMBOL_GPL(__cookie_v4_init_sequence);
 
-__u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp)
+__u32 cookie_v4_init_sequence(struct sock *sk, const struct sk_buff *skb,
+                             __u16 *mssp)
 {
        const struct iphdr *iph = ip_hdr(skb);
        const struct tcphdr *th = tcp_hdr(skb);
index eb1dde3..9d2118e 100644 (file)
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
        if (unlikely(tp->repair)) {
                if (tp->repair_queue == TCP_RECV_QUEUE) {
                        copied = tcp_send_rcvq(sk, msg, size);
-                       goto out;
+                       goto out_nopush;
                }
 
                err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
 out:
        if (copied)
                tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
+out_nopush:
        release_sock(sk);
        return copied + copied_syn;
 
index 62e48cf..9771563 100644 (file)
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
                                      struct dst_entry *dst,
                                      struct request_sock *req)
 {
-       struct tcp_sock *tp = tcp_sk(sk);
+       struct tcp_sock *tp;
        struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
        struct sock *child;
 
index 40661fc..7832d94 100644 (file)
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
        }
 
        /* D-SACK for already forgotten data... Do dumb counting. */
-       if (dup_sack && tp->undo_marker && tp->undo_retrans &&
+       if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
            !after(end_seq_0, prior_snd_una) &&
            after(end_seq_0, tp->undo_marker))
                tp->undo_retrans--;
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
                        unsigned int new_len = (pkt_len / mss) * mss;
                        if (!in_sack && new_len < pkt_len) {
                                new_len += mss;
-                               if (new_len > skb->len)
+                               if (new_len >= skb->len)
                                        return 0;
                        }
                        pkt_len = new_len;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
 
        /* Account D-SACK for retransmitted packet. */
        if (dup_sack && (sacked & TCPCB_RETRANS)) {
-               if (tp->undo_marker && tp->undo_retrans &&
+               if (tp->undo_marker && tp->undo_retrans > 0 &&
                    after(end_seq, tp->undo_marker))
                        tp->undo_retrans--;
                if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
        tp->lost_out = 0;
 
        tp->undo_marker = 0;
-       tp->undo_retrans = 0;
+       tp->undo_retrans = -1;
 }
 
 void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2475,7 +2475,7 @@ static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
  *     losses and/or application stalls), do not perform any further cwnd
  *     reductions, but instead slow start up to ssthresh.
  */
-static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
+static void tcp_init_cwnd_reduction(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
@@ -2485,8 +2485,7 @@ static void tcp_init_cwnd_reduction(struct sock *sk, const bool set_ssthresh)
        tp->prior_cwnd = tp->snd_cwnd;
        tp->prr_delivered = 0;
        tp->prr_out = 0;
-       if (set_ssthresh)
-               tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
+       tp->snd_ssthresh = inet_csk(sk)->icsk_ca_ops->ssthresh(sk);
        TCP_ECN_queue_cwr(tp);
 }
 
@@ -2528,14 +2527,14 @@ static inline void tcp_end_cwnd_reduction(struct sock *sk)
 }
 
 /* Enter CWR state. Disable cwnd undo since congestion is proven with ECN */
-void tcp_enter_cwr(struct sock *sk, const int set_ssthresh)
+void tcp_enter_cwr(struct sock *sk)
 {
        struct tcp_sock *tp = tcp_sk(sk);
 
        tp->prior_ssthresh = 0;
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                tp->undo_marker = 0;
-               tcp_init_cwnd_reduction(sk, set_ssthresh);
+               tcp_init_cwnd_reduction(sk);
                tcp_set_ca_state(sk, TCP_CA_CWR);
        }
 }
@@ -2564,7 +2563,7 @@ static void tcp_try_to_open(struct sock *sk, int flag, const int prior_unsacked)
                tp->retrans_stamp = 0;
 
        if (flag & FLAG_ECE)
-               tcp_enter_cwr(sk, 1);
+               tcp_enter_cwr(sk);
 
        if (inet_csk(sk)->icsk_ca_state != TCP_CA_CWR) {
                tcp_try_keep_open(sk);
@@ -2665,12 +2664,12 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
 
        tp->prior_ssthresh = 0;
        tp->undo_marker = tp->snd_una;
-       tp->undo_retrans = tp->retrans_out;
+       tp->undo_retrans = tp->retrans_out ? : -1;
 
        if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
                if (!ece_ack)
                        tp->prior_ssthresh = tcp_current_ssthresh(sk);
-               tcp_init_cwnd_reduction(sk, true);
+               tcp_init_cwnd_reduction(sk);
        }
        tcp_set_ca_state(sk, TCP_CA_Recovery);
 }
@@ -3346,7 +3345,7 @@ static void tcp_process_tlp_ack(struct sock *sk, u32 ack, int flag)
                tp->tlp_high_seq = 0;
                /* Don't reduce cwnd if DSACK arrives for TLP retrans. */
                if (!(flag & FLAG_DSACKING_ACK)) {
-                       tcp_init_cwnd_reduction(sk, true);
+                       tcp_init_cwnd_reduction(sk);
                        tcp_set_ca_state(sk, TCP_CA_CWR);
                        tcp_end_cwnd_reduction(sk);
                        tcp_try_keep_open(sk);
@@ -5877,3 +5876,153 @@ discard:
        return 0;
 }
 EXPORT_SYMBOL(tcp_rcv_state_process);
+
+static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       if (family == AF_INET)
+               LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+                              &ireq->ir_rmt_addr, port);
+#if IS_ENABLED(CONFIG_IPV6)
+       else if (family == AF_INET6)
+               LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
+                              &ireq->ir_v6_rmt_addr, port);
+#endif
+}
+
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                    const struct tcp_request_sock_ops *af_ops,
+                    struct sock *sk, struct sk_buff *skb)
+{
+       struct tcp_options_received tmp_opt;
+       struct request_sock *req;
+       struct tcp_sock *tp = tcp_sk(sk);
+       struct dst_entry *dst = NULL;
+       __u32 isn = TCP_SKB_CB(skb)->when;
+       bool want_cookie = false, fastopen;
+       struct flowi fl;
+       struct tcp_fastopen_cookie foc = { .len = -1 };
+       int err;
+
+       /* TW buckets are converted to open requests without
+        * limitations; they conserve resources, and the peer is
+        * evidently a real one.
+        */
+       if ((sysctl_tcp_syncookies == 2 ||
+            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+               want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
+               if (!want_cookie)
+                       goto drop;
+       }
+
+       /* Accept backlog is full. If we have already queued enough
+        * of warm entries in syn queue, drop request. It is better than
+        * clogging syn queue with openreqs with exponentially increasing
+        * timeout.
+        */
+       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+               goto drop;
+       }
+
+       req = inet_reqsk_alloc(rsk_ops);
+       if (!req)
+               goto drop;
+
+       tcp_rsk(req)->af_specific = af_ops;
+
+       tcp_clear_options(&tmp_opt);
+       tmp_opt.mss_clamp = af_ops->mss_clamp;
+       tmp_opt.user_mss  = tp->rx_opt.user_mss;
+       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+
+       if (want_cookie && !tmp_opt.saw_tstamp)
+               tcp_clear_options(&tmp_opt);
+
+       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+       tcp_openreq_init(req, &tmp_opt, skb, sk);
+
+       af_ops->init_req(req, sk, skb);
+
+       if (security_inet_conn_request(sk, skb, req))
+               goto drop_and_free;
+
+       if (!want_cookie || tmp_opt.tstamp_ok)
+               TCP_ECN_create_request(req, skb, sock_net(sk));
+
+       if (want_cookie) {
+               isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+               req->cookie_ts = tmp_opt.tstamp_ok;
+       } else if (!isn) {
+               /* VJ's idea. We save last timestamp seen
+                * from the destination in peer table, when entering
+                * state TIME-WAIT, and check against it before
+                * accepting new connection request.
+                *
+                * If "isn" is not zero, this request hit alive
+                * timewait bucket, so that all the necessary checks
+                * are made in the function processing timewait state.
+                */
+               if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
+                       bool strict;
+
+                       dst = af_ops->route_req(sk, &fl, req, &strict);
+                       if (dst && strict &&
+                           !tcp_peer_is_proven(req, dst, true)) {
+                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+                               goto drop_and_release;
+                       }
+               }
+               /* Kill the following clause, if you dislike this way. */
+               else if (!sysctl_tcp_syncookies &&
+                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                         (sysctl_max_syn_backlog >> 2)) &&
+                        !tcp_peer_is_proven(req, dst, false)) {
+                       /* Without syncookies last quarter of
+                        * backlog is filled with destinations,
+                        * proven to be alive.
+                        * It means that we continue to communicate
+                        * to destinations, already remembered
+                        * to the moment of synflood.
+                        */
+                       pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
+                                   rsk_ops->family);
+                       goto drop_and_release;
+               }
+
+               isn = af_ops->init_seq(skb);
+       }
+       if (!dst) {
+               dst = af_ops->route_req(sk, &fl, req, NULL);
+               if (!dst)
+                       goto drop_and_free;
+       }
+
+       tcp_rsk(req)->snt_isn = isn;
+       tcp_openreq_init_rwin(req, sk, dst);
+       fastopen = !want_cookie &&
+                  tcp_try_fastopen(sk, skb, req, &foc, dst);
+       err = af_ops->send_synack(sk, dst, &fl, req,
+                                 skb_get_queue_mapping(skb), &foc);
+       if (!fastopen) {
+               if (err || want_cookie)
+                       goto drop_and_free;
+
+               tcp_rsk(req)->listener = NULL;
+               af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+       }
+
+       return 0;
+
+drop_and_release:
+       dst_release(dst);
+drop_and_free:
+       reqsk_free(req);
+drop:
+       NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+       return 0;
+}
+EXPORT_SYMBOL(tcp_conn_request);
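
tcp_conn_request() reaches syncookies through cookie_init_sequence(), which this hunk does not show. It is presumably a thin wrapper in include/net/tcp.h that dispatches to the new af_ops hook and degrades to 0 when syncookies are compiled out, along these lines (a hedged sketch, not the verbatim header):

#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 struct sock *sk,
					 const struct sk_buff *skb, __u16 *mss)
{
	return ops->cookie_init_seq(sk, skb, mss); /* e.g. cookie_v4_init_sequence */
}
#else
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
					 struct sock *sk,
					 const struct sk_buff *skb, __u16 *mss)
{
	return 0;
}
#endif
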
index 77cccda..992a1f9 100644 (file)
@@ -99,7 +99,7 @@ static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 struct inet_hashinfo tcp_hashinfo;
 EXPORT_SYMBOL(tcp_hashinfo);
 
-static inline __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
+static __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
 {
        return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
                                          ip_hdr(skb)->saddr,
@@ -208,6 +208,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
        inet->inet_dport = usin->sin_port;
        inet->inet_daddr = daddr;
 
+       inet_set_txhash(sk);
+
        inet_csk(sk)->icsk_ext_hdr_len = 0;
        if (inet_opt)
                inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -342,11 +344,6 @@ void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
        int err;
        struct net *net = dev_net(icmp_skb->dev);
 
-       if (icmp_skb->len < (iph->ihl << 2) + 8) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
-               return;
-       }
-
        sk = inet_lookup(net, &tcp_hashinfo, iph->daddr, th->dest,
                        iph->saddr, th->source, inet_iif(icmp_skb));
        if (!sk) {
@@ -814,6 +811,7 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
  *     socket.
  */
 static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
+                             struct flowi *fl,
                              struct request_sock *req,
                              u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc)
@@ -837,24 +835,11 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
                                            ireq->ir_rmt_addr,
                                            ireq->opt);
                err = net_xmit_eval(err);
-               if (!tcp_rsk(req)->snt_synack && !err)
-                       tcp_rsk(req)->snt_synack = tcp_time_stamp;
        }
 
        return err;
 }
 
-static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req)
-{
-       int res = tcp_v4_send_synack(sk, NULL, req, 0, NULL);
-
-       if (!res) {
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-       }
-       return res;
-}
-
 /*
  *     IPv4 request_sock destructor.
  */
@@ -1064,7 +1049,7 @@ static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
        if (sin->sin_family != AF_INET)
                return -EINVAL;
 
-       if (!cmd.tcpm_key || !cmd.tcpm_keylen)
+       if (!cmd.tcpm_keylen)
                return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
                                      AF_INET);
 
@@ -1237,161 +1222,68 @@ static bool tcp_v4_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 
 #endif
 
+static void tcp_v4_init_req(struct request_sock *req, struct sock *sk,
+                           struct sk_buff *skb)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+
+       ireq->ir_loc_addr = ip_hdr(skb)->daddr;
+       ireq->ir_rmt_addr = ip_hdr(skb)->saddr;
+       ireq->no_srccheck = inet_sk(sk)->transparent;
+       ireq->opt = tcp_v4_save_options(skb);
+}
+
+static struct dst_entry *tcp_v4_route_req(struct sock *sk, struct flowi *fl,
+                                         const struct request_sock *req,
+                                         bool *strict)
+{
+       struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
+
+       if (strict) {
+               if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
+                       *strict = true;
+               else
+                       *strict = false;
+       }
+
+       return dst;
+}
+
 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
        .family         =       PF_INET,
        .obj_size       =       sizeof(struct tcp_request_sock),
-       .rtx_syn_ack    =       tcp_v4_rtx_synack,
+       .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v4_reqsk_send_ack,
        .destructor     =       tcp_v4_reqsk_destructor,
        .send_reset     =       tcp_v4_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
+       .mss_clamp      =       TCP_MSS_DEFAULT,
+#ifdef CONFIG_TCP_MD5SIG
        .md5_lookup     =       tcp_v4_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v4_md5_hash_skb,
-};
 #endif
+       .init_req       =       tcp_v4_init_req,
+#ifdef CONFIG_SYN_COOKIES
+       .cookie_init_seq =      cookie_v4_init_sequence,
+#endif
+       .route_req      =       tcp_v4_route_req,
+       .init_seq       =       tcp_v4_init_sequence,
+       .send_synack    =       tcp_v4_send_synack,
+       .queue_hash_add =       inet_csk_reqsk_queue_hash_add,
+};
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcp_options_received tmp_opt;
-       struct request_sock *req;
-       struct inet_request_sock *ireq;
-       struct tcp_sock *tp = tcp_sk(sk);
-       struct dst_entry *dst = NULL;
-       __be32 saddr = ip_hdr(skb)->saddr;
-       __be32 daddr = ip_hdr(skb)->daddr;
-       __u32 isn = TCP_SKB_CB(skb)->when;
-       bool want_cookie = false, fastopen;
-       struct flowi4 fl4;
-       struct tcp_fastopen_cookie foc = { .len = -1 };
-       int err;
-
        /* Never answer SYNs sent to broadcast or multicast */
        if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                goto drop;
 
-       /* TW buckets are converted to open requests without
-        * limitations, they conserve resources and peer is
-        * evidently real one.
-        */
-       if ((sysctl_tcp_syncookies == 2 ||
-            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-               want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
-               if (!want_cookie)
-                       goto drop;
-       }
-
-       /* Accept backlog is full. If we have already queued enough
-        * of warm entries in syn queue, drop request. It is better than
-        * clogging syn queue with openreqs with exponentially increasing
-        * timeout.
-        */
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-               goto drop;
-       }
-
-       req = inet_reqsk_alloc(&tcp_request_sock_ops);
-       if (!req)
-               goto drop;
-
-#ifdef CONFIG_TCP_MD5SIG
-       tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
-#endif
-
-       tcp_clear_options(&tmp_opt);
-       tmp_opt.mss_clamp = TCP_MSS_DEFAULT;
-       tmp_opt.user_mss  = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-       if (want_cookie && !tmp_opt.saw_tstamp)
-               tcp_clear_options(&tmp_opt);
-
-       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-       tcp_openreq_init(req, &tmp_opt, skb);
-
-       ireq = inet_rsk(req);
-       ireq->ir_loc_addr = daddr;
-       ireq->ir_rmt_addr = saddr;
-       ireq->no_srccheck = inet_sk(sk)->transparent;
-       ireq->opt = tcp_v4_save_options(skb);
-       ireq->ir_mark = inet_request_mark(sk, skb);
-
-       if (security_inet_conn_request(sk, skb, req))
-               goto drop_and_free;
-
-       if (!want_cookie || tmp_opt.tstamp_ok)
-               TCP_ECN_create_request(req, skb, sock_net(sk));
-
-       if (want_cookie) {
-               isn = cookie_v4_init_sequence(sk, skb, &req->mss);
-               req->cookie_ts = tmp_opt.tstamp_ok;
-       } else if (!isn) {
-               /* VJ's idea. We save last timestamp seen
-                * from the destination in peer table, when entering
-                * state TIME-WAIT, and check against it before
-                * accepting new connection request.
-                *
-                * If "isn" is not zero, this request hit alive
-                * timewait bucket, so that all the necessary checks
-                * are made in the function processing timewait state.
-                */
-               if (tmp_opt.saw_tstamp &&
-                   tcp_death_row.sysctl_tw_recycle &&
-                   (dst = inet_csk_route_req(sk, &fl4, req)) != NULL &&
-                   fl4.daddr == saddr) {
-                       if (!tcp_peer_is_proven(req, dst, true)) {
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-                               goto drop_and_release;
-                       }
-               }
-               /* Kill the following clause, if you dislike this way. */
-               else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (sysctl_max_syn_backlog >> 2)) &&
-                        !tcp_peer_is_proven(req, dst, false)) {
-                       /* Without syncookies last quarter of
-                        * backlog is filled with destinations,
-                        * proven to be alive.
-                        * It means that we continue to communicate
-                        * to destinations, already remembered
-                        * to the moment of synflood.
-                        */
-                       LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
-                                      &saddr, ntohs(tcp_hdr(skb)->source));
-                       goto drop_and_release;
-               }
-
-               isn = tcp_v4_init_sequence(skb);
-       }
-       if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
-               goto drop_and_free;
-
-       tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_openreq_init_rwin(req, sk, dst);
-       fastopen = !want_cookie &&
-                  tcp_try_fastopen(sk, skb, req, &foc, dst);
-       err = tcp_v4_send_synack(sk, dst, req,
-                                skb_get_queue_mapping(skb), &foc);
-       if (!fastopen) {
-               if (err || want_cookie)
-                       goto drop_and_free;
-
-               tcp_rsk(req)->snt_synack = tcp_time_stamp;
-               tcp_rsk(req)->listener = NULL;
-               inet_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-       }
-
-       return 0;
+       return tcp_conn_request(&tcp_request_sock_ops,
+                               &tcp_request_sock_ipv4_ops, sk, skb);
 
-drop_and_release:
-       dst_release(dst);
-drop_and_free:
-       reqsk_free(req);
 drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0;
@@ -1439,6 +1331,7 @@ struct sock *tcp_v4_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newinet->mc_ttl       = ip_hdr(skb)->ttl;
        newinet->rcv_tos      = ip_hdr(skb)->tos;
        inet_csk(newsk)->icsk_ext_hdr_len = 0;
+       inet_set_txhash(newsk);
        if (inet_opt)
                inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
        newinet->inet_id = newtp->write_seq ^ jiffies;
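
Both inet_set_txhash() call sites above store a flow hash in the socket at connect/accept time; tcp_transmit_skb() (in the tcp_output.c hunk further down) then stamps it onto every outgoing skb so that flow steering stays stable for the connection's lifetime. The helper it uses is assumed to look roughly like this (hedged sketch of include/net/sock.h from this series):

static inline void skb_set_hash_from_sk(struct sk_buff *skb, struct sock *sk)
{
	if (sk->sk_txhash) {
		skb->l4_hash = 1;		/* hash covers L4 ports */
		skb->hash = sk->sk_txhash;
	}
}
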
index 4fe0418..0d54e59 100644 (file)
@@ -1093,7 +1093,6 @@ static const struct genl_ops tcp_metrics_nl_ops[] = {
                .doit = tcp_metrics_nl_cmd_get,
                .dumpit = tcp_metrics_nl_dump,
                .policy = tcp_metrics_nl_policy,
-               .flags = GENL_ADMIN_PERM,
        },
        {
                .cmd = TCP_METRICS_CMD_DEL,
index e68e0d4..1649988 100644 (file)
@@ -298,7 +298,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
                        tw->tw_flowlabel = np->flow_label >> 12;
-                       tw->tw_ipv6only = np->ipv6only;
+                       tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
 
index 4e86c59..55046ec 100644
@@ -309,7 +309,7 @@ static int tcp4_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v4_check(skb->len - thoff, iph->saddr,
                                  iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;
 
        return tcp_gro_complete(skb);
 }
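
The switch from plain assignment to OR matters for encapsulated traffic: when GRO completion runs on an inner TCP header, an assignment would wipe out tunnel bits already recorded on the skb by the outer handler. A one-line illustration; the tunnel flag named here is an example, not taken from this hunk:

        skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV4;  /* preserves e.g. SKB_GSO_UDP_TUNNEL */
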
index d92bce0..8fcfc91 100644
@@ -916,6 +916,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = tcp_wfree;
+       skb_set_hash_from_sk(skb, sk);
        atomic_add(skb->truesize, &sk->sk_wmem_alloc);
 
        /* Build TCP header and checksum it. */
@@ -978,7 +979,7 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it,
        if (likely(err <= 0))
                return err;
 
-       tcp_enter_cwr(sk, 1);
+       tcp_enter_cwr(sk);
 
        return net_xmit_eval(err);
 }
@@ -2525,8 +2526,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
                if (!tp->retrans_stamp)
                        tp->retrans_stamp = TCP_SKB_CB(skb)->when;
 
-               tp->undo_retrans += tcp_skb_pcount(skb);
-
                /* snd_nxt is stored to detect loss of retransmitted segment,
                 * see tcp_input.c tcp_sacktag_write_queue().
                 */
@@ -2534,6 +2533,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
        } else if (err != -EBUSY) {
                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
        }
+
+       if (tp->undo_retrans < 0)
+               tp->undo_retrans = 0;
+       tp->undo_retrans += tcp_skb_pcount(skb);
        return err;
 }
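
Two behavioural changes in one move: the retransmit is now accounted even when the local queueing attempt fails short of a hard error, and a negative undo_retrans (the "nothing recorded yet" sentinel used by the undo machinery; an assumption drawn from companion changes, not visible in this hunk) is normalized before the first increment. The resulting invariant, in isolation:

        /* Sketch: treat a negative sentinel as zero before accounting. */
        if (tp->undo_retrans < 0)
                tp->undo_retrans = 0;
        tp->undo_retrans += tcp_skb_pcount(skb);
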
 
@@ -3299,3 +3302,18 @@ void tcp_send_probe0(struct sock *sk)
                                          TCP_RTO_MAX);
        }
 }
+
+int tcp_rtx_synack(struct sock *sk, struct request_sock *req)
+{
+       const struct tcp_request_sock_ops *af_ops = tcp_rsk(req)->af_specific;
+       struct flowi fl;
+       int res;
+
+       res = af_ops->send_synack(sk, NULL, &fl, req, 0, NULL);
+       if (!res) {
+               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
+               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
+       }
+       return res;
+}
+EXPORT_SYMBOL(tcp_rtx_synack);
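
The new helper gives SYN-ACK retransmission a single address-family-neutral entry point: it transmits through the af-specific send_synack hook and bumps both retransmit counters only on success. A hedged sketch of how a request_sock ops table would point at it; the field name follows the existing struct request_sock_ops layout, and the actual wiring happens elsewhere in this series:

        static struct request_sock_ops demo_rsk_ops = {
                /* retransmits go through af_ops->send_synack above */
                .rtx_syn_ack = tcp_rtx_synack,
        };
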
index d92f94b..f57c0e4 100644
@@ -594,27 +594,6 @@ static inline bool __udp_is_mcast_sock(struct net *net, struct sock *sk,
        return true;
 }
 
-static inline struct sock *udp_v4_mcast_next(struct net *net, struct sock *sk,
-                                            __be16 loc_port, __be32 loc_addr,
-                                            __be16 rmt_port, __be32 rmt_addr,
-                                            int dif)
-{
-       struct hlist_nulls_node *node;
-       struct sock *s = sk;
-       unsigned short hnum = ntohs(loc_port);
-
-       sk_nulls_for_each_from(s, node) {
-               if (__udp_is_mcast_sock(net, s,
-                                       loc_port, loc_addr,
-                                       rmt_port, rmt_addr,
-                                       dif, hnum))
-                       goto found;
-       }
-       s = NULL;
-found:
-       return s;
-}
-
 /*
  * This routine is called by the ICMP module when it gets some
  * sort of error condition.  If err < 0 then the socket should
@@ -1588,8 +1567,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                goto csum_error;
 
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+       if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+               UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
+                                is_udplite);
                goto drop;
+       }
 
        rc = 0;
 
@@ -1637,6 +1619,8 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
                if (skb1 && udp_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
+
+               sock_put(sk);
        }
        if (unlikely(skb1))
                kfree_skb(skb1);
@@ -1665,41 +1649,50 @@ static int __udp4_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
                                    struct udp_table *udptable)
 {
        struct sock *sk, *stack[256 / sizeof(struct sock *)];
-       struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
-       int dif;
-       unsigned int i, count = 0;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(uh->dest);
+       struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+       int dif = skb->dev->ifindex;
+       unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
+       unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+
+       if (use_hash2) {
+               hash2_any = udp4_portaddr_hash(net, htonl(INADDR_ANY), hnum) &
+                           udp_table.mask;
+               hash2 = udp4_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+start_lookup:
+               hslot = &udp_table.hash2[hash2];
+               offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
+       }
 
        spin_lock(&hslot->lock);
-       sk = sk_nulls_head(&hslot->head);
-       dif = skb->dev->ifindex;
-       sk = udp_v4_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-       while (sk) {
-               stack[count++] = sk;
-               sk = udp_v4_mcast_next(net, sk_nulls_next(sk), uh->dest,
-                                      daddr, uh->source, saddr, dif);
-               if (unlikely(count == ARRAY_SIZE(stack))) {
-                       if (!sk)
-                               break;
-                       flush_stack(stack, count, skb, ~0);
-                       count = 0;
+       sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
+               if (__udp_is_mcast_sock(net, sk,
+                                       uh->dest, daddr,
+                                       uh->source, saddr,
+                                       dif, hnum)) {
+                       if (unlikely(count == ARRAY_SIZE(stack))) {
+                               flush_stack(stack, count, skb, ~0);
+                               count = 0;
+                       }
+                       stack[count++] = sk;
+                       sock_hold(sk);
                }
        }
-       /*
-        * before releasing chain lock, we must take a reference on sockets
-        */
-       for (i = 0; i < count; i++)
-               sock_hold(stack[i]);
 
        spin_unlock(&hslot->lock);
 
+       /* Also lookup *:port if we are using hash2 and haven't done so yet. */
+       if (use_hash2 && hash2 != hash2_any) {
+               hash2 = hash2_any;
+               goto start_lookup;
+       }
+
        /*
         * do the slow work with no lock held
         */
        if (count) {
                flush_stack(stack, count, skb, count - 1);
-
-               for (i = 0; i < count; i++)
-                       sock_put(stack[i]);
        } else {
                kfree_skb(skb);
        }
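
The rework bounds the multicast delivery walk: once a port-only hash slot holds more than ten sockets, delivery switches to the address+port (hash2) table and makes at most two passes, one over sockets bound to the group address and one over wildcard binds; references are now taken inside the locked walk and dropped in flush_stack(). Simplified control flow, with illustrative helper names that are not kernel API:

        hash2     = portaddr_hash(net, daddr, hnum);       /* group:port bucket */
        hash2_any = portaddr_hash(net, INADDR_ANY, hnum);  /* 0.0.0.0:port bucket */
        scan_and_collect(hash2);
        if (hash2 != hash2_any)
                scan_and_collect(hash2_any);               /* second lookup pass */
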
@@ -2523,79 +2516,3 @@ void __init udp_init(void)
        sysctl_udp_rmem_min = SK_MEM_QUANTUM;
        sysctl_udp_wmem_min = SK_MEM_QUANTUM;
 }
-
-struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
-                                      netdev_features_t features)
-{
-       struct sk_buff *segs = ERR_PTR(-EINVAL);
-       u16 mac_offset = skb->mac_header;
-       int mac_len = skb->mac_len;
-       int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
-       __be16 protocol = skb->protocol;
-       netdev_features_t enc_features;
-       int udp_offset, outer_hlen;
-       unsigned int oldlen;
-       bool need_csum;
-
-       oldlen = (u16)~skb->len;
-
-       if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
-               goto out;
-
-       skb->encapsulation = 0;
-       __skb_pull(skb, tnl_hlen);
-       skb_reset_mac_header(skb);
-       skb_set_network_header(skb, skb_inner_network_offset(skb));
-       skb->mac_len = skb_inner_network_offset(skb);
-       skb->protocol = htons(ETH_P_TEB);
-
-       need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
-       if (need_csum)
-               skb->encap_hdr_csum = 1;
-
-       /* segment inner packet. */
-       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
-       segs = skb_mac_gso_segment(skb, enc_features);
-       if (!segs || IS_ERR(segs)) {
-               skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
-                                    mac_len);
-               goto out;
-       }
-
-       outer_hlen = skb_tnl_header_len(skb);
-       udp_offset = outer_hlen - tnl_hlen;
-       skb = segs;
-       do {
-               struct udphdr *uh;
-               int len;
-
-               skb_reset_inner_headers(skb);
-               skb->encapsulation = 1;
-
-               skb->mac_len = mac_len;
-
-               skb_push(skb, outer_hlen);
-               skb_reset_mac_header(skb);
-               skb_set_network_header(skb, mac_len);
-               skb_set_transport_header(skb, udp_offset);
-               len = skb->len - udp_offset;
-               uh = udp_hdr(skb);
-               uh->len = htons(len);
-
-               if (need_csum) {
-                       __be32 delta = htonl(oldlen + len);
-
-                       uh->check = ~csum_fold((__force __wsum)
-                                              ((__force u32)uh->check +
-                                               (__force u32)delta));
-                       uh->check = gso_make_checksum(skb, ~uh->check);
-
-                       if (uh->check == 0)
-                               uh->check = CSUM_MANGLED_0;
-               }
-
-               skb->protocol = protocol;
-       } while ((skb = skb->next));
-out:
-       return segs;
-}
index 546d2d4..59035bc 100644
@@ -47,6 +47,82 @@ static int udp4_ufo_send_check(struct sk_buff *skb)
        return 0;
 }
 
+struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
+                                      netdev_features_t features)
+{
+       struct sk_buff *segs = ERR_PTR(-EINVAL);
+       u16 mac_offset = skb->mac_header;
+       int mac_len = skb->mac_len;
+       int tnl_hlen = skb_inner_mac_header(skb) - skb_transport_header(skb);
+       __be16 protocol = skb->protocol;
+       netdev_features_t enc_features;
+       int udp_offset, outer_hlen;
+       unsigned int oldlen;
+       bool need_csum;
+
+       oldlen = (u16)~skb->len;
+
+       if (unlikely(!pskb_may_pull(skb, tnl_hlen)))
+               goto out;
+
+       skb->encapsulation = 0;
+       __skb_pull(skb, tnl_hlen);
+       skb_reset_mac_header(skb);
+       skb_set_network_header(skb, skb_inner_network_offset(skb));
+       skb->mac_len = skb_inner_network_offset(skb);
+       skb->protocol = htons(ETH_P_TEB);
+
+       need_csum = !!(skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL_CSUM);
+       if (need_csum)
+               skb->encap_hdr_csum = 1;
+
+       /* segment inner packet. */
+       enc_features = skb->dev->hw_enc_features & netif_skb_features(skb);
+       segs = skb_mac_gso_segment(skb, enc_features);
+       if (IS_ERR_OR_NULL(segs)) {
+               skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
+                                    mac_len);
+               goto out;
+       }
+
+       outer_hlen = skb_tnl_header_len(skb);
+       udp_offset = outer_hlen - tnl_hlen;
+       skb = segs;
+       do {
+               struct udphdr *uh;
+               int len;
+
+               skb_reset_inner_headers(skb);
+               skb->encapsulation = 1;
+
+               skb->mac_len = mac_len;
+
+               skb_push(skb, outer_hlen);
+               skb_reset_mac_header(skb);
+               skb_set_network_header(skb, mac_len);
+               skb_set_transport_header(skb, udp_offset);
+               len = skb->len - udp_offset;
+               uh = udp_hdr(skb);
+               uh->len = htons(len);
+
+               if (need_csum) {
+                       __be32 delta = htonl(oldlen + len);
+
+                       uh->check = ~csum_fold((__force __wsum)
+                                              ((__force u32)uh->check +
+                                               (__force u32)delta));
+                       uh->check = gso_make_checksum(skb, ~uh->check);
+
+                       if (uh->check == 0)
+                               uh->check = CSUM_MANGLED_0;
+               }
+
+               skb->protocol = protocol;
+       } while ((skb = skb->next));
+out:
+       return segs;
+}
+
 static struct sk_buff *udp4_ufo_fragment(struct sk_buff *skb,
                                         netdev_features_t features)
 {
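
skb_udp_tunnel_segment() moves verbatim from udp.c (deleted above) into udp_offload.c; the only textual change is collapsing the two-part failure test into the helper from <linux/err.h>. The equivalent check, with an illustrative label:

        if (IS_ERR_OR_NULL(segs))       /* segs == NULL or an ERR_PTR() value */
                goto unwind;
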
diff --git a/net/ipv4/udp_tunnel.c b/net/ipv4/udp_tunnel.c
new file mode 100644
index 0000000..61ec1a6
--- /dev/null
+++ b/net/ipv4/udp_tunnel.c
@@ -0,0 +1,100 @@
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/udp.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <net/udp.h>
+#include <net/udp_tunnel.h>
+#include <net/net_namespace.h>
+
+int udp_sock_create(struct net *net, struct udp_port_cfg *cfg,
+                   struct socket **sockp)
+{
+       int err = -EINVAL;
+       struct socket *sock = NULL;
+
+#if IS_ENABLED(CONFIG_IPV6)
+       if (cfg->family == AF_INET6) {
+               struct sockaddr_in6 udp6_addr;
+
+               err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
+               if (err < 0)
+                       goto error;
+
+               sk_change_net(sock->sk, net);
+
+               udp6_addr.sin6_family = AF_INET6;
+               memcpy(&udp6_addr.sin6_addr, &cfg->local_ip6,
+                      sizeof(udp6_addr.sin6_addr));
+               udp6_addr.sin6_port = cfg->local_udp_port;
+               err = kernel_bind(sock, (struct sockaddr *)&udp6_addr,
+                                 sizeof(udp6_addr));
+               if (err < 0)
+                       goto error;
+
+               if (cfg->peer_udp_port) {
+                       udp6_addr.sin6_family = AF_INET6;
+                       memcpy(&udp6_addr.sin6_addr, &cfg->peer_ip6,
+                              sizeof(udp6_addr.sin6_addr));
+                       udp6_addr.sin6_port = cfg->peer_udp_port;
+                       err = kernel_connect(sock,
+                                            (struct sockaddr *)&udp6_addr,
+                                            sizeof(udp6_addr), 0);
+               }
+               if (err < 0)
+                       goto error;
+
+               udp_set_no_check6_tx(sock->sk, !cfg->use_udp6_tx_checksums);
+               udp_set_no_check6_rx(sock->sk, !cfg->use_udp6_rx_checksums);
+       } else
+#endif
+       if (cfg->family == AF_INET) {
+               struct sockaddr_in udp_addr;
+
+               err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
+               if (err < 0)
+                       goto error;
+
+               sk_change_net(sock->sk, net);
+
+               udp_addr.sin_family = AF_INET;
+               udp_addr.sin_addr = cfg->local_ip;
+               udp_addr.sin_port = cfg->local_udp_port;
+               err = kernel_bind(sock, (struct sockaddr *)&udp_addr,
+                                 sizeof(udp_addr));
+               if (err < 0)
+                       goto error;
+
+               if (cfg->peer_udp_port) {
+                       udp_addr.sin_family = AF_INET;
+                       udp_addr.sin_addr = cfg->peer_ip;
+                       udp_addr.sin_port = cfg->peer_udp_port;
+                       err = kernel_connect(sock,
+                                            (struct sockaddr *)&udp_addr,
+                                            sizeof(udp_addr), 0);
+                       if (err < 0)
+                               goto error;
+               }
+
+               sock->sk->sk_no_check_tx = !cfg->use_udp_checksums;
+       } else {
+               return -EPFNOSUPPORT;
+       }
+
+
+       *sockp = sock;
+
+       return 0;
+
+error:
+       if (sock) {
+               kernel_sock_shutdown(sock, SHUT_RDWR);
+               sk_release_kernel(sock->sk);
+       }
+       *sockp = NULL;
+       return err;
+}
+EXPORT_SYMBOL(udp_sock_create);
+
+MODULE_LICENSE("GPL");
index a2ce010..dccefa9 100644
@@ -124,7 +124,7 @@ static int xfrm4_ah_rcv(struct sk_buff *skb)
 
        for_each_protocol_rcu(ah4_handlers, handler)
                if ((ret = handler->handler(skb)) != -EINVAL)
-                       return ret;;
+                       return ret;
 
        icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
 
index 5667b30..0b239fc 100644
@@ -108,11 +108,12 @@ static inline u32 cstamp_delta(unsigned long cstamp)
 }
 
 #ifdef CONFIG_SYSCTL
-static void addrconf_sysctl_register(struct inet6_dev *idev);
+static int addrconf_sysctl_register(struct inet6_dev *idev);
 static void addrconf_sysctl_unregister(struct inet6_dev *idev);
 #else
-static inline void addrconf_sysctl_register(struct inet6_dev *idev)
+static inline int addrconf_sysctl_register(struct inet6_dev *idev)
 {
+       return 0;
 }
 
 static inline void addrconf_sysctl_unregister(struct inet6_dev *idev)
@@ -186,6 +187,7 @@ static struct ipv6_devconf ipv6_devconf __read_mostly = {
        .max_desync_factor      = MAX_DESYNC_FACTOR,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
+       .accept_ra_from_local   = 0,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -222,6 +224,7 @@ static struct ipv6_devconf ipv6_devconf_dflt __read_mostly = {
        .max_desync_factor      = MAX_DESYNC_FACTOR,
        .max_addresses          = IPV6_MAX_ADDRESSES,
        .accept_ra_defrtr       = 1,
+       .accept_ra_from_local   = 0,
        .accept_ra_pinfo        = 1,
 #ifdef CONFIG_IPV6_ROUTER_PREF
        .accept_ra_rtr_pref     = 1,
@@ -308,16 +311,16 @@ err_ip:
 static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 {
        struct inet6_dev *ndev;
+       int err = -ENOMEM;
 
        ASSERT_RTNL();
 
        if (dev->mtu < IPV6_MIN_MTU)
-               return NULL;
+               return ERR_PTR(-EINVAL);
 
        ndev = kzalloc(sizeof(struct inet6_dev), GFP_KERNEL);
-
        if (ndev == NULL)
-               return NULL;
+               return ERR_PTR(err);
 
        rwlock_init(&ndev->lock);
        ndev->dev = dev;
@@ -330,7 +333,7 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
        ndev->nd_parms = neigh_parms_alloc(dev, &nd_tbl);
        if (ndev->nd_parms == NULL) {
                kfree(ndev);
-               return NULL;
+               return ERR_PTR(err);
        }
        if (ndev->cnf.forwarding)
                dev_disable_lro(dev);
@@ -344,17 +347,14 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
                neigh_parms_release(&nd_tbl, ndev->nd_parms);
                dev_put(dev);
                kfree(ndev);
-               return NULL;
+               return ERR_PTR(err);
        }
 
        if (snmp6_register_dev(ndev) < 0) {
                ADBG(KERN_WARNING
                        "%s: cannot create /proc/net/dev_snmp6/%s\n",
                        __func__, dev->name);
-               neigh_parms_release(&nd_tbl, ndev->nd_parms);
-               ndev->dead = 1;
-               in6_dev_finish_destroy(ndev);
-               return NULL;
+               goto err_release;
        }
 
        /* One reference from device.  We must do this before
@@ -392,7 +392,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
 
        ipv6_mc_init_dev(ndev);
        ndev->tstamp = jiffies;
-       addrconf_sysctl_register(ndev);
+       err = addrconf_sysctl_register(ndev);
+       if (err) {
+               ipv6_mc_destroy_dev(ndev);
+               del_timer(&ndev->regen_timer);
+               goto err_release;
+       }
        /* protected by rtnl_lock */
        rcu_assign_pointer(dev->ip6_ptr, ndev);
 
@@ -407,6 +412,12 @@ static struct inet6_dev *ipv6_add_dev(struct net_device *dev)
                ipv6_dev_mc_inc(dev, &in6addr_linklocal_allrouters);
 
        return ndev;
+
+err_release:
+       neigh_parms_release(&nd_tbl, ndev->nd_parms);
+       ndev->dead = 1;
+       in6_dev_finish_destroy(ndev);
+       return ERR_PTR(err);
 }
 
 static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
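
ipv6_add_dev() now reports why it failed instead of returning a bare NULL, so callers can distinguish -EINVAL (MTU below IPV6_MIN_MTU) from -ENOMEM; the hunks below switch the callers to the matching IS_ERR()/IS_ERR_OR_NULL() checks. The canonical caller pattern, as used in addrconf_notify() further down:

        struct inet6_dev *idev = ipv6_add_dev(dev);

        if (IS_ERR(idev))
                return notifier_from_errno(PTR_ERR(idev));  /* keep the real errno */
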
@@ -418,7 +429,7 @@ static struct inet6_dev *ipv6_find_idev(struct net_device *dev)
        idev = __in6_dev_get(dev);
        if (!idev) {
                idev = ipv6_add_dev(dev);
-               if (!idev)
+               if (IS_ERR(idev))
                        return NULL;
        }
 
@@ -2728,9 +2739,25 @@ static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr
        }
 }
 
+static void addrconf_addr_gen(struct inet6_dev *idev, bool prefix_route)
+{
+       if (idev->addr_gen_mode == IN6_ADDR_GEN_MODE_EUI64) {
+               struct in6_addr addr;
+
+               ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
+               /* addrconf_add_linklocal also adds a prefix_route and we
+                * only need to care about prefix routes if ipv6_generate_eui64
+                * couldn't generate one.
+                */
+               if (ipv6_generate_eui64(addr.s6_addr + 8, idev->dev) == 0)
+                       addrconf_add_linklocal(idev, &addr);
+               else if (prefix_route)
+                       addrconf_prefix_route(&addr, 64, idev->dev, 0, 0);
+       }
+}
+
 static void addrconf_dev_config(struct net_device *dev)
 {
-       struct in6_addr addr;
        struct inet6_dev *idev;
 
        ASSERT_RTNL();
@@ -2751,11 +2778,7 @@ static void addrconf_dev_config(struct net_device *dev)
        if (IS_ERR(idev))
                return;
 
-       memset(&addr, 0, sizeof(struct in6_addr));
-       addr.s6_addr32[0] = htonl(0xFE800000);
-
-       if (ipv6_generate_eui64(addr.s6_addr + 8, dev) == 0)
-               addrconf_add_linklocal(idev, &addr);
+       addrconf_addr_gen(idev, false);
 }
 
 #if IS_ENABLED(CONFIG_IPV6_SIT)
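
The three open-coded link-local generators collapse into the new addrconf_addr_gen(); its flag selects whether a bare /64 prefix route is installed when no EUI-64 identifier can be derived. As the surrounding hunks show:

        addrconf_addr_gen(idev, false);  /* ordinary devices and ISATAP: no fallback route */
        addrconf_addr_gen(idev, true);   /* GRE: fall back to a prefix route */
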
@@ -2777,11 +2800,7 @@ static void addrconf_sit_config(struct net_device *dev)
        }
 
        if (dev->priv_flags & IFF_ISATAP) {
-               struct in6_addr addr;
-
-               ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
-               if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
-                       addrconf_add_linklocal(idev, &addr);
+               addrconf_addr_gen(idev, false);
                return;
        }
 
@@ -2796,7 +2815,6 @@ static void addrconf_sit_config(struct net_device *dev)
 static void addrconf_gre_config(struct net_device *dev)
 {
        struct inet6_dev *idev;
-       struct in6_addr addr;
 
        ASSERT_RTNL();
 
@@ -2805,11 +2823,7 @@ static void addrconf_gre_config(struct net_device *dev)
                return;
        }
 
-       ipv6_addr_set(&addr,  htonl(0xFE800000), 0, 0, 0);
-       if (!ipv6_generate_eui64(addr.s6_addr + 8, dev))
-               addrconf_add_linklocal(idev, &addr);
-       else
-               addrconf_prefix_route(&addr, 64, dev, 0, 0);
+       addrconf_addr_gen(idev, true);
 }
 #endif
 
@@ -2825,8 +2839,8 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_REGISTER:
                if (!idev && dev->mtu >= IPV6_MIN_MTU) {
                        idev = ipv6_add_dev(dev);
-                       if (!idev)
-                               return notifier_from_errno(-ENOMEM);
+                       if (IS_ERR(idev))
+                               return notifier_from_errno(PTR_ERR(idev));
                }
                break;
 
@@ -2846,7 +2860,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        if (!idev && dev->mtu >= IPV6_MIN_MTU)
                                idev = ipv6_add_dev(dev);
 
-                       if (idev) {
+                       if (!IS_ERR_OR_NULL(idev)) {
                                idev->if_flags |= IF_READY;
                                run_pending = 1;
                        }
@@ -2889,7 +2903,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                        break;
                }
 
-               if (idev) {
+               if (!IS_ERR_OR_NULL(idev)) {
                        if (run_pending)
                                addrconf_dad_run(idev);
 
@@ -2924,7 +2938,7 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
 
                if (!idev && dev->mtu >= IPV6_MIN_MTU) {
                        idev = ipv6_add_dev(dev);
-                       if (idev)
+                       if (!IS_ERR(idev))
                                break;
                }
 
@@ -2945,10 +2959,14 @@ static int addrconf_notify(struct notifier_block *this, unsigned long event,
                if (idev) {
                        snmp6_unregister_dev(idev);
                        addrconf_sysctl_unregister(idev);
-                       addrconf_sysctl_register(idev);
-                       err = snmp6_register_dev(idev);
+                       err = addrconf_sysctl_register(idev);
                        if (err)
                                return notifier_from_errno(err);
+                       err = snmp6_register_dev(idev);
+                       if (err) {
+                               addrconf_sysctl_unregister(idev);
+                               return notifier_from_errno(err);
+                       }
                }
                break;
 
@@ -4321,6 +4339,7 @@ static inline void ipv6_store_devconf(struct ipv6_devconf *cnf,
        array[DEVCONF_FORCE_TLLAO] = cnf->force_tllao;
        array[DEVCONF_NDISC_NOTIFY] = cnf->ndisc_notify;
        array[DEVCONF_SUPPRESS_FRAG_NDISC] = cnf->suppress_frag_ndisc;
+       array[DEVCONF_ACCEPT_RA_FROM_LOCAL] = cnf->accept_ra_from_local;
 }
 
 static inline size_t inet6_ifla6_size(void)
@@ -4420,6 +4439,10 @@ static int inet6_fill_ifla6_attrs(struct sk_buff *skb, struct inet6_dev *idev)
        nla = nla_reserve(skb, IFLA_INET6_TOKEN, sizeof(struct in6_addr));
        if (nla == NULL)
                goto nla_put_failure;
+
+       if (nla_put_u8(skb, IFLA_INET6_ADDR_GEN_MODE, idev->addr_gen_mode))
+               goto nla_put_failure;
+
        read_lock_bh(&idev->lock);
        memcpy(nla_data(nla), idev->token.s6_addr, nla_len(nla));
        read_unlock_bh(&idev->lock);
@@ -4524,8 +4547,21 @@ static int inet6_set_link_af(struct net_device *dev, const struct nlattr *nla)
        if (nla_parse_nested(tb, IFLA_INET6_MAX, nla, NULL) < 0)
                BUG();
 
-       if (tb[IFLA_INET6_TOKEN])
+       if (tb[IFLA_INET6_TOKEN]) {
                err = inet6_set_iftoken(idev, nla_data(tb[IFLA_INET6_TOKEN]));
+               if (err)
+                       return err;
+       }
+
+       if (tb[IFLA_INET6_ADDR_GEN_MODE]) {
+               u8 mode = nla_get_u8(tb[IFLA_INET6_ADDR_GEN_MODE]);
+
+               if (mode != IN6_ADDR_GEN_MODE_EUI64 &&
+                   mode != IN6_ADDR_GEN_MODE_NONE)
+                       return -EINVAL;
+               idev->addr_gen_mode = mode;
+               err = 0;
+       }
 
        return err;
 }
@@ -5167,6 +5203,13 @@ static struct addrconf_sysctl_table
                        .mode           = 0644,
                        .proc_handler   = proc_dointvec
                },
+               {
+                       .procname       = "accept_ra_from_local",
+                       .data           = &ipv6_devconf.accept_ra_from_local,
+                       .maxlen         = sizeof(int),
+                       .mode           = 0644,
+                       .proc_handler   = proc_dointvec,
+               },
                {
                        /* sentinel */
                }
@@ -5218,12 +5261,23 @@ static void __addrconf_sysctl_unregister(struct ipv6_devconf *p)
        kfree(t);
 }
 
-static void addrconf_sysctl_register(struct inet6_dev *idev)
+static int addrconf_sysctl_register(struct inet6_dev *idev)
 {
-       neigh_sysctl_register(idev->dev, idev->nd_parms,
-                             &ndisc_ifinfo_sysctl_change);
-       __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
-                                       idev, &idev->cnf);
+       int err;
+
+       if (!sysctl_dev_name_is_allowed(idev->dev->name))
+               return -EINVAL;
+
+       err = neigh_sysctl_register(idev->dev, idev->nd_parms,
+                                   &ndisc_ifinfo_sysctl_change);
+       if (err)
+               return err;
+       err = __addrconf_sysctl_register(dev_net(idev->dev), idev->dev->name,
+                                        idev, &idev->cnf);
+       if (err)
+               neigh_sysctl_unregister(idev->nd_parms);
+
+       return err;
 }
 
 static void addrconf_sysctl_unregister(struct inet6_dev *idev)
@@ -5308,6 +5362,7 @@ static struct rtnl_af_ops inet6_ops = {
 
 int __init addrconf_init(void)
 {
+       struct inet6_dev *idev;
        int i, err;
 
        err = ipv6_addr_label_init();
@@ -5346,11 +5401,12 @@ int __init addrconf_init(void)
         * device and it being up should be removed.
         */
        rtnl_lock();
-       if (!ipv6_add_dev(init_net.loopback_dev))
-               err = -ENOMEM;
+       idev = ipv6_add_dev(init_net.loopback_dev);
        rtnl_unlock();
-       if (err)
+       if (IS_ERR(idev)) {
+               err = PTR_ERR(idev);
                goto errlo;
+       }
 
        for (i = 0; i < IN6_ADDR_HSIZE; i++)
                INIT_HLIST_HEAD(&inet6_addr_lst[i]);
index 7cb4392..2daa3a1 100644
@@ -197,7 +197,7 @@ lookup_protocol:
        np->mcast_hops  = IPV6_DEFAULT_MCASTHOPS;
        np->mc_loop     = 1;
        np->pmtudisc    = IPV6_PMTUDISC_WANT;
-       np->ipv6only    = net->ipv6.sysctl.bindv6only;
+       sk->sk_ipv6only = net->ipv6.sysctl.bindv6only;
 
        /* Init the ipv4 part of the socket since we can have sockets
         * using v6 API for ipv4.
@@ -294,7 +294,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
                /* Binding to v4-mapped address on a v6-only socket
                 * makes no sense
                 */
-               if (np->ipv6only) {
+               if (sk->sk_ipv6only) {
                        err = -EINVAL;
                        goto out;
                }
@@ -371,7 +371,7 @@ int inet6_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len)
        if (addr_type != IPV6_ADDR_ANY) {
                sk->sk_userlocks |= SOCK_BINDADDR_LOCK;
                if (addr_type != IPV6_ADDR_MAPPED)
-                       np->ipv6only = 1;
+                       sk->sk_ipv6only = 1;
        }
        if (snum)
                sk->sk_userlocks |= SOCK_BINDPORT_LOCK;
@@ -765,6 +765,7 @@ static int __net_init inet6_net_init(struct net *net)
        net->ipv6.sysctl.bindv6only = 0;
        net->ipv6.sysctl.icmpv6_time = 1*HZ;
        net->ipv6.sysctl.flowlabel_consistency = 1;
+       net->ipv6.sysctl.auto_flowlabels = 0;
        atomic_set(&net->ipv6.rt_genid, 0);
 
        err = ipv6_init_mibs(net);
index c3bf2d2..2753319 100644
@@ -199,6 +199,7 @@ ipv4_connected:
                      NULL);
 
        sk->sk_state = TCP_ESTABLISHED;
+       ip6_set_txhash(sk);
 out:
        fl6_sock_release(flowlabel);
        return err;
index f6c84a6..06ba3e5 100644
@@ -626,9 +626,10 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
        int inner_offset;
        __be16 frag_off;
        u8 nexthdr;
+       struct net *net = dev_net(skb->dev);
 
        if (!pskb_may_pull(skb, sizeof(struct ipv6hdr)))
-               return;
+               goto out;
 
        nexthdr = ((struct ipv6hdr *)skb->data)->nexthdr;
        if (ipv6_ext_hdr(nexthdr)) {
@@ -636,14 +637,14 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
                inner_offset = ipv6_skip_exthdr(skb, sizeof(struct ipv6hdr),
                                                &nexthdr, &frag_off);
                if (inner_offset<0)
-                       return;
+                       goto out;
        } else {
                inner_offset = sizeof(struct ipv6hdr);
        }
 
        /* Checkin header including 8 bytes of inner protocol header. */
        if (!pskb_may_pull(skb, inner_offset+8))
-               return;
+               goto out;
 
        /* BUGGG_FUTURE: we should try to parse exthdrs in this packet.
           Without this we will not able f.e. to make source routed
@@ -652,13 +653,15 @@ void icmpv6_notify(struct sk_buff *skb, u8 type, u8 code, __be32 info)
           --ANK (980726)
         */
 
-       rcu_read_lock();
        ipprot = rcu_dereference(inet6_protos[nexthdr]);
        if (ipprot && ipprot->err_handler)
                ipprot->err_handler(skb, NULL, type, code, inner_offset, info);
-       rcu_read_unlock();
 
        raw6_icmp_error(skb, nexthdr, type, code, inner_offset, info);
+       return;
+
+out:
+       ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev), ICMP6_MIB_INERRORS);
 }
 
 /*
index 3873181..5f19dfb 100644
@@ -322,7 +322,8 @@ static struct ip6_tnl *ip6gre_tunnel_locate(struct net *net,
        else
                strcpy(name, "ip6gre%d");
 
-       dev = alloc_netdev(sizeof(*t), name, ip6gre_tunnel_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+                          ip6gre_tunnel_setup);
        if (!dev)
                return NULL;
 
@@ -723,7 +724,8 @@ static netdev_tx_t ip6gre_xmit2(struct sk_buff *skb,
         *      Push down and install the IP header.
         */
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
+       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
        ipv6h->hop_limit = tunnel->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1174,7 +1176,9 @@ static int ip6gre_header(struct sk_buff *skb, struct net_device *dev,
        struct ipv6hdr *ipv6h = (struct ipv6hdr *)skb_push(skb, t->hlen);
        __be16 *p = (__be16 *)(ipv6h+1);
 
-       ip6_flow_hdr(ipv6h, 0, t->fl.u.ip6.flowlabel);
+       ip6_flow_hdr(ipv6h, 0,
+                    ip6_make_flowlabel(dev_net(dev), skb,
+                                       t->fl.u.ip6.flowlabel, false));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = NEXTHDR_GRE;
        ipv6h->saddr = t->parms.laddr;
@@ -1323,7 +1327,8 @@ static int __net_init ip6gre_init_net(struct net *net)
        int err;
 
        ign->fb_tunnel_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6gre0",
-                                          ip6gre_tunnel_setup);
+                                         NET_NAME_UNKNOWN,
+                                         ip6gre_tunnel_setup);
        if (!ign->fb_tunnel_dev) {
                err = -ENOMEM;
                goto err_alloc_dev;
index cb9df0e..f5dafe6 100644
@@ -205,7 +205,8 @@ int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
        if (hlimit < 0)
                hlimit = ip6_dst_hoplimit(dst);
 
-       ip6_flow_hdr(hdr, tclass, fl6->flowlabel);
+       ip6_flow_hdr(hdr, tclass, ip6_make_flowlabel(net, skb, fl6->flowlabel,
+                                                    np->autoflowlabel));
 
        hdr->payload_len = htons(seg_len);
        hdr->nexthdr = proto;
@@ -545,6 +546,8 @@ static void ipv6_select_ident(struct frag_hdr *fhdr, struct rt6_info *rt)
        net_get_random_once(&ip6_idents_hashrnd, sizeof(ip6_idents_hashrnd));
 
        hash = __ipv6_addr_jhash(&rt->rt6i_dst.addr, ip6_idents_hashrnd);
+       hash = __ipv6_addr_jhash(&rt->rt6i_src.addr, hash);
+
        id = ip_idents_reserve(hash, 1);
        fhdr->identification = htonl(id);
 }
@@ -800,8 +803,8 @@ slow_path:
                /*
                 *      Copy a block of the IP datagram.
                 */
-               if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
-                       BUG();
+               BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
+                                    len));
                left -= len;
 
                fh->frag_off = htons(offset);
@@ -1269,8 +1272,7 @@ emsgsize:
                }
        }
 
-       /* For UDP, check if TX timestamp is enabled */
-       if (sk->sk_type == SOCK_DGRAM)
+       if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW)
                sock_tx_timestamp(sk, &tx_flags);
 
        /*
@@ -1379,12 +1381,6 @@ alloc_new_skb:
                                                           sk->sk_allocation);
                                if (unlikely(skb == NULL))
                                        err = -ENOBUFS;
-                               else {
-                                       /* Only the initial fragment
-                                        * is time stamped.
-                                        */
-                                       tx_flags = 0;
-                               }
                        }
                        if (skb == NULL)
                                goto error;
@@ -1398,8 +1394,9 @@ alloc_new_skb:
                        skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
                                    dst_exthdrlen);
 
-                       if (sk->sk_type == SOCK_DGRAM)
-                               skb_shinfo(skb)->tx_flags = tx_flags;
+                       /* Only the initial fragment is time stamped */
+                       skb_shinfo(skb)->tx_flags = tx_flags;
+                       tx_flags = 0;
 
                        /*
                         *      Find where to start putting bytes
@@ -1569,7 +1566,9 @@ int ip6_push_pending_frames(struct sock *sk)
        skb_reset_network_header(skb);
        hdr = ipv6_hdr(skb);
 
-       ip6_flow_hdr(hdr, np->cork.tclass, fl6->flowlabel);
+       ip6_flow_hdr(hdr, np->cork.tclass,
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel,
+                                       np->autoflowlabel));
        hdr->hop_limit = np->cork.hop_limit;
        hdr->nexthdr = proto;
        hdr->saddr = fl6->saddr;
index afa0824..f9de5a6 100644
@@ -315,7 +315,8 @@ static struct ip6_tnl *ip6_tnl_create(struct net *net, struct __ip6_tnl_parm *p)
        else
                sprintf(name, "ip6tnl%%d");
 
-       dev = alloc_netdev(sizeof (*t), name, ip6_tnl_dev_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+                          ip6_tnl_dev_setup);
        if (dev == NULL)
                goto failed;
 
@@ -1046,7 +1047,8 @@ static int ip6_tnl_xmit2(struct sk_buff *skb,
        skb_push(skb, sizeof(struct ipv6hdr));
        skb_reset_network_header(skb);
        ipv6h = ipv6_hdr(skb);
-       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield), fl6->flowlabel);
+       ip6_flow_hdr(ipv6h, INET_ECN_encapsulate(0, dsfield),
+                    ip6_make_flowlabel(net, skb, fl6->flowlabel, false));
        ipv6h->hop_limit = t->parms.hop_limit;
        ipv6h->nexthdr = proto;
        ipv6h->saddr = fl6->saddr;
@@ -1772,7 +1774,7 @@ static int __net_init ip6_tnl_init_net(struct net *net)
 
        err = -ENOMEM;
        ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6tnl0",
-                                     ip6_tnl_dev_setup);
+                                       NET_NAME_UNKNOWN, ip6_tnl_dev_setup);
 
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
index 9aaa6bb..7f52fd9 100644
@@ -204,7 +204,7 @@ static struct ip6_tnl *vti6_tnl_create(struct net *net, struct __ip6_tnl_parm *p
        else
                sprintf(name, "ip6_vti%%d");
 
-       dev = alloc_netdev(sizeof(*t), name, vti6_dev_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN, vti6_dev_setup);
        if (dev == NULL)
                goto failed;
 
@@ -1020,7 +1020,7 @@ static int __net_init vti6_init_net(struct net *net)
 
        err = -ENOMEM;
        ip6n->fb_tnl_dev = alloc_netdev(sizeof(struct ip6_tnl), "ip6_vti0",
-                                       vti6_dev_setup);
+                                       NET_NAME_UNKNOWN, vti6_dev_setup);
 
        if (!ip6n->fb_tnl_dev)
                goto err_alloc_dev;
@@ -1089,36 +1089,26 @@ static struct xfrm6_protocol vti_ipcomp6_protocol __read_mostly = {
  **/
 static int __init vti6_tunnel_init(void)
 {
-       int  err;
+       const char *msg;
+       int err;
 
+       msg = "tunnel device";
        err = register_pernet_device(&vti6_net_ops);
        if (err < 0)
-               goto out_pernet;
+               goto pernet_dev_failed;
 
+       msg = "tunnel protocols";
        err = xfrm6_protocol_register(&vti_esp6_protocol, IPPROTO_ESP);
-       if (err < 0) {
-               pr_err("%s: can't register vti6 protocol\n", __func__);
-
-               goto out;
-       }
-
+       if (err < 0)
+               goto xfrm_proto_esp_failed;
        err = xfrm6_protocol_register(&vti_ah6_protocol, IPPROTO_AH);
-       if (err < 0) {
-               xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               pr_err("%s: can't register vti6 protocol\n", __func__);
-
-               goto out;
-       }
-
+       if (err < 0)
+               goto xfrm_proto_ah_failed;
        err = xfrm6_protocol_register(&vti_ipcomp6_protocol, IPPROTO_COMP);
-       if (err < 0) {
-               xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
-               xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-               pr_err("%s: can't register vti6 protocol\n", __func__);
-
-               goto out;
-       }
+       if (err < 0)
+               goto xfrm_proto_comp_failed;
 
+       msg = "netlink interface";
        err = rtnl_link_register(&vti6_link_ops);
        if (err < 0)
                goto rtnl_link_failed;
@@ -1127,11 +1117,14 @@ static int __init vti6_tunnel_init(void)
 
 rtnl_link_failed:
        xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
+xfrm_proto_comp_failed:
        xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
+xfrm_proto_ah_failed:
        xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
-out:
+xfrm_proto_esp_failed:
        unregister_pernet_device(&vti6_net_ops);
-out_pernet:
+pernet_dev_failed:
+       pr_err("vti6 init: failed to register %s\n", msg);
        return err;
 }
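
The init path is reworked into the conventional goto-unwind ladder, with a single pr_err() naming whichever stage failed. The shape of the pattern, reduced to two hypothetical stages:

        static int __init demo_init(void)
        {
                const char *msg;
                int err;

                msg = "first stage";
                err = register_first();         /* hypothetical helpers */
                if (err)
                        goto first_failed;

                msg = "second stage";
                err = register_second();
                if (err)
                        goto second_failed;
                return 0;

        second_failed:
                unregister_first();             /* undo in reverse order */
        first_failed:
                pr_err("demo init: failed to register %s\n", msg);
                return err;
        }
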
 
@@ -1141,13 +1134,9 @@ out_pernet:
 static void __exit vti6_tunnel_cleanup(void)
 {
        rtnl_link_unregister(&vti6_link_ops);
-       if (xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP))
-               pr_info("%s: can't deregister protocol\n", __func__);
-       if (xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH))
-               pr_info("%s: can't deregister protocol\n", __func__);
-       if (xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP))
-               pr_info("%s: can't deregister protocol\n", __func__);
-
+       xfrm6_protocol_deregister(&vti_ipcomp6_protocol, IPPROTO_COMP);
+       xfrm6_protocol_deregister(&vti_ah6_protocol, IPPROTO_AH);
+       xfrm6_protocol_deregister(&vti_esp6_protocol, IPPROTO_ESP);
        unregister_pernet_device(&vti6_net_ops);
 }
 
index 8250474..f9a3fd3 100644
@@ -744,7 +744,7 @@ static struct net_device *ip6mr_reg_vif(struct net *net, struct mr6_table *mrt)
        else
                sprintf(name, "pim6reg%u", mrt->id);
 
-       dev = alloc_netdev(0, name, reg_vif_setup);
+       dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, reg_vif_setup);
        if (dev == NULL)
                return NULL;
 
index edb58af..0c28998 100644
@@ -235,7 +235,7 @@ static int do_ipv6_setsockopt(struct sock *sk, int level, int optname,
                if (optlen < sizeof(int) ||
                    inet_sk(sk)->inet_num)
                        goto e_inval;
-               np->ipv6only = valbool;
+               sk->sk_ipv6only = valbool;
                retv = 0;
                break;
 
@@ -834,6 +834,10 @@ pref_skip_coa:
                np->dontfrag = valbool;
                retv = 0;
                break;
+       case IPV6_AUTOFLOWLABEL:
+               np->autoflowlabel = valbool;
+               retv = 0;
+               break;
        }
 
        release_sock(sk);
@@ -1058,7 +1062,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
        }
 
        case IPV6_V6ONLY:
-               val = np->ipv6only;
+               val = sk->sk_ipv6only;
                break;
 
        case IPV6_RECVPKTINFO:
@@ -1158,7 +1162,6 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                        return -EFAULT;
 
                return 0;
-               break;
        }
 
        case IPV6_TRANSPARENT:
@@ -1273,6 +1276,10 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
                val = np->dontfrag;
                break;
 
+       case IPV6_AUTOFLOWLABEL:
+               val = np->autoflowlabel;
+               break;
+
        default:
                return -ENOPROTOOPT;
        }
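
The new IPV6_AUTOFLOWLABEL option lets a socket opt in or out of kernel-chosen flow labels (the per-net default is added in af_inet6.c above). A minimal userspace sketch; the numeric value mirrors the uapi header of this era and is guarded in case libc headers lag behind:

        #include <sys/socket.h>
        #include <netinet/in.h>

        #ifndef IPV6_AUTOFLOWLABEL
        #define IPV6_AUTOFLOWLABEL 70   /* from include/uapi/linux/in6.h */
        #endif

        int enable_autoflowlabel(int fd)
        {
                int on = 1;

                return setsockopt(fd, IPPROTO_IPV6, IPV6_AUTOFLOWLABEL,
                                  &on, sizeof(on));
        }
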
index 08b367c..617f095 100644
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
        len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
        len -= skb_network_header_len(skb);
 
-       /* Drop queries with not link local source */
-       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL))
+       /* RFC3810 6.2
+        * Upon reception of an MLD message that contains a Query, the node
+        * checks if the source address of the message is a valid link-local
+        * address, if the Hop Limit is set to 1, and if the Router Alert
+        * option is present in the Hop-By-Hop Options header of the IPv6
+        * packet.  If any of these checks fails, the packet is dropped.
+        */
+       if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
+           ipv6_hdr(skb)->hop_limit != 1 ||
+           !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
+           IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
                return -EINVAL;
 
        idev = __in6_dev_get(skb->dev);
index ca8d4ea..339078f 100644
@@ -1070,6 +1070,9 @@ static void ndisc_router_discovery(struct sk_buff *skb)
        optlen = (skb_tail_pointer(skb) - skb_transport_header(skb)) -
                sizeof(struct ra_msg);
 
+       ND_PRINTK(2, info,
+                 "RA: %s, dev: %s\n",
+                 __func__, skb->dev->name);
        if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) {
                ND_PRINTK(2, warn, "RA: source address is not link-local\n");
                return;
@@ -1102,13 +1105,21 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                return;
        }
 
-       if (!ipv6_accept_ra(in6_dev))
+       if (!ipv6_accept_ra(in6_dev)) {
+               ND_PRINTK(2, info,
+                         "RA: %s, did not accept ra for dev: %s\n",
+                         __func__, skb->dev->name);
                goto skip_linkparms;
+       }
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        /* skip link-specific parameters from interior routers */
-       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
+       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
+               ND_PRINTK(2, info,
+                         "RA: %s, nodetype is NODEFAULT, dev: %s\n",
+                         __func__, skb->dev->name);
                goto skip_linkparms;
+       }
 #endif
 
        if (in6_dev->if_flags & IF_RS_SENT) {
@@ -1130,11 +1141,24 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                                (ra_msg->icmph.icmp6_addrconf_other ?
                                        IF_RA_OTHERCONF : 0);
 
-       if (!in6_dev->cnf.accept_ra_defrtr)
+       if (!in6_dev->cnf.accept_ra_defrtr) {
+               ND_PRINTK(2, info,
+                         "RA: %s, defrtr is false for dev: %s\n",
+                         __func__, skb->dev->name);
                goto skip_defrtr;
+       }
 
-       if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+       /* Do not accept RA with source-addr found on local machine unless
+        * accept_ra_from_local is set to true.
+        */
+       if (!in6_dev->cnf.accept_ra_from_local &&
+           ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
+                         NULL, 0)) {
+               ND_PRINTK(2, info,
+                         "RA from local address detected on dev: %s: default router ignored\n",
+                         skb->dev->name);
                goto skip_defrtr;
+       }
 
        lifetime = ntohs(ra_msg->icmph.icmp6_rt_lifetime);
 
@@ -1163,8 +1187,10 @@ static void ndisc_router_discovery(struct sk_buff *skb)
                rt = NULL;
        }
 
+       ND_PRINTK(3, info, "RA: rt: %p  lifetime: %d, for dev: %s\n",
+                 rt, lifetime, skb->dev->name);
        if (rt == NULL && lifetime) {
-               ND_PRINTK(3, dbg, "RA: adding default router\n");
+               ND_PRINTK(3, info, "RA: adding default router\n");
 
                rt = rt6_add_dflt_router(&ipv6_hdr(skb)->saddr, skb->dev, pref);
                if (rt == NULL) {
@@ -1260,12 +1286,22 @@ skip_linkparms:
                             NEIGH_UPDATE_F_ISROUTER);
        }
 
-       if (!ipv6_accept_ra(in6_dev))
+       if (!ipv6_accept_ra(in6_dev)) {
+               ND_PRINTK(2, info,
+                         "RA: %s, accept_ra is false for dev: %s\n",
+                         __func__, skb->dev->name);
                goto out;
+       }
 
 #ifdef CONFIG_IPV6_ROUTE_INFO
-       if (ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr, NULL, 0))
+       if (!in6_dev->cnf.accept_ra_from_local &&
+           ipv6_chk_addr(dev_net(in6_dev->dev), &ipv6_hdr(skb)->saddr,
+                         NULL, 0)) {
+               ND_PRINTK(2, info,
+                         "RA from local address detected on dev: %s: router info ignored.\n",
+                         skb->dev->name);
                goto skip_routeinfo;
+       }
 
        if (in6_dev->cnf.accept_ra_rtr_pref && ndopts.nd_opts_ri) {
                struct nd_opt_hdr *p;
@@ -1293,8 +1329,12 @@ skip_routeinfo:
 
 #ifdef CONFIG_IPV6_NDISC_NODETYPE
        /* skip link-specific ndopts from interior routers */
-       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT)
+       if (skb->ndisc_nodetype == NDISC_NODETYPE_NODEFAULT) {
+               ND_PRINTK(2, info,
+                         "RA: %s, nodetype is NODEFAULT (interior routes), dev: %s\n",
+                         __func__, skb->dev->name);
                goto out;
+       }
 #endif
 
        if (in6_dev->cnf.accept_ra_pinfo && ndopts.nd_opts_pi) {
@@ -1728,7 +1768,7 @@ int __init ndisc_init(void)
 
 #ifdef CONFIG_SYSCTL
        err = neigh_sysctl_register(NULL, &nd_tbl.parms,
-                                   &ndisc_ifinfo_sysctl_change);
+                                   ndisc_ifinfo_sysctl_change);
        if (err)
                goto out_unregister_pernet;
 out:
index 4bff1f2..ac93df1 100644
@@ -55,6 +55,11 @@ config NFT_REJECT_IPV6
        default NFT_REJECT
        tristate
 
+config NF_LOG_IPV6
+       tristate "IPv6 packet logging"
+       depends on NETFILTER_ADVANCED
+       select NF_LOG_COMMON
+
 config IP6_NF_IPTABLES
        tristate "IP6 tables support (required for filtering)"
        depends on INET && IPV6
index 70d3dd6..c0b2631 100644
@@ -23,6 +23,9 @@ obj-$(CONFIG_NF_NAT_IPV6) += nf_nat_ipv6.o
 nf_defrag_ipv6-y := nf_defrag_ipv6_hooks.o nf_conntrack_reasm.o
 obj-$(CONFIG_NF_DEFRAG_IPV6) += nf_defrag_ipv6.o
 
+# logging
+obj-$(CONFIG_NF_LOG_IPV6) += nf_log_ipv6.o
+
 # nf_tables
 obj-$(CONFIG_NF_TABLES_IPV6) += nf_tables_ipv6.o
 obj-$(CONFIG_NFT_CHAIN_ROUTE_IPV6) += nft_chain_route_ipv6.o
index 54bd979..8b14744 100644
@@ -94,7 +94,6 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
                        break;
                default:
                        return false;
-                       break;
                }
 
                nexthdr = hp->nexthdr;
index 0d5279f..6f187c8 100644
@@ -50,6 +50,7 @@
 #include <linux/module.h>
 #include <net/netfilter/ipv6/nf_defrag_ipv6.h>
 
+static const char nf_frags_cache_name[] = "nf-frags";
 
 struct nf_ct_frag6_skb_cb
 {
@@ -63,6 +64,8 @@ struct nf_ct_frag6_skb_cb
 static struct inet_frags nf_frags;
 
 #ifdef CONFIG_SYSCTL
+static int zero;
+
 static struct ctl_table nf_ct_frag6_sysctl_table[] = {
        {
                .procname       = "nf_conntrack_frag6_timeout",
@@ -76,14 +79,17 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
                .data           = &init_net.nf_frag.frags.low_thresh,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &init_net.nf_frag.frags.high_thresh
        },
        {
                .procname       = "nf_conntrack_frag6_high_thresh",
                .data           = &init_net.nf_frag.frags.high_thresh,
                .maxlen         = sizeof(unsigned int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec,
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &init_net.nf_frag.frags.low_thresh
        },
        { }
 };
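
Switching both thresholds to proc_dointvec_minmax() makes the pair self-clamping: a write to the low watermark is rejected above the current high watermark and vice versa, and the register hunk below rewires the extra pointers for the per-net copies. The mechanism in isolation, on a hypothetical tunable:

        static int demo_value, demo_floor, demo_ceiling = 100;

        static struct ctl_table demo_table[] = {
                {
                        .procname     = "demo_value",
                        .data         = &demo_value,
                        .maxlen       = sizeof(int),
                        .mode         = 0644,
                        .proc_handler = proc_dointvec_minmax,
                        .extra1       = &demo_floor,    /* smallest accepted write */
                        .extra2       = &demo_ceiling,  /* largest accepted write */
                },
                { }
        };
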
@@ -102,7 +108,10 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
 
                table[0].data = &net->nf_frag.frags.timeout;
                table[1].data = &net->nf_frag.frags.low_thresh;
+               table[1].extra2 = &net->nf_frag.frags.high_thresh;
                table[2].data = &net->nf_frag.frags.high_thresh;
+               table[2].extra1 = &net->nf_frag.frags.low_thresh;
+               table[2].extra2 = &init_net.nf_frag.frags.high_thresh;
        }
 
        hdr = register_net_sysctl(net, "net/netfilter", table);
@@ -147,16 +156,13 @@ static inline u8 ip6_frag_ecn(const struct ipv6hdr *ipv6h)
 static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
                                 const struct in6_addr *daddr)
 {
-       u32 c;
-
        net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
-       c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-                        (__force u32)id, nf_frags.rnd);
-       return c & (INETFRAGS_HASHSZ - 1);
+       return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+                           (__force u32)id, nf_frags.rnd);
 }
 
 
-static unsigned int nf_hashfn(struct inet_frag_queue *q)
+static unsigned int nf_hashfn(const struct inet_frag_queue *q)
 {
        const struct frag_queue *nq;
 
@@ -196,7 +202,7 @@ static inline struct frag_queue *fq_find(struct net *net, __be32 id,
        arg.dst = dst;
        arg.ecn = ecn;
 
-       read_lock_bh(&nf_frags.lock);
+       local_bh_disable();
        hash = nf_hash_frag(id, src, dst);
 
        q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
@@ -217,7 +223,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
        int offset, end;
        u8 ecn;
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE) {
+       if (fq->q.flags & INET_FRAG_COMPLETE) {
                pr_debug("Already completed\n");
                goto err;
        }
@@ -248,11 +254,11 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
-                   ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) {
+                   ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {
                        pr_debug("already received last fragment\n");
                        goto err;
                }
-               fq->q.last_in |= INET_FRAG_LAST_IN;
+               fq->q.flags |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
@@ -267,7 +273,7 @@ static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
-                       if (fq->q.last_in & INET_FRAG_LAST_IN) {
+                       if (fq->q.flags & INET_FRAG_LAST_IN) {
                                pr_debug("last packet already reached.\n");
                                goto err;
                        }
@@ -349,10 +355,9 @@ found:
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
-               fq->q.last_in |= INET_FRAG_FIRST_IN;
+               fq->q.flags |= INET_FRAG_FIRST_IN;
        }
 
-       inet_frag_lru_move(&fq->q);
        return 0;
 
 discard_fq:
@@ -597,10 +602,6 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
        hdr = ipv6_hdr(clone);
        fhdr = (struct frag_hdr *)skb_transport_header(clone);
 
-       local_bh_disable();
-       inet_frag_evictor(&net->nf_frag.frags, &nf_frags, false);
-       local_bh_enable();
-
        fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
                     ip6_frag_ecn(hdr));
        if (fq == NULL) {
@@ -617,7 +618,7 @@ struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user)
                goto ret_orig;
        }
 
-       if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+       if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                ret_skb = nf_ct_frag6_reasm(fq, dev);
                if (ret_skb == NULL)
@@ -677,13 +678,15 @@ int nf_ct_frag6_init(void)
        nf_frags.qsize = sizeof(struct frag_queue);
        nf_frags.match = ip6_frag_match;
        nf_frags.frag_expire = nf_ct_frag6_expire;
-       nf_frags.secret_interval = 10 * 60 * HZ;
-       inet_frags_init(&nf_frags);
-
+       nf_frags.frags_cache_name = nf_frags_cache_name;
+       ret = inet_frags_init(&nf_frags);
+       if (ret)
+               goto out;
        ret = register_pernet_subsys(&nf_ct_net_ops);
        if (ret)
                inet_frags_fini(&nf_frags);
 
+out:
        return ret;
 }
 
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
new file mode 100644
index 0000000..7b17a0b
--- /dev/null
@@ -0,0 +1,417 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/ipv6.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+static struct nf_loginfo default_loginfo = {
+       .type   = NF_LOG_TYPE_LOG,
+       .u = {
+               .log = {
+                       .level    = 5,
+                       .logflags = NF_LOG_MASK,
+               },
+       },
+};
+
+/* One level of recursion won't kill us */
+static void dump_ipv6_packet(struct nf_log_buf *m,
+                            const struct nf_loginfo *info,
+                            const struct sk_buff *skb, unsigned int ip6hoff,
+                            int recurse)
+{
+       u_int8_t currenthdr;
+       int fragment;
+       struct ipv6hdr _ip6h;
+       const struct ipv6hdr *ih;
+       unsigned int ptr;
+       unsigned int hdrlen = 0;
+       unsigned int logflags;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+       else
+               logflags = NF_LOG_MASK;
+
+       ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
+       if (ih == NULL) {
+               nf_log_buf_add(m, "TRUNCATED");
+               return;
+       }
+
+       /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
+       nf_log_buf_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
+
+       /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
+       nf_log_buf_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
+              ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
+              (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
+              ih->hop_limit,
+              (ntohl(*(__be32 *)ih) & 0x000fffff));
+
+       fragment = 0;
+       ptr = ip6hoff + sizeof(struct ipv6hdr);
+       currenthdr = ih->nexthdr;
+       while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
+               struct ipv6_opt_hdr _hdr;
+               const struct ipv6_opt_hdr *hp;
+
+               hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
+               if (hp == NULL) {
+                       nf_log_buf_add(m, "TRUNCATED");
+                       return;
+               }
+
+               /* Max length: 48 "OPT (...) " */
+               if (logflags & XT_LOG_IPOPT)
+                       nf_log_buf_add(m, "OPT ( ");
+
+               switch (currenthdr) {
+               case IPPROTO_FRAGMENT: {
+                       struct frag_hdr _fhdr;
+                       const struct frag_hdr *fh;
+
+                       nf_log_buf_add(m, "FRAG:");
+                       fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
+                                               &_fhdr);
+                       if (fh == NULL) {
+                               nf_log_buf_add(m, "TRUNCATED ");
+                               return;
+                       }
+
+                       /* Max length: 6 "65535 " */
+                       nf_log_buf_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
+
+                       /* Max length: 11 "INCOMPLETE " */
+                       if (fh->frag_off & htons(0x0001))
+                               nf_log_buf_add(m, "INCOMPLETE ");
+
+                       nf_log_buf_add(m, "ID:%08x ",
+                                      ntohl(fh->identification));
+
+                       if (ntohs(fh->frag_off) & 0xFFF8)
+                               fragment = 1;
+
+                       hdrlen = 8;
+
+                       break;
+               }
+               case IPPROTO_DSTOPTS:
+               case IPPROTO_ROUTING:
+               case IPPROTO_HOPOPTS:
+                       if (fragment) {
+                               if (logflags & XT_LOG_IPOPT)
+                                       nf_log_buf_add(m, ")");
+                               return;
+                       }
+                       hdrlen = ipv6_optlen(hp);
+                       break;
+               /* Max Length */
+               case IPPROTO_AH:
+                       if (logflags & XT_LOG_IPOPT) {
+                               struct ip_auth_hdr _ahdr;
+                               const struct ip_auth_hdr *ah;
+
+                               /* Max length: 3 "AH " */
+                               nf_log_buf_add(m, "AH ");
+
+                               if (fragment) {
+                                       nf_log_buf_add(m, ")");
+                                       return;
+                               }
+
+                               ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
+                                                       &_ahdr);
+                               if (ah == NULL) {
+                                       /*
+                                        * Max length: 26 "INCOMPLETE [65535
+                                        *  bytes] )"
+                                        */
+                                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
+                                                      skb->len - ptr);
+                                       return;
+                               }
+
+                               /* Length: 15 "SPI=0xF1234567 */
+                               nf_log_buf_add(m, "SPI=0x%x ", ntohl(ah->spi));
+
+                       }
+
+                       hdrlen = (hp->hdrlen+2)<<2;
+                       break;
+               case IPPROTO_ESP:
+                       if (logflags & XT_LOG_IPOPT) {
+                               struct ip_esp_hdr _esph;
+                               const struct ip_esp_hdr *eh;
+
+                               /* Max length: 4 "ESP " */
+                               nf_log_buf_add(m, "ESP ");
+
+                               if (fragment) {
+                                       nf_log_buf_add(m, ")");
+                                       return;
+                               }
+
+                               /*
+                                * Max length: 26 "INCOMPLETE [65535 bytes] )"
+                                */
+                               eh = skb_header_pointer(skb, ptr, sizeof(_esph),
+                                                       &_esph);
+                               if (eh == NULL) {
+                                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] )",
+                                                      skb->len - ptr);
+                                       return;
+                               }
+
+                               /* Length: 16 "SPI=0xF1234567 )" */
+                               nf_log_buf_add(m, "SPI=0x%x )",
+                                              ntohl(eh->spi));
+                       }
+                       return;
+               default:
+                       /* Max length: 20 "Unknown Ext Hdr 255" */
+                       nf_log_buf_add(m, "Unknown Ext Hdr %u", currenthdr);
+                       return;
+               }
+               if (logflags & XT_LOG_IPOPT)
+                       nf_log_buf_add(m, ") ");
+
+               currenthdr = hp->nexthdr;
+               ptr += hdrlen;
+       }
+
+       switch (currenthdr) {
+       case IPPROTO_TCP:
+               if (nf_log_dump_tcp_header(m, skb, currenthdr, fragment,
+                                          ptr, logflags))
+                       return;
+               break;
+       case IPPROTO_UDP:
+       case IPPROTO_UDPLITE:
+               if (nf_log_dump_udp_header(m, skb, currenthdr, fragment, ptr))
+                       return;
+               break;
+       case IPPROTO_ICMPV6: {
+               struct icmp6hdr _icmp6h;
+               const struct icmp6hdr *ic;
+
+               /* Max length: 13 "PROTO=ICMPv6 " */
+               nf_log_buf_add(m, "PROTO=ICMPv6 ");
+
+               if (fragment)
+                       break;
+
+               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+               ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
+               if (ic == NULL) {
+                       nf_log_buf_add(m, "INCOMPLETE [%u bytes] ",
+                                      skb->len - ptr);
+                       return;
+               }
+
+               /* Max length: 18 "TYPE=255 CODE=255 " */
+               nf_log_buf_add(m, "TYPE=%u CODE=%u ",
+                              ic->icmp6_type, ic->icmp6_code);
+
+               switch (ic->icmp6_type) {
+               case ICMPV6_ECHO_REQUEST:
+               case ICMPV6_ECHO_REPLY:
+                       /* Max length: 19 "ID=65535 SEQ=65535 " */
+                       nf_log_buf_add(m, "ID=%u SEQ=%u ",
+                               ntohs(ic->icmp6_identifier),
+                               ntohs(ic->icmp6_sequence));
+                       break;
+               case ICMPV6_MGM_QUERY:
+               case ICMPV6_MGM_REPORT:
+               case ICMPV6_MGM_REDUCTION:
+                       break;
+
+               case ICMPV6_PARAMPROB:
+                       /* Max length: 17 "POINTER=ffffffff " */
+                       nf_log_buf_add(m, "POINTER=%08x ",
+                                      ntohl(ic->icmp6_pointer));
+                       /* Fall through */
+               case ICMPV6_DEST_UNREACH:
+               case ICMPV6_PKT_TOOBIG:
+               case ICMPV6_TIME_EXCEED:
+                       /* Max length: 3+maxlen */
+                       if (recurse) {
+                               nf_log_buf_add(m, "[");
+                               dump_ipv6_packet(m, info, skb,
+                                                ptr + sizeof(_icmp6h), 0);
+                               nf_log_buf_add(m, "] ");
+                       }
+
+                       /* Max length: 10 "MTU=65535 " */
+                       if (ic->icmp6_type == ICMPV6_PKT_TOOBIG) {
+                               nf_log_buf_add(m, "MTU=%u ",
+                                              ntohl(ic->icmp6_mtu));
+                       }
+               }
+               break;
+       }
+       /* Max length: 10 "PROTO=255 " */
+       default:
+               nf_log_buf_add(m, "PROTO=%u ", currenthdr);
+       }
+
+       /* Max length: 15 "UID=4294967295 " */
+       if ((logflags & XT_LOG_UID) && recurse)
+               nf_log_dump_sk_uid_gid(m, skb->sk);
+
+       /* Max length: 16 "MARK=0xFFFFFFFF " */
+       if (recurse && skb->mark)
+               nf_log_buf_add(m, "MARK=0x%x ", skb->mark);
+}
+
+static void dump_ipv6_mac_header(struct nf_log_buf *m,
+                                const struct nf_loginfo *info,
+                                const struct sk_buff *skb)
+{
+       struct net_device *dev = skb->dev;
+       unsigned int logflags = 0;
+
+       if (info->type == NF_LOG_TYPE_LOG)
+               logflags = info->u.log.logflags;
+
+       if (!(logflags & XT_LOG_MACDECODE))
+               goto fallback;
+
+       switch (dev->type) {
+       case ARPHRD_ETHER:
+               nf_log_buf_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
+                      eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
+                      ntohs(eth_hdr(skb)->h_proto));
+               return;
+       default:
+               break;
+       }
+
+fallback:
+       nf_log_buf_add(m, "MAC=");
+       if (dev->hard_header_len &&
+           skb->mac_header != skb->network_header) {
+               const unsigned char *p = skb_mac_header(skb);
+               unsigned int len = dev->hard_header_len;
+               unsigned int i;
+
+               if (dev->type == ARPHRD_SIT) {
+                       p -= ETH_HLEN;
+
+                       if (p < skb->head)
+                               p = NULL;
+               }
+
+               if (p != NULL) {
+                       nf_log_buf_add(m, "%02x", *p++);
+                       for (i = 1; i < len; i++)
+                               nf_log_buf_add(m, ":%02x", *p++);
+               }
+               nf_log_buf_add(m, " ");
+
+               if (dev->type == ARPHRD_SIT) {
+                       const struct iphdr *iph =
+                               (struct iphdr *)skb_mac_header(skb);
+                       nf_log_buf_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
+                                      &iph->daddr);
+               }
+       } else {
+               nf_log_buf_add(m, " ");
+       }
+}
+
+static void nf_log_ip6_packet(struct net *net, u_int8_t pf,
+                             unsigned int hooknum, const struct sk_buff *skb,
+                             const struct net_device *in,
+                             const struct net_device *out,
+                             const struct nf_loginfo *loginfo,
+                             const char *prefix)
+{
+       struct nf_log_buf *m;
+
+       /* FIXME: Disabled from containers until syslog ns is supported */
+       if (!net_eq(net, &init_net))
+               return;
+
+       m = nf_log_buf_open();
+
+       if (!loginfo)
+               loginfo = &default_loginfo;
+
+       nf_log_dump_packet_common(m, pf, hooknum, skb, in, out,
+                                 loginfo, prefix);
+
+       if (in != NULL)
+               dump_ipv6_mac_header(m, loginfo, skb);
+
+       dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
+
+       nf_log_buf_close(m);
+}
+
+static struct nf_logger nf_ip6_logger __read_mostly = {
+       .name           = "nf_log_ipv6",
+       .type           = NF_LOG_TYPE_LOG,
+       .logfn          = nf_log_ip6_packet,
+       .me             = THIS_MODULE,
+};
+
+static int __net_init nf_log_ipv6_net_init(struct net *net)
+{
+       nf_log_set(net, NFPROTO_IPV6, &nf_ip6_logger);
+       return 0;
+}
+
+static void __net_exit nf_log_ipv6_net_exit(struct net *net)
+{
+       nf_log_unset(net, &nf_ip6_logger);
+}
+
+static struct pernet_operations nf_log_ipv6_net_ops = {
+       .init = nf_log_ipv6_net_init,
+       .exit = nf_log_ipv6_net_exit,
+};
+
+static int __init nf_log_ipv6_init(void)
+{
+       int ret;
+
+       ret = register_pernet_subsys(&nf_log_ipv6_net_ops);
+       if (ret < 0)
+               return ret;
+
+       nf_log_register(NFPROTO_IPV6, &nf_ip6_logger);
+       return 0;
+}
+
+static void __exit nf_log_ipv6_exit(void)
+{
+       unregister_pernet_subsys(&nf_log_ipv6_net_ops);
+       nf_log_unregister(&nf_ip6_logger);
+}
+
+module_init(nf_log_ipv6_init);
+module_exit(nf_log_ipv6_exit);
+
+MODULE_AUTHOR("Netfilter Core Team <coreteam@netfilter.org>");
+MODULE_DESCRIPTION("Netfilter IPv6 packet logging");
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NF_LOGGER(AF_INET6, 0);
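
Nothing calls into this file directly: rule targets and the conntrack
code go through the nf_log core, which dispatches to whichever logger
is registered for NFPROTO_IPV6. A hedged sketch of such a call site,
following the 3.17-era nf_log_packet() signature, where li stands for
the caller's struct nf_loginfo (details may vary):

        nf_log_packet(net, NFPROTO_IPV6, hooknum, skb, in, out,
                      &li, "INPUT-DROP: ");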
index abfe75a..fc8e49b 100644
@@ -158,6 +158,7 @@ static void nf_nat_ipv6_csum_recalc(struct sk_buff *skb,
                                         htons(oldlen), htons(datalen), 1);
 }
 
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
                                       struct nf_nat_range *range)
 {
@@ -175,6 +176,7 @@ static int nf_nat_ipv6_nlattr_to_range(struct nlattr *tb[],
 
        return 0;
 }
+#endif
 
 static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
        .l3proto                = NFPROTO_IPV6,
@@ -183,7 +185,9 @@ static const struct nf_nat_l3proto nf_nat_l3proto_ipv6 = {
        .manip_pkt              = nf_nat_ipv6_manip_pkt,
        .csum_update            = nf_nat_ipv6_csum_update,
        .csum_recalc            = nf_nat_ipv6_csum_recalc,
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_ipv6_nlattr_to_range,
+#endif
 #ifdef CONFIG_XFRM
        .decode_session = nf_nat_ipv6_decode_session,
 #endif
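
The IS_ENABLED(CONFIG_NF_CT_NETLINK) guards keep the function and its
only reference in step: with that option disabled, the .nlattr_to_range
initializer above disappears, and an unguarded definition would make
the compiler warn that nf_nat_ipv6_nlattr_to_range() is defined but not
used.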
index 3317440..2d6f860 100644
@@ -33,6 +33,7 @@
 static int sockstat6_seq_show(struct seq_file *seq, void *v)
 {
        struct net *net = seq->private;
+       unsigned int frag_mem = ip6_frag_mem(net);
 
        seq_printf(seq, "TCP6: inuse %d\n",
                       sock_prot_inuse_get(net, &tcpv6_prot));
@@ -42,8 +43,7 @@ static int sockstat6_seq_show(struct seq_file *seq, void *v)
                        sock_prot_inuse_get(net, &udplitev6_prot));
        seq_printf(seq, "RAW6: inuse %d\n",
                       sock_prot_inuse_get(net, &rawv6_prot));
-       seq_printf(seq, "FRAG6: inuse %d memory %d\n",
-                      ip6_frag_nqueues(net), ip6_frag_mem(net));
+       seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
        return 0;
 }
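
The !!frag_mem is deliberate: the fragment eviction rework drops the
per-namespace queue counter behind ip6_frag_nqueues(), so the "inuse"
column degrades to a flag derived from the memory accounting, and the
line now reads like "FRAG6: inuse 1 memory 98304" whenever any fragment
memory is charged (sample values illustrative).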
 
index b2dc60b..39d4422 100644
@@ -176,7 +176,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
                goto out;
 
        net = dev_net(skb->dev);
-       sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, IP6CB(skb)->iif);
+       sk = __raw_v6_lookup(net, sk, nexthdr, daddr, saddr, inet6_iif(skb));
 
        while (sk) {
                int filtered;
@@ -220,7 +220,7 @@ static bool ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
                        }
                }
                sk = __raw_v6_lookup(net, sk_next(sk), nexthdr, daddr, saddr,
-                                    IP6CB(skb)->iif);
+                                    inet6_iif(skb));
        }
 out:
        read_unlock(&raw_v6_hashinfo.lock);
@@ -375,7 +375,7 @@ void raw6_icmp_error(struct sk_buff *skb, int nexthdr,
                net = dev_net(skb->dev);
 
                while ((sk = __raw_v6_lookup(net, sk, nexthdr, saddr, daddr,
-                                               IP6CB(skb)->iif))) {
+                                               inet6_iif(skb)))) {
                        rawv6_err(sk, skb, NULL, type, code,
                                        inner_offset, info);
                        sk = sk_next(sk);
@@ -506,7 +506,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
                sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                sin6->sin6_flowinfo = 0;
                sin6->sin6_scope_id = ipv6_iface_scope_id(&sin6->sin6_addr,
-                                                         IP6CB(skb)->iif);
+                                                         inet6_iif(skb));
                *addr_len = sizeof(*sin6);
        }
 
@@ -588,8 +588,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
        }
 
        offset += skb_transport_offset(skb);
-       if (skb_copy_bits(skb, offset, &csum, 2))
-               BUG();
+       BUG_ON(skb_copy_bits(skb, offset, &csum, 2));
 
        /* in case cksum was not initialized */
        if (unlikely(csum))
@@ -601,8 +600,7 @@ static int rawv6_push_pending_frames(struct sock *sk, struct flowi6 *fl6,
        if (csum == 0 && fl6->flowi6_proto == IPPROTO_UDP)
                csum = CSUM_MANGLED_0;
 
-       if (skb_store_bits(skb, offset, &csum, 2))
-               BUG();
+       BUG_ON(skb_store_bits(skb, offset, &csum, 2));
 
 send:
        err = ip6_push_pending_frames(sk);
index cc85a9b..c6557d9 100644
@@ -60,6 +60,8 @@
 #include <net/inet_frag.h>
 #include <net/inet_ecn.h>
 
+static const char ip6_frag_cache_name[] = "ip6-frags";
+
 struct ip6frag_skb_cb
 {
        struct inet6_skb_parm   h;
@@ -85,27 +87,23 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
 static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
                                    const struct in6_addr *daddr)
 {
-       u32 c;
-
        net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
-       c = jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
-                        (__force u32)id, ip6_frags.rnd);
-
-       return c & (INETFRAGS_HASHSZ - 1);
+       return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
+                           (__force u32)id, ip6_frags.rnd);
 }
 
-static unsigned int ip6_hashfn(struct inet_frag_queue *q)
+static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
 {
-       struct frag_queue *fq;
+       const struct frag_queue *fq;
 
        fq = container_of(q, struct frag_queue, q);
        return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
 }
 
-bool ip6_frag_match(struct inet_frag_queue *q, void *a)
+bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
 {
-       struct frag_queue *fq;
-       struct ip6_create_arg *arg = a;
+       const struct frag_queue *fq;
+       const struct ip6_create_arg *arg = a;
 
        fq = container_of(q, struct frag_queue, q);
        return  fq->id == arg->id &&
@@ -115,10 +113,10 @@ bool ip6_frag_match(struct inet_frag_queue *q, void *a)
 }
 EXPORT_SYMBOL(ip6_frag_match);
 
-void ip6_frag_init(struct inet_frag_queue *q, void *a)
+void ip6_frag_init(struct inet_frag_queue *q, const void *a)
 {
        struct frag_queue *fq = container_of(q, struct frag_queue, q);
-       struct ip6_create_arg *arg = a;
+       const struct ip6_create_arg *arg = a;
 
        fq->id = arg->id;
        fq->user = arg->user;
@@ -135,7 +133,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
 
        spin_lock(&fq->q.lock);
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE)
+       if (fq->q.flags & INET_FRAG_COMPLETE)
                goto out;
 
        inet_frag_kill(&fq->q, frags);
@@ -145,17 +143,20 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
        if (!dev)
                goto out_rcu_unlock;
 
-       IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
        IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
 
+       if (fq->q.flags & INET_FRAG_EVICTED)
+               goto out_rcu_unlock;
+
+       IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
        /* Don't send error if the first segment did not arrive. */
-       if (!(fq->q.last_in & INET_FRAG_FIRST_IN) || !fq->q.fragments)
+       if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
                goto out_rcu_unlock;
 
-       /*
-          But use as source device on which LAST ARRIVED
-          segment was received. And do not use fq->dev
-          pointer directly, device might already disappeared.
+       /* But use as source the device on which the LAST ARRIVED
+        * segment was received. And do not use the fq->dev pointer
+        * directly; the device might already have disappeared.
         */
        fq->q.fragments->dev = dev;
        icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
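
The reshuffled counters are a behaviour change, not churn: a queue torn
down by the new memory-pressure evictor reaches this function with
INET_FRAG_EVICTED set, still counts as a reassembly failure, but now
skips both the REASMTIMEOUT statistic and the ICMPv6 time-exceeded
message. Annotated, the new early exit reads:

        if (fq->q.flags & INET_FRAG_EVICTED)  /* killed by evictor, not timer */
                goto out_rcu_unlock;          /* no TIMEOUT stat, no ICMPv6   */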
@@ -192,7 +193,6 @@ fq_find(struct net *net, __be32 id, const struct in6_addr *src,
        arg.dst = dst;
        arg.ecn = ecn;
 
-       read_lock(&ip6_frags.lock);
        hash = inet6_hash_frag(id, src, dst);
 
        q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
@@ -212,7 +212,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
        struct net *net = dev_net(skb_dst(skb)->dev);
        u8 ecn;
 
-       if (fq->q.last_in & INET_FRAG_COMPLETE)
+       if (fq->q.flags & INET_FRAG_COMPLETE)
                goto err;
 
        offset = ntohs(fhdr->frag_off) & ~0x7;
@@ -243,9 +243,9 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                 * or have different end, the segment is corrupted.
                 */
                if (end < fq->q.len ||
-                   ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len))
+                   ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))
                        goto err;
-               fq->q.last_in |= INET_FRAG_LAST_IN;
+               fq->q.flags |= INET_FRAG_LAST_IN;
                fq->q.len = end;
        } else {
                /* Check if the fragment is rounded to 8 bytes.
@@ -263,7 +263,7 @@ static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,
                }
                if (end > fq->q.len) {
                        /* Some bits beyond end -> corruption. */
-                       if (fq->q.last_in & INET_FRAG_LAST_IN)
+                       if (fq->q.flags & INET_FRAG_LAST_IN)
                                goto err;
                        fq->q.len = end;
                }
@@ -338,10 +338,10 @@ found:
         */
        if (offset == 0) {
                fq->nhoffset = nhoff;
-               fq->q.last_in |= INET_FRAG_FIRST_IN;
+               fq->q.flags |= INET_FRAG_FIRST_IN;
        }
 
-       if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
+       if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
            fq->q.meat == fq->q.len) {
                int res;
                unsigned long orefdst = skb->_skb_refdst;
@@ -353,14 +353,13 @@ found:
        }
 
        skb_dst_drop(skb);
-       inet_frag_lru_move(&fq->q);
        return -1;
 
 discard_fq:
        inet_frag_kill(&fq->q, &ip6_frags);
 err:
-       IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
-                     IPSTATS_MIB_REASMFAILS);
+       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+                        IPSTATS_MIB_REASMFAILS);
        kfree_skb(skb);
        return -1;
 }
@@ -523,7 +522,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        struct frag_queue *fq;
        const struct ipv6hdr *hdr = ipv6_hdr(skb);
        struct net *net = dev_net(skb_dst(skb)->dev);
-       int evicted;
 
        if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
                goto fail_hdr;
@@ -552,11 +550,6 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
                return 1;
        }
 
-       evicted = inet_frag_evictor(&net->ipv6.frags, &ip6_frags, false);
-       if (evicted)
-               IP6_ADD_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
-                                IPSTATS_MIB_REASMFAILS, evicted);
-
        fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
                     ip6_frag_ecn(hdr));
        if (fq != NULL) {
@@ -576,7 +569,8 @@ static int ipv6_frag_rcv(struct sk_buff *skb)
        return -1;
 
 fail_hdr:
-       IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_INHDRERRORS);
+       IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)),
+                        IPSTATS_MIB_INHDRERRORS);
        icmpv6_param_prob(skb, ICMPV6_HDR_FIELD, skb_network_header_len(skb));
        return -1;
 }
@@ -588,20 +582,25 @@ static const struct inet6_protocol frag_protocol =
 };
 
 #ifdef CONFIG_SYSCTL
+static int zero;
+
 static struct ctl_table ip6_frags_ns_ctl_table[] = {
        {
                .procname       = "ip6frag_high_thresh",
                .data           = &init_net.ipv6.frags.high_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &init_net.ipv6.frags.low_thresh
        },
        {
                .procname       = "ip6frag_low_thresh",
                .data           = &init_net.ipv6.frags.low_thresh,
                .maxlen         = sizeof(int),
                .mode           = 0644,
-               .proc_handler   = proc_dointvec
+               .proc_handler   = proc_dointvec_minmax,
+               .extra1         = &zero,
+               .extra2         = &init_net.ipv6.frags.high_thresh
        },
        {
                .procname       = "ip6frag_time",
@@ -613,10 +612,12 @@ static struct ctl_table ip6_frags_ns_ctl_table[] = {
        { }
 };
 
+/* secret interval has been deprecated */
+static int ip6_frags_secret_interval_unused;
 static struct ctl_table ip6_frags_ctl_table[] = {
        {
                .procname       = "ip6frag_secret_interval",
-               .data           = &ip6_frags.secret_interval,
+               .data           = &ip6_frags_secret_interval_unused,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = proc_dointvec_jiffies,
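
ip6frag_secret_interval survives only for compatibility: writes still
succeed, but they land in ip6_frags_secret_interval_unused and change
nothing, because the reworked inet_frag core (elsewhere in this merge)
reseeds and rehashes on its own when hash chains grow too long rather
than on a fixed timer. That description paraphrases the core series;
only the dummy-variable substitution is visible in this hunk.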
@@ -636,7 +637,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
                        goto err_alloc;
 
                table[0].data = &net->ipv6.frags.high_thresh;
+               table[0].extra1 = &net->ipv6.frags.low_thresh;
+               table[0].extra2 = &init_net.ipv6.frags.high_thresh;
                table[1].data = &net->ipv6.frags.low_thresh;
+               table[1].extra2 = &net->ipv6.frags.high_thresh;
                table[2].data = &net->ipv6.frags.timeout;
 
                /* Don't export sysctls to unprivileged users */
@@ -746,8 +750,10 @@ int __init ipv6_frag_init(void)
        ip6_frags.qsize = sizeof(struct frag_queue);
        ip6_frags.match = ip6_frag_match;
        ip6_frags.frag_expire = ip6_frag_expire;
-       ip6_frags.secret_interval = 10 * 60 * HZ;
-       inet_frags_init(&ip6_frags);
+       ip6_frags.frags_cache_name = ip6_frag_cache_name;
+       ret = inet_frags_init(&ip6_frags);
+       if (ret)
+               goto err_pernet;
 out:
        return ret;
 
index 4f40817..2e9ba03 100644
@@ -250,7 +250,8 @@ static struct ip_tunnel *ipip6_tunnel_locate(struct net *net,
        else
                strcpy(name, "sit%d");
 
-       dev = alloc_netdev(sizeof(*t), name, ipip6_tunnel_setup);
+       dev = alloc_netdev(sizeof(*t), name, NET_NAME_UNKNOWN,
+                          ipip6_tunnel_setup);
        if (dev == NULL)
                return NULL;
 
@@ -1729,6 +1730,7 @@ static int __net_init sit_init_net(struct net *net)
        sitn->tunnels[3] = sitn->tunnels_r_l;
 
        sitn->fb_tunnel_dev = alloc_netdev(sizeof(struct ip_tunnel), "sit0",
+                                          NET_NAME_UNKNOWN,
                                           ipip6_tunnel_setup);
        if (!sitn->fb_tunnel_dev) {
                err = -ENOMEM;
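
Every alloc_netdev() caller in this pull picks up a NET_NAME_UNKNOWN
argument: 3.17 adds a name_assign_type parameter so userspace can tell
predictable interface names from kernel-enumerated ones. Paraphrased
from the 3.17-era netdevice.h, with the queue counts elided to their
defaults:

        #define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
                alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)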
index a822b88..83cea1d 100644
@@ -187,7 +187,7 @@ struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb)
                goto out;
 
        ret = NULL;
-       req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
+       req = inet_reqsk_alloc(&tcp6_request_sock_ops);
        if (!req)
                goto out;
 
index 058f3ec..5bf7b61 100644
@@ -38,6 +38,13 @@ static struct ctl_table ipv6_table_template[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec
        },
+       {
+               .procname       = "auto_flowlabels",
+               .data           = &init_net.ipv6.sysctl.auto_flowlabels,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
        {
                .procname       = "fwmark_reflect",
                .data           = &init_net.ipv6.sysctl.fwmark_reflect,
@@ -74,6 +81,7 @@ static int __net_init ipv6_sysctl_net_init(struct net *net)
        ipv6_table[0].data = &net->ipv6.sysctl.bindv6only;
        ipv6_table[1].data = &net->ipv6.sysctl.anycast_src_echo_reply;
        ipv6_table[2].data = &net->ipv6.sysctl.flowlabel_consistency;
+       ipv6_table[3].data = &net->ipv6.sysctl.auto_flowlabels;
 
        ipv6_route_table = ipv6_route_sysctl_init(net);
        if (!ipv6_route_table)
index 229239a..22055b0 100644
@@ -198,6 +198,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
        sk->sk_v6_daddr = usin->sin6_addr;
        np->flow_label = fl6.flowlabel;
 
+       ip6_set_txhash(sk);
+
        /*
         *      TCP over IPv4
         */
@@ -470,13 +472,14 @@ out:
 
 
 static int tcp_v6_send_synack(struct sock *sk, struct dst_entry *dst,
-                             struct flowi6 *fl6,
+                             struct flowi *fl,
                              struct request_sock *req,
                              u16 queue_mapping,
                              struct tcp_fastopen_cookie *foc)
 {
        struct inet_request_sock *ireq = inet_rsk(req);
        struct ipv6_pinfo *np = inet6_sk(sk);
+       struct flowi6 *fl6 = &fl->u.ip6;
        struct sk_buff *skb;
        int err = -ENOMEM;
 
@@ -503,18 +506,6 @@ done:
        return err;
 }
 
-static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req)
-{
-       struct flowi6 fl6;
-       int res;
-
-       res = tcp_v6_send_synack(sk, NULL, &fl6, req, 0, NULL);
-       if (!res) {
-               TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPSYNRETRANS);
-       }
-       return res;
-}
 
 static void tcp_v6_reqsk_destructor(struct request_sock *req)
 {
@@ -718,22 +709,66 @@ static int tcp_v6_inbound_md5_hash(struct sock *sk, const struct sk_buff *skb)
 }
 #endif
 
+static void tcp_v6_init_req(struct request_sock *req, struct sock *sk,
+                           struct sk_buff *skb)
+{
+       struct inet_request_sock *ireq = inet_rsk(req);
+       struct ipv6_pinfo *np = inet6_sk(sk);
+
+       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
+       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
+
+       ireq->ir_iif = sk->sk_bound_dev_if;
+
+       /* So that link locals have meaning */
+       if (!sk->sk_bound_dev_if &&
+           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
+               ireq->ir_iif = inet6_iif(skb);
+
+       if (!TCP_SKB_CB(skb)->when &&
+           (ipv6_opt_accepted(sk, skb) || np->rxopt.bits.rxinfo ||
+            np->rxopt.bits.rxoinfo || np->rxopt.bits.rxhlim ||
+            np->rxopt.bits.rxohlim || np->repflow)) {
+               atomic_inc(&skb->users);
+               ireq->pktopts = skb;
+       }
+}
+
+static struct dst_entry *tcp_v6_route_req(struct sock *sk, struct flowi *fl,
+                                         const struct request_sock *req,
+                                         bool *strict)
+{
+       if (strict)
+               *strict = true;
+       return inet6_csk_route_req(sk, &fl->u.ip6, req);
+}
+
 struct request_sock_ops tcp6_request_sock_ops __read_mostly = {
        .family         =       AF_INET6,
        .obj_size       =       sizeof(struct tcp6_request_sock),
-       .rtx_syn_ack    =       tcp_v6_rtx_synack,
+       .rtx_syn_ack    =       tcp_rtx_synack,
        .send_ack       =       tcp_v6_reqsk_send_ack,
        .destructor     =       tcp_v6_reqsk_destructor,
        .send_reset     =       tcp_v6_send_reset,
        .syn_ack_timeout =      tcp_syn_ack_timeout,
 };
 
-#ifdef CONFIG_TCP_MD5SIG
 static const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops = {
+       .mss_clamp      =       IPV6_MIN_MTU - sizeof(struct tcphdr) -
+                               sizeof(struct ipv6hdr),
+#ifdef CONFIG_TCP_MD5SIG
        .md5_lookup     =       tcp_v6_reqsk_md5_lookup,
        .calc_md5_hash  =       tcp_v6_md5_hash_skb,
-};
 #endif
+       .init_req       =       tcp_v6_init_req,
+#ifdef CONFIG_SYN_COOKIES
+       .cookie_init_seq =      cookie_v6_init_sequence,
+#endif
+       .route_req      =       tcp_v6_route_req,
+       .init_seq       =       tcp_v6_init_sequence,
+       .send_synack    =       tcp_v6_send_synack,
+       .queue_hash_add =       inet6_csk_reqsk_queue_hash_add,
+};
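
With this, tcp_request_sock_ipv6_ops stops being an MD5-only table and
becomes the full set of address-family hooks that the new generic
tcp_conn_request() drives. A condensed sketch of how the generic path
consumes them, paraphrasing the 3.17-era net/ipv4/tcp_input.c rather
than quoting it:

        af_ops->init_req(req, sk, skb);
        dst = af_ops->route_req(sk, &fl, req, &strict);
        if (!isn)
                isn = af_ops->init_seq(skb);
        err = af_ops->send_synack(sk, dst, &fl, req,
                                  skb_get_queue_mapping(skb), &foc);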
 
 static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win,
                                 u32 tsval, u32 tsecr, int oif,
@@ -973,153 +1008,17 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
        return sk;
 }
 
-/* FIXME: this is substantially similar to the ipv4 code.
- * Can some kind of merge be done? -- erics
- */
 static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-       struct tcp_options_received tmp_opt;
-       struct request_sock *req;
-       struct inet_request_sock *ireq;
-       struct ipv6_pinfo *np = inet6_sk(sk);
-       struct tcp_sock *tp = tcp_sk(sk);
-       __u32 isn = TCP_SKB_CB(skb)->when;
-       struct dst_entry *dst = NULL;
-       struct tcp_fastopen_cookie foc = { .len = -1 };
-       bool want_cookie = false, fastopen;
-       struct flowi6 fl6;
-       int err;
-
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);
 
        if (!ipv6_unicast_destination(skb))
                goto drop;
 
-       if ((sysctl_tcp_syncookies == 2 ||
-            inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-               want_cookie = tcp_syn_flood_action(sk, skb, "TCPv6");
-               if (!want_cookie)
-                       goto drop;
-       }
-
-       if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-               goto drop;
-       }
-
-       req = inet6_reqsk_alloc(&tcp6_request_sock_ops);
-       if (req == NULL)
-               goto drop;
-
-#ifdef CONFIG_TCP_MD5SIG
-       tcp_rsk(req)->af_specific = &tcp_request_sock_ipv6_ops;
-#endif
-
-       tcp_clear_options(&tmp_opt);
-       tmp_opt.mss_clamp = IPV6_MIN_MTU - sizeof(struct tcphdr) - sizeof(struct ipv6hdr);
-       tmp_opt.user_mss = tp->rx_opt.user_mss;
-       tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+       return tcp_conn_request(&tcp6_request_sock_ops,
+                               &tcp_request_sock_ipv6_ops, sk, skb);
 
-       if (want_cookie && !tmp_opt.saw_tstamp)
-               tcp_clear_options(&tmp_opt);
-
-       tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-       tcp_openreq_init(req, &tmp_opt, skb);
-
-       ireq = inet_rsk(req);
-       ireq->ir_v6_rmt_addr = ipv6_hdr(skb)->saddr;
-       ireq->ir_v6_loc_addr = ipv6_hdr(skb)->daddr;
-       if (!want_cookie || tmp_opt.tstamp_ok)
-               TCP_ECN_create_request(req, skb, sock_net(sk));
-
-       ireq->ir_iif = sk->sk_bound_dev_if;
-       ireq->ir_mark = inet_request_mark(sk, skb);
-
-       /* So that link locals have meaning */
-       if (!sk->sk_bound_dev_if &&
-           ipv6_addr_type(&ireq->ir_v6_rmt_addr) & IPV6_ADDR_LINKLOCAL)
-               ireq->ir_iif = inet6_iif(skb);
-
-       if (!isn) {
-               if (ipv6_opt_accepted(sk, skb) ||
-                   np->rxopt.bits.rxinfo || np->rxopt.bits.rxoinfo ||
-                   np->rxopt.bits.rxhlim || np->rxopt.bits.rxohlim ||
-                   np->repflow) {
-                       atomic_inc(&skb->users);
-                       ireq->pktopts = skb;
-               }
-
-               if (want_cookie) {
-                       isn = cookie_v6_init_sequence(sk, skb, &req->mss);
-                       req->cookie_ts = tmp_opt.tstamp_ok;
-                       goto have_isn;
-               }
-
-               /* VJ's idea. We save last timestamp seen
-                * from the destination in peer table, when entering
-                * state TIME-WAIT, and check against it before
-                * accepting new connection request.
-                *
-                * If "isn" is not zero, this request hit alive
-                * timewait bucket, so that all the necessary checks
-                * are made in the function processing timewait state.
-                */
-               if (tmp_opt.saw_tstamp &&
-                   tcp_death_row.sysctl_tw_recycle &&
-                   (dst = inet6_csk_route_req(sk, &fl6, req)) != NULL) {
-                       if (!tcp_peer_is_proven(req, dst, true)) {
-                               NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-                               goto drop_and_release;
-                       }
-               }
-               /* Kill the following clause, if you dislike this way. */
-               else if (!sysctl_tcp_syncookies &&
-                        (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                         (sysctl_max_syn_backlog >> 2)) &&
-                        !tcp_peer_is_proven(req, dst, false)) {
-                       /* Without syncookies last quarter of
-                        * backlog is filled with destinations,
-                        * proven to be alive.
-                        * It means that we continue to communicate
-                        * to destinations, already remembered
-                        * to the moment of synflood.
-                        */
-                       LIMIT_NETDEBUG(KERN_DEBUG "TCP: drop open request from %pI6/%u\n",
-                                      &ireq->ir_v6_rmt_addr, ntohs(tcp_hdr(skb)->source));
-                       goto drop_and_release;
-               }
-
-               isn = tcp_v6_init_sequence(skb);
-       }
-have_isn:
-
-       if (security_inet_conn_request(sk, skb, req))
-               goto drop_and_release;
-
-       if (!dst && (dst = inet6_csk_route_req(sk, &fl6, req)) == NULL)
-               goto drop_and_free;
-
-       tcp_rsk(req)->snt_isn = isn;
-       tcp_rsk(req)->snt_synack = tcp_time_stamp;
-       tcp_openreq_init_rwin(req, sk, dst);
-       fastopen = !want_cookie &&
-                  tcp_try_fastopen(sk, skb, req, &foc, dst);
-       err = tcp_v6_send_synack(sk, dst, &fl6, req,
-                                skb_get_queue_mapping(skb), &foc);
-       if (!fastopen) {
-               if (err || want_cookie)
-                       goto drop_and_free;
-
-               tcp_rsk(req)->listener = NULL;
-               inet6_csk_reqsk_queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-       }
-       return 0;
-
-drop_and_release:
-       dst_release(dst);
-drop_and_free:
-       reqsk_free(req);
 drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
@@ -1235,6 +1134,8 @@ static struct sock *tcp_v6_syn_recv_sock(struct sock *sk, struct sk_buff *skb,
        newsk->sk_v6_rcv_saddr = ireq->ir_v6_loc_addr;
        newsk->sk_bound_dev_if = ireq->ir_iif;
 
+       ip6_set_txhash(newsk);
+
        /* Now IPv6 options...
 
           First: no IPv4 options.
index 8517d3c..01b0ff9 100644
@@ -73,7 +73,7 @@ static int tcp6_gro_complete(struct sk_buff *skb, int thoff)
 
        th->check = ~tcp_v6_check(skb->len - thoff, &iph->saddr,
                                  &iph->daddr, 0);
-       skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
+       skb_shinfo(skb)->gso_type |= SKB_GSO_TCPV6;
 
        return tcp_gro_complete(skb);
 }
index 95c8347..4836af8 100644
@@ -79,7 +79,6 @@ static unsigned int udp6_ehashfn(struct net *net,
 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
 {
        const struct in6_addr *sk2_rcv_saddr6 = inet6_rcv_saddr(sk2);
-       int sk_ipv6only = ipv6_only_sock(sk);
        int sk2_ipv6only = inet_v6_ipv6only(sk2);
        int addr_type = ipv6_addr_type(&sk->sk_v6_rcv_saddr);
        int addr_type2 = sk2_rcv_saddr6 ? ipv6_addr_type(sk2_rcv_saddr6) : IPV6_ADDR_MAPPED;
@@ -95,7 +94,7 @@ int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
                return 1;
 
        if (addr_type == IPV6_ADDR_ANY &&
-           !(sk_ipv6only && addr_type2 == IPV6_ADDR_MAPPED))
+           !(ipv6_only_sock(sk) && addr_type2 == IPV6_ADDR_MAPPED))
                return 1;
 
        if (sk2_rcv_saddr6 &&
@@ -473,7 +472,7 @@ try_again:
                        sin6->sin6_addr = ipv6_hdr(skb)->saddr;
                        sin6->sin6_scope_id =
                                ipv6_iface_scope_id(&sin6->sin6_addr,
-                                                   IP6CB(skb)->iif);
+                                                   inet6_iif(skb));
                }
                *addr_len = sizeof(*sin6);
        }
@@ -534,11 +533,15 @@ void __udp6_lib_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
        struct udphdr *uh = (struct udphdr*)(skb->data+offset);
        struct sock *sk;
        int err;
+       struct net *net = dev_net(skb->dev);
 
-       sk = __udp6_lib_lookup(dev_net(skb->dev), daddr, uh->dest,
+       sk = __udp6_lib_lookup(net, daddr, uh->dest,
                               saddr, uh->source, inet6_iif(skb), udptable);
-       if (sk == NULL)
+       if (sk == NULL) {
+               ICMP6_INC_STATS_BH(net, __in6_dev_get(skb->dev),
+                                  ICMP6_MIB_INERRORS);
                return;
+       }
 
        if (type == ICMPV6_PKT_TOOBIG) {
                if (!ip6_sk_accept_pmtu(sk))
@@ -674,8 +677,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
                        goto csum_error;
        }
 
-       if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf))
+       if (sk_rcvqueues_full(sk, sk->sk_rcvbuf)) {
+               UDP6_INC_STATS_BH(sock_net(sk),
+                                 UDP_MIB_RCVBUFERRORS, is_udplite);
                goto drop;
+       }
 
        skb_dst_drop(skb);
 
@@ -690,6 +696,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
        bh_unlock_sock(sk);
 
        return rc;
+
 csum_error:
        UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
 drop:
@@ -699,43 +706,26 @@ drop:
        return -1;
 }
 
-static struct sock *udp_v6_mcast_next(struct net *net, struct sock *sk,
-                                     __be16 loc_port, const struct in6_addr *loc_addr,
-                                     __be16 rmt_port, const struct in6_addr *rmt_addr,
-                                     int dif)
+static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
+                                  __be16 loc_port, const struct in6_addr *loc_addr,
+                                  __be16 rmt_port, const struct in6_addr *rmt_addr,
+                                  int dif, unsigned short hnum)
 {
-       struct hlist_nulls_node *node;
-       unsigned short num = ntohs(loc_port);
-
-       sk_nulls_for_each_from(sk, node) {
-               struct inet_sock *inet = inet_sk(sk);
-
-               if (!net_eq(sock_net(sk), net))
-                       continue;
-
-               if (udp_sk(sk)->udp_port_hash == num &&
-                   sk->sk_family == PF_INET6) {
-                       if (inet->inet_dport) {
-                               if (inet->inet_dport != rmt_port)
-                                       continue;
-                       }
-                       if (!ipv6_addr_any(&sk->sk_v6_daddr) &&
-                           !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr))
-                               continue;
-
-                       if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
-                               continue;
+       struct inet_sock *inet = inet_sk(sk);
 
-                       if (!ipv6_addr_any(&sk->sk_v6_rcv_saddr)) {
-                               if (!ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr))
-                                       continue;
-                       }
-                       if (!inet6_mc_check(sk, loc_addr, rmt_addr))
-                               continue;
-                       return sk;
-               }
-       }
-       return NULL;
+       if (!net_eq(sock_net(sk), net))
+               return false;
+
+       if (udp_sk(sk)->udp_port_hash != hnum ||
+           sk->sk_family != PF_INET6 ||
+           (inet->inet_dport && inet->inet_dport != rmt_port) ||
+           (!ipv6_addr_any(&sk->sk_v6_daddr) &&
+                   !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
+           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+               return false;
+       if (!inet6_mc_check(sk, loc_addr, rmt_addr))
+               return false;
+       return true;
 }
 
 static void flush_stack(struct sock **stack, unsigned int count,
@@ -759,6 +749,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
 
                if (skb1 && udpv6_queue_rcv_skb(sk, skb1) <= 0)
                        skb1 = NULL;
+               sock_put(sk);
        }
        if (unlikely(skb1))
                kfree_skb(skb1);
@@ -784,43 +775,51 @@ static int __udp6_lib_mcast_deliver(struct net *net, struct sk_buff *skb,
 {
        struct sock *sk, *stack[256 / sizeof(struct sock *)];
        const struct udphdr *uh = udp_hdr(skb);
-       struct udp_hslot *hslot = udp_hashslot(udptable, net, ntohs(uh->dest));
-       int dif;
-       unsigned int i, count = 0;
+       struct hlist_nulls_node *node;
+       unsigned short hnum = ntohs(uh->dest);
+       struct udp_hslot *hslot = udp_hashslot(udptable, net, hnum);
+       int dif = inet6_iif(skb);
+       unsigned int count = 0, offset = offsetof(typeof(*sk), sk_nulls_node);
+       unsigned int hash2 = 0, hash2_any = 0, use_hash2 = (hslot->count > 10);
+
+       if (use_hash2) {
+               hash2_any = udp6_portaddr_hash(net, &in6addr_any, hnum) &
+                           udp_table.mask;
+               hash2 = udp6_portaddr_hash(net, daddr, hnum) & udp_table.mask;
+start_lookup:
+               hslot = &udp_table.hash2[hash2];
+               offset = offsetof(typeof(*sk), __sk_common.skc_portaddr_node);
+       }
 
        spin_lock(&hslot->lock);
-       sk = sk_nulls_head(&hslot->head);
-       dif = inet6_iif(skb);
-       sk = udp_v6_mcast_next(net, sk, uh->dest, daddr, uh->source, saddr, dif);
-       while (sk) {
-               /* If zero checksum and no_check is not on for
-                * the socket then skip it.
-                */
-               if (uh->check || udp_sk(sk)->no_check6_rx)
+       sk_nulls_for_each_entry_offset(sk, node, &hslot->head, offset) {
+               if (__udp_v6_is_mcast_sock(net, sk,
+                                          uh->dest, daddr,
+                                          uh->source, saddr,
+                                          dif, hnum) &&
+                   /* If zero checksum and no_check is not on for
+                    * the socket then skip it.
+                    */
+                   (uh->check || udp_sk(sk)->no_check6_rx)) {
+                       if (unlikely(count == ARRAY_SIZE(stack))) {
+                               flush_stack(stack, count, skb, ~0);
+                               count = 0;
+                       }
                        stack[count++] = sk;
-
-               sk = udp_v6_mcast_next(net, sk_nulls_next(sk), uh->dest, daddr,
-                                      uh->source, saddr, dif);
-               if (unlikely(count == ARRAY_SIZE(stack))) {
-                       if (!sk)
-                               break;
-                       flush_stack(stack, count, skb, ~0);
-                       count = 0;
+                       sock_hold(sk);
                }
        }
-       /*
-        * before releasing the lock, we must take reference on sockets
-        */
-       for (i = 0; i < count; i++)
-               sock_hold(stack[i]);
 
        spin_unlock(&hslot->lock);
 
+       /* Also lookup *:port if we are using hash2 and haven't done so yet. */
+       if (use_hash2 && hash2 != hash2_any) {
+               hash2 = hash2_any;
+               goto start_lookup;
+       }
+
        if (count) {
                flush_stack(stack, count, skb, count - 1);
-
-               for (i = 0; i < count; i++)
-                       sock_put(stack[i]);
        } else {
                kfree_skb(skb);
        }
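
Two properties of the rewritten delivery loop deserve a note. When a
slot holds more than ten sockets, the scan switches to the
per-address-and-port hash2 table and makes at most two passes; and each
matching socket now has its reference taken while the slot lock is
held, with flush_stack() dropping it, which closes the window the old
take-references-after-the-scan loop left open. The two passes,
annotated:

        /* pass 1: exact match   -> udp_table.hash2[hash(daddr,       port)]
         * pass 2: wildcard bind -> udp_table.hash2[hash(in6addr_any, port)]
         * (the second pass is skipped when both hashes pick the same bucket)
         */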
index 54747c2..92fafd4 100644
@@ -674,7 +674,6 @@ static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name)
                        self->daddr = DEV_ADDR_ANY;
                        kfree(discoveries);
                        return -EHOSTUNREACH;
-                       break;
                }
        }
        /* Cleanup our copy of the discovery log */
index 365b895..9e0d909 100644
@@ -293,7 +293,8 @@ static void irda_device_setup(struct net_device *dev)
  */
 struct net_device *alloc_irdadev(int sizeof_priv)
 {
-       return alloc_netdev(sizeof_priv, "irda%d", irda_device_setup);
+       return alloc_netdev(sizeof_priv, "irda%d", NET_NAME_UNKNOWN,
+                           irda_device_setup);
 }
 EXPORT_SYMBOL(alloc_irdadev);
 
index 7ac4d1b..1bc49ed 100644
@@ -1024,7 +1024,6 @@ static int __irlan_insert_param(struct sk_buff *skb, char *param, int type,
        default:
                IRDA_DEBUG(2, "%s(), Unknown parameter type!\n", __func__ );
                return 0;
-               break;
        }
 
        /* Insert at end of sk-buffer */
index ffcec22..dc13f1a 100644
@@ -96,7 +96,7 @@ static void irlan_eth_setup(struct net_device *dev)
  */
 struct net_device *alloc_irlandev(const char *name)
 {
-       return alloc_netdev(sizeof(struct irlan_cb), name,
+       return alloc_netdev(sizeof(struct irlan_cb), name, NET_NAME_UNKNOWN,
                            irlan_eth_setup);
 }
 
index 98ad6ec..a5f28d4 100644
@@ -1426,7 +1426,8 @@ __u8 *irlmp_hint_to_service(__u8 *hint)
                if (hint[1] & HINT_TELEPHONY) {
                        IRDA_DEBUG(1, "Telephony ");
                        service[i++] = S_TELEPHONY;
-               } if (hint[1] & HINT_FILE_SERVER)
+               }
+               if (hint[1] & HINT_FILE_SERVER)
                        IRDA_DEBUG(1, "File Server ");
 
                if (hint[1] & HINT_COMM) {
index 7a95fa4..a089b6b 100644
@@ -1103,7 +1103,6 @@ static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock,
                default:
                        err = -EINVAL;
                        goto out;
-                       break;
                }
        }
 
@@ -1543,7 +1542,8 @@ static int iucv_sock_shutdown(struct socket *sock, int how)
 
        sk->sk_shutdown |= how;
        if (how == RCV_SHUTDOWN || how == SHUTDOWN_MASK) {
-               if (iucv->transport == AF_IUCV_TRANS_IUCV) {
+               if ((iucv->transport == AF_IUCV_TRANS_IUCV) &&
+                   iucv->path) {
                        err = pr_iucv->path_quiesce(iucv->path, NULL);
                        if (err)
                                err = -ENOTCONN;
index ba2a2f9..1847ec4 100644
@@ -405,7 +405,6 @@ static int verify_address_len(const void *p)
                 * XXX When it can, remove this -EINVAL.  -DaveM
                 */
                return -EINVAL;
-               break;
        }
 
        return 0;
@@ -536,7 +535,6 @@ pfkey_satype2proto(uint8_t satype)
                return IPPROTO_ESP;
        case SADB_X_SATYPE_IPCOMP:
                return IPPROTO_COMP;
-               break;
        default:
                return 0;
        }
@@ -553,7 +551,6 @@ pfkey_proto2satype(uint16_t proto)
                return SADB_SATYPE_ESP;
        case IPPROTO_COMP:
                return SADB_X_SATYPE_IPCOMP;
-               break;
        default:
                return 0;
        }
index adb9843..378c73b 100644 (file)
@@ -6,6 +6,7 @@ menuconfig L2TP
        tristate "Layer Two Tunneling Protocol (L2TP)"
        depends on (IPV6 || IPV6=n)
        depends on INET
+       select NET_UDP_TUNNEL
        ---help---
          Layer Two Tunneling Protocol
 
index bea2590..1109d3b 100644 (file)
@@ -52,6 +52,7 @@
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/udp.h>
+#include <net/udp_tunnel.h>
 #include <net/inet_common.h>
 #include <net/xfrm.h>
 #include <net/protocol.h>
@@ -1358,81 +1359,46 @@ static int l2tp_tunnel_sock_create(struct net *net,
 {
        int err = -EINVAL;
        struct socket *sock = NULL;
-       struct sockaddr_in udp_addr = {0};
-       struct sockaddr_l2tpip ip_addr = {0};
-#if IS_ENABLED(CONFIG_IPV6)
-       struct sockaddr_in6 udp6_addr = {0};
-       struct sockaddr_l2tpip6 ip6_addr = {0};
-#endif
+       struct udp_port_cfg udp_conf;
 
        switch (cfg->encap) {
        case L2TP_ENCAPTYPE_UDP:
+               memset(&udp_conf, 0, sizeof(udp_conf));
+
 #if IS_ENABLED(CONFIG_IPV6)
                if (cfg->local_ip6 && cfg->peer_ip6) {
-                       err = sock_create_kern(AF_INET6, SOCK_DGRAM, 0, &sock);
-                       if (err < 0)
-                               goto out;
-
-                       sk_change_net(sock->sk, net);
-
-                       udp6_addr.sin6_family = AF_INET6;
-                       memcpy(&udp6_addr.sin6_addr, cfg->local_ip6,
-                              sizeof(udp6_addr.sin6_addr));
-                       udp6_addr.sin6_port = htons(cfg->local_udp_port);
-                       err = kernel_bind(sock, (struct sockaddr *) &udp6_addr,
-                                         sizeof(udp6_addr));
-                       if (err < 0)
-                               goto out;
-
-                       udp6_addr.sin6_family = AF_INET6;
-                       memcpy(&udp6_addr.sin6_addr, cfg->peer_ip6,
-                              sizeof(udp6_addr.sin6_addr));
-                       udp6_addr.sin6_port = htons(cfg->peer_udp_port);
-                       err = kernel_connect(sock,
-                                            (struct sockaddr *) &udp6_addr,
-                                            sizeof(udp6_addr), 0);
-                       if (err < 0)
-                               goto out;
-
-                       if (cfg->udp6_zero_tx_checksums)
-                               udp_set_no_check6_tx(sock->sk, true);
-                       if (cfg->udp6_zero_rx_checksums)
-                               udp_set_no_check6_rx(sock->sk, true);
+                       udp_conf.family = AF_INET6;
+                       memcpy(&udp_conf.local_ip6, cfg->local_ip6,
+                              sizeof(udp_conf.local_ip6));
+                       memcpy(&udp_conf.peer_ip6, cfg->peer_ip6,
+                              sizeof(udp_conf.peer_ip6));
+                       udp_conf.use_udp6_tx_checksums =
+                           cfg->udp6_zero_tx_checksums;
+                       udp_conf.use_udp6_rx_checksums =
+                           cfg->udp6_zero_rx_checksums;
                } else
 #endif
                {
-                       err = sock_create_kern(AF_INET, SOCK_DGRAM, 0, &sock);
-                       if (err < 0)
-                               goto out;
-
-                       sk_change_net(sock->sk, net);
-
-                       udp_addr.sin_family = AF_INET;
-                       udp_addr.sin_addr = cfg->local_ip;
-                       udp_addr.sin_port = htons(cfg->local_udp_port);
-                       err = kernel_bind(sock, (struct sockaddr *) &udp_addr,
-                                         sizeof(udp_addr));
-                       if (err < 0)
-                               goto out;
-
-                       udp_addr.sin_family = AF_INET;
-                       udp_addr.sin_addr = cfg->peer_ip;
-                       udp_addr.sin_port = htons(cfg->peer_udp_port);
-                       err = kernel_connect(sock,
-                                            (struct sockaddr *) &udp_addr,
-                                            sizeof(udp_addr), 0);
-                       if (err < 0)
-                               goto out;
+                       udp_conf.family = AF_INET;
+                       udp_conf.local_ip = cfg->local_ip;
+                       udp_conf.peer_ip = cfg->peer_ip;
+                       udp_conf.use_udp_checksums = cfg->use_udp_checksums;
                }
 
-               if (!cfg->use_udp_checksums)
-                       sock->sk->sk_no_check_tx = 1;
+               udp_conf.local_udp_port = htons(cfg->local_udp_port);
+               udp_conf.peer_udp_port = htons(cfg->peer_udp_port);
+
+               err = udp_sock_create(net, &udp_conf, &sock);
+               if (err < 0)
+                       goto out;
 
                break;
 
        case L2TP_ENCAPTYPE_IP:
 #if IS_ENABLED(CONFIG_IPV6)
                if (cfg->local_ip6 && cfg->peer_ip6) {
+                       struct sockaddr_l2tpip6 ip6_addr = {0};
+
                        err = sock_create_kern(AF_INET6, SOCK_DGRAM,
                                          IPPROTO_L2TP, &sock);
                        if (err < 0)
@@ -1461,6 +1427,8 @@ static int l2tp_tunnel_sock_create(struct net *net,
                } else
 #endif
                {
+                       struct sockaddr_l2tpip ip_addr = {0};
+
                        err = sock_create_kern(AF_INET, SOCK_DGRAM,
                                          IPPROTO_L2TP, &sock);
                        if (err < 0)
index 76125c5..edb78e6 100644 (file)
@@ -246,7 +246,8 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
                goto out;
        }
 
-       dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup);
+       dev = alloc_netdev(sizeof(*priv), name, NET_NAME_UNKNOWN,
+                          l2tp_eth_dev_setup);
        if (!dev) {
                rc = -ENOMEM;
                goto out_del_session;
index f3f98a1..0edb263 100644 (file)
@@ -687,7 +687,7 @@ static int l2tp_ip6_recvmsg(struct kiocb *iocb, struct sock *sk,
                lsa->l2tp_scope_id = 0;
                lsa->l2tp_conn_id = 0;
                if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
-                       lsa->l2tp_scope_id = IP6CB(skb)->iif;
+                       lsa->l2tp_scope_id = inet6_iif(skb);
        }
 
        if (np->rxopt.all)
index 950909f..13752d9 100644 (file)
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
        int err;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.setsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (optlen < sizeof(int))
                return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
        struct pppol2tp_session *ps;
 
        if (level != SOL_PPPOL2TP)
-               return udp_prot.getsockopt(sk, level, optname, optval, optlen);
+               return -EINVAL;
 
        if (get_user(len, optlen))
                return -EFAULT;
index 29be885..01eede7 100644 (file)
@@ -1653,9 +1653,9 @@ int ieee80211_if_add(struct ieee80211_local *local, const char *name,
                if (local->hw.queues >= IEEE80211_NUM_ACS)
                        txqs = IEEE80211_NUM_ACS;
 
-               ndev = alloc_netdev_mqs(sizeof(*sdata) +
-                                       local->hw.vif_data_size,
-                                       name, ieee80211_if_setup, txqs, 1);
+               ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size,
+                                       name, NET_NAME_UNKNOWN,
+                                       ieee80211_if_setup, txqs, 1);
                if (!ndev)
                        return -ENOMEM;
                dev_net_set(ndev, wiphy_net(local->hw.wiphy));
index 94758b9..214e63b 100644 (file)
@@ -157,7 +157,6 @@ static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
        default:
                kfree_skb(skb);
                return -ENOTSUPP;
-               break;
        }
        *pos++ = ie_len;
        *pos++ = flags;
index 2cf66d8..b36b2b9 100644 (file)
@@ -143,6 +143,7 @@ static void
 mac802154_del_iface(struct wpan_phy *phy, struct net_device *dev)
 {
        struct mac802154_sub_if_data *sdata;
+
        ASSERT_RTNL();
 
        sdata = netdev_priv(dev);
@@ -166,11 +167,13 @@ mac802154_add_iface(struct wpan_phy *phy, const char *name, int type)
        switch (type) {
        case IEEE802154_DEV_MONITOR:
                dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
-                                  name, mac802154_monitor_setup);
+                                  name, NET_NAME_UNKNOWN,
+                                  mac802154_monitor_setup);
                break;
        case IEEE802154_DEV_WPAN:
                dev = alloc_netdev(sizeof(struct mac802154_sub_if_data),
-                                  name, mac802154_wpan_setup);
+                                  name, NET_NAME_UNKNOWN,
+                                  mac802154_wpan_setup);
                break;
        default:
                dev = NULL;
@@ -276,7 +279,8 @@ ieee802154_alloc_device(size_t priv_data_len, struct ieee802154_ops *ops)
        }
 
        priv = wpan_phy_priv(phy);
-       priv->hw.phy = priv->phy = phy;
+       priv->phy = phy;
+       priv->hw.phy = priv->phy;
        priv->hw.priv = (char *)priv + ALIGN(sizeof(*priv), NETDEV_ALIGN);
        priv->ops = ops;
 
@@ -302,29 +306,61 @@ EXPORT_SYMBOL(ieee802154_free_device);
 int ieee802154_register_device(struct ieee802154_dev *dev)
 {
        struct mac802154_priv *priv = mac802154_to_priv(dev);
-       int rc = -ENOMEM;
+       int rc = -ENOSYS;
+
+       if (dev->flags & IEEE802154_HW_TXPOWER) {
+               if (!priv->ops->set_txpower)
+                       goto out;
+
+               priv->phy->set_txpower = mac802154_set_txpower;
+       }
+
+       if (dev->flags & IEEE802154_HW_LBT) {
+               if (!priv->ops->set_lbt)
+                       goto out;
+
+               priv->phy->set_lbt = mac802154_set_lbt;
+       }
+
+       if (dev->flags & IEEE802154_HW_CCA_MODE) {
+               if (!priv->ops->set_cca_mode)
+                       goto out;
+
+               priv->phy->set_cca_mode = mac802154_set_cca_mode;
+       }
+
+       if (dev->flags & IEEE802154_HW_CCA_ED_LEVEL) {
+               if (!priv->ops->set_cca_ed_level)
+                       goto out;
+
+               priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
+       }
+
+       if (dev->flags & IEEE802154_HW_CSMA_PARAMS) {
+               if (!priv->ops->set_csma_params)
+                       goto out;
+
+               priv->phy->set_csma_params = mac802154_set_csma_params;
+       }
+
+       if (dev->flags & IEEE802154_HW_FRAME_RETRIES) {
+               if (!priv->ops->set_frame_retries)
+                       goto out;
+
+               priv->phy->set_frame_retries = mac802154_set_frame_retries;
+       }
 
        priv->dev_workqueue =
                create_singlethread_workqueue(wpan_phy_name(priv->phy));
-       if (!priv->dev_workqueue)
+       if (!priv->dev_workqueue) {
+               rc = -ENOMEM;
                goto out;
+       }
 
        wpan_phy_set_dev(priv->phy, priv->hw.parent);
 
        priv->phy->add_iface = mac802154_add_iface;
        priv->phy->del_iface = mac802154_del_iface;
-       if (priv->ops->set_txpower)
-               priv->phy->set_txpower = mac802154_set_txpower;
-       if (priv->ops->set_lbt)
-               priv->phy->set_lbt = mac802154_set_lbt;
-       if (priv->ops->set_cca_mode)
-               priv->phy->set_cca_mode = mac802154_set_cca_mode;
-       if (priv->ops->set_cca_ed_level)
-               priv->phy->set_cca_ed_level = mac802154_set_cca_ed_level;
-       if (priv->ops->set_csma_params)
-               priv->phy->set_csma_params = mac802154_set_csma_params;
-       if (priv->ops->set_frame_retries)
-               priv->phy->set_frame_retries = mac802154_set_frame_retries;
 
        rc = wpan_phy_register(priv->phy);
        if (rc < 0)
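
ieee802154_register_device() now fails fast instead of silently skipping callbacks: a driver that advertises a capability flag without implementing the matching op gets -ENOSYS before anything is registered. A compact sketch of that validation pattern (illustrative names only):

#include <errno.h>
#include <stdio.h>

#define HW_TXPOWER      (1 << 0)        /* one advertised capability bit */

struct ops {
        int (*set_txpower)(int mbm);
};

static int register_device(unsigned int flags, const struct ops *ops)
{
        /* a claimed capability without the matching op is now fatal */
        if ((flags & HW_TXPOWER) && !ops->set_txpower)
                return -ENOSYS;
        /* ... wire up phy callbacks and register the phy ... */
        return 0;
}

int main(void)
{
        struct ops o = { 0 };

        printf("rc=%d\n", register_device(HW_TXPOWER, &o)); /* -ENOSYS */
        return 0;
}
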
index 1456f73..4570581 100644 (file)
@@ -538,6 +538,7 @@ static int llsec_recover_addr(struct mac802154_llsec *sec,
                              struct ieee802154_addr *addr)
 {
        __le16 caddr = sec->params.coord_shortaddr;
+
        addr->pan_id = sec->params.pan_id;
 
        if (caddr == cpu_to_le16(IEEE802154_ADDR_BROADCAST)) {
index 15aa2f2..868a040 100644 (file)
@@ -175,9 +175,9 @@ static void phy_chan_notify(struct work_struct *work)
 
        mutex_lock(&priv->hw->phy->pib_lock);
        res = hw->ops->set_channel(&hw->hw, priv->page, priv->chan);
-       if (res)
+       if (res) {
                pr_debug("set_channel failed\n");
-       else {
+       } else {
                priv->hw->phy->current_channel = priv->chan;
                priv->hw->phy->current_page = priv->page;
        }
@@ -210,8 +210,9 @@ void mac802154_dev_set_page_channel(struct net_device *dev, u8 page, u8 chan)
                INIT_WORK(&work->work, phy_chan_notify);
                work->dev = dev;
                queue_work(priv->hw->dev_workqueue, &work->work);
-       } else
+       } else {
                mutex_unlock(&priv->hw->phy->pib_lock);
+       }
 }
 
 
index 6d16473..8124353 100644 (file)
@@ -98,6 +98,7 @@ netdev_tx_t mac802154_tx(struct mac802154_priv *priv, struct sk_buff *skb,
        if (!(priv->hw.flags & IEEE802154_HW_OMIT_CKSUM)) {
                u16 crc = crc_ccitt(0, skb->data, skb->len);
                u8 *data = skb_put(skb, 2);
+
                data[0] = crc & 0xff;
                data[1] = crc >> 8;
        }
index e9410d1..ad751fe 100644 (file)
@@ -46,6 +46,9 @@ config NF_CONNTRACK
 
          To compile it as a module, choose M here.  If unsure, say N.
 
+config NF_LOG_COMMON
+       tristate
+
 if NF_CONNTRACK
 
 config NF_CONNTRACK_MARK
@@ -744,6 +747,7 @@ config NETFILTER_XT_TARGET_LED
 
 config NETFILTER_XT_TARGET_LOG
        tristate "LOG target support"
+       depends on NF_LOG_IPV4 && NF_LOG_IPV6
        default m if NETFILTER_ADVANCED=n
        help
          This option adds a `LOG' target, which allows you to create rules in
index bffdad7..8308624 100644 (file)
@@ -47,6 +47,9 @@ obj-$(CONFIG_NF_CONNTRACK_TFTP) += nf_conntrack_tftp.o
 nf_nat-y       := nf_nat_core.o nf_nat_proto_unknown.o nf_nat_proto_common.o \
                   nf_nat_proto_udp.o nf_nat_proto_tcp.o nf_nat_helper.o
 
+# generic transport layer logging
+obj-$(CONFIG_NF_LOG_COMMON) += nf_log_common.o
+
 obj-$(CONFIG_NF_NAT) += nf_nat.o
 
 # NAT protocols (nf_nat)
index a8eb0a8..610e19c 100644 (file)
@@ -797,7 +797,6 @@ static void ip_vs_conn_expire(unsigned long data)
                        ip_vs_control_del(cp);
 
                if (cp->flags & IP_VS_CONN_F_NFCT) {
-                       ip_vs_conn_drop_conntrack(cp);
                        /* Do not access conntracks during subsys cleanup
                         * because nf_conntrack_find_get can not be used after
                         * conntrack cleanup for the net.
index c42e83d..8416307 100644 (file)
@@ -1806,92 +1806,6 @@ static struct ctl_table vs_vars[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-#endif
-#if 0
-       {
-               .procname       = "timeout_established",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_ESTABLISHED],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_synsent",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_SENT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_synrecv",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYN_RECV],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_finwait",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_FIN_WAIT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_timewait",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_TIME_WAIT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_close",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_closewait",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_CLOSE_WAIT],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_lastack",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_LAST_ACK],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_listen",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_LISTEN],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_synack",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_SYNACK],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_udp",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_UDP],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
-       {
-               .procname       = "timeout_icmp",
-               .data   = &vs_timeout_table_dos.timeout[IP_VS_S_ICMP],
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
 #endif
        { }
 };
@@ -3778,6 +3692,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
        cancel_delayed_work_sync(&ipvs->defense_work);
        cancel_work_sync(&ipvs->defense_work.work);
        unregister_net_sysctl_table(ipvs->sysctl_hdr);
+       ip_vs_stop_estimator(net, &ipvs->tot_stats);
 }
 
 #else
@@ -3840,7 +3755,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
        struct netns_ipvs *ipvs = net_ipvs(net);
 
        ip_vs_trash_cleanup(net);
-       ip_vs_stop_estimator(net, &ipvs->tot_stats);
        ip_vs_control_net_cleanup_sysctl(net);
        remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
        remove_proc_entry("ip_vs_stats", net->proc_net);
index db80126..eadffb2 100644 (file)
@@ -886,8 +886,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
                cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark);
                rcu_read_unlock();
                if (!cp) {
-                       if (param->pe_data)
-                               kfree(param->pe_data);
+                       kfree(param->pe_data);
                        IP_VS_DBG(2, "BACKUP, add new conn. failed\n");
                        return;
                }
index 1f4f954..de88c4a 100644 (file)
@@ -352,40 +352,6 @@ static void nf_ct_delete_from_lists(struct nf_conn *ct)
        local_bh_enable();
 }
 
-static void death_by_event(unsigned long ul_conntrack)
-{
-       struct nf_conn *ct = (void *)ul_conntrack;
-       struct net *net = nf_ct_net(ct);
-       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
-
-       BUG_ON(ecache == NULL);
-
-       if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) {
-               /* bad luck, let's retry again */
-               ecache->timeout.expires = jiffies +
-                       (prandom_u32() % net->ct.sysctl_events_retry_timeout);
-               add_timer(&ecache->timeout);
-               return;
-       }
-       /* we've got the event delivered, now it's dying */
-       set_bit(IPS_DYING_BIT, &ct->status);
-       nf_ct_put(ct);
-}
-
-static void nf_ct_dying_timeout(struct nf_conn *ct)
-{
-       struct net *net = nf_ct_net(ct);
-       struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct);
-
-       BUG_ON(ecache == NULL);
-
-       /* set a new timer to retry event delivery */
-       setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct);
-       ecache->timeout.expires = jiffies +
-               (prandom_u32() % net->ct.sysctl_events_retry_timeout);
-       add_timer(&ecache->timeout);
-}
-
 bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
 {
        struct nf_conn_tstamp *tstamp;
@@ -394,15 +360,20 @@ bool nf_ct_delete(struct nf_conn *ct, u32 portid, int report)
        if (tstamp && tstamp->stop == 0)
                tstamp->stop = ktime_to_ns(ktime_get_real());
 
-       if (!nf_ct_is_dying(ct) &&
-           unlikely(nf_conntrack_event_report(IPCT_DESTROY, ct,
-           portid, report) < 0)) {
+       if (nf_ct_is_dying(ct))
+               goto delete;
+
+       if (nf_conntrack_event_report(IPCT_DESTROY, ct,
+                                   portid, report) < 0) {
                /* destroy event was not delivered */
                nf_ct_delete_from_lists(ct);
-               nf_ct_dying_timeout(ct);
+               nf_conntrack_ecache_delayed_work(nf_ct_net(ct));
                return false;
        }
+
+       nf_conntrack_ecache_work(nf_ct_net(ct));
        set_bit(IPS_DYING_BIT, &ct->status);
+ delete:
        nf_ct_delete_from_lists(ct);
        nf_ct_put(ct);
        return true;
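
The restructured nf_ct_delete() above reads as: an already-dying entry skips event delivery entirely, while a failed delivery unlinks the entry and kicks the per-netns delayed worker rather than arming a per-conntrack timer. A small userspace sketch of the flow (helper names are placeholders):

#include <stdbool.h>
#include <stdio.h>

static bool deliver_destroy_event(void) { return false; /* e.g. congested */ }
static void unlink_entry(void)          { puts("unlinked from lists"); }
static void kick_delayed_worker(void)   { puts("ecache worker scheduled"); }

static bool ct_delete(bool already_dying)
{
        if (already_dying)
                goto delete;

        if (!deliver_destroy_event()) {
                /* event lost: park the entry on the dying list and let
                 * the per-netns delayed worker retry delivery */
                unlink_entry();
                kick_delayed_worker();
                return false;
        }
delete:
        unlink_entry();
        return true;
}

int main(void)
{
        ct_delete(false);
        return 0;
}
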
@@ -1464,26 +1435,6 @@ void nf_conntrack_flush_report(struct net *net, u32 portid, int report)
 }
 EXPORT_SYMBOL_GPL(nf_conntrack_flush_report);
 
-static void nf_ct_release_dying_list(struct net *net)
-{
-       struct nf_conntrack_tuple_hash *h;
-       struct nf_conn *ct;
-       struct hlist_nulls_node *n;
-       int cpu;
-
-       for_each_possible_cpu(cpu) {
-               struct ct_pcpu *pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
-
-               spin_lock_bh(&pcpu->lock);
-               hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
-                       ct = nf_ct_tuplehash_to_ctrack(h);
-                       /* never fails to remove them, no listeners at this point */
-                       nf_ct_kill(ct);
-               }
-               spin_unlock_bh(&pcpu->lock);
-       }
-}
-
 static int untrack_refs(void)
 {
        int cnt = 0, cpu;
@@ -1548,7 +1499,6 @@ i_see_dead_people:
        busy = 0;
        list_for_each_entry(net, net_exit_list, exit_list) {
                nf_ct_iterate_cleanup(net, kill_all, NULL, 0, 0);
-               nf_ct_release_dying_list(net);
                if (atomic_read(&net->ct.count) != 0)
                        busy = 1;
        }
index 1df1761..4e78c57 100644 (file)
 
 static DEFINE_MUTEX(nf_ct_ecache_mutex);
 
+#define ECACHE_RETRY_WAIT (HZ/10)
+
+enum retry_state {
+       STATE_CONGESTED,
+       STATE_RESTART,
+       STATE_DONE,
+};
+
+static enum retry_state ecache_work_evict_list(struct ct_pcpu *pcpu)
+{
+       struct nf_conn *refs[16];
+       struct nf_conntrack_tuple_hash *h;
+       struct hlist_nulls_node *n;
+       unsigned int evicted = 0;
+       enum retry_state ret = STATE_DONE;
+
+       spin_lock(&pcpu->lock);
+
+       hlist_nulls_for_each_entry(h, n, &pcpu->dying, hnnode) {
+               struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+               if (nf_ct_is_dying(ct))
+                       continue;
+
+               if (nf_conntrack_event(IPCT_DESTROY, ct)) {
+                       ret = STATE_CONGESTED;
+                       break;
+               }
+
+               /* we've got the event delivered, now it's dying */
+               set_bit(IPS_DYING_BIT, &ct->status);
+               refs[evicted] = ct;
+
+               if (++evicted >= ARRAY_SIZE(refs)) {
+                       ret = STATE_RESTART;
+                       break;
+               }
+       }
+
+       spin_unlock(&pcpu->lock);
+
+       /* can't _put while holding lock */
+       while (evicted)
+               nf_ct_put(refs[--evicted]);
+
+       return ret;
+}
+
+static void ecache_work(struct work_struct *work)
+{
+       struct netns_ct *ctnet =
+               container_of(work, struct netns_ct, ecache_dwork.work);
+       int cpu, delay = -1;
+       struct ct_pcpu *pcpu;
+
+       local_bh_disable();
+
+       for_each_possible_cpu(cpu) {
+               enum retry_state ret;
+
+               pcpu = per_cpu_ptr(ctnet->pcpu_lists, cpu);
+
+               ret = ecache_work_evict_list(pcpu);
+
+               switch (ret) {
+               case STATE_CONGESTED:
+                       delay = ECACHE_RETRY_WAIT;
+                       goto out;
+               case STATE_RESTART:
+                       delay = 0;
+                       break;
+               case STATE_DONE:
+                       break;
+               }
+       }
+
+ out:
+       local_bh_enable();
+
+       ctnet->ecache_dwork_pending = delay > 0;
+       if (delay >= 0)
+               schedule_delayed_work(&ctnet->ecache_dwork, delay);
+}
+
 /* deliver cached events and clear cache entry - must be called with locally
  * disabled softirqs */
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
@@ -157,7 +241,6 @@ EXPORT_SYMBOL_GPL(nf_ct_expect_unregister_notifier);
 
 #define NF_CT_EVENTS_DEFAULT 1
 static int nf_ct_events __read_mostly = NF_CT_EVENTS_DEFAULT;
-static int nf_ct_events_retry_timeout __read_mostly = 15*HZ;
 
 #ifdef CONFIG_SYSCTL
 static struct ctl_table event_sysctl_table[] = {
@@ -168,13 +251,6 @@ static struct ctl_table event_sysctl_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "nf_conntrack_events_retry_timeout",
-               .data           = &init_net.ct.sysctl_events_retry_timeout,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_jiffies,
-       },
        {}
 };
 #endif /* CONFIG_SYSCTL */
@@ -196,7 +272,6 @@ static int nf_conntrack_event_init_sysctl(struct net *net)
                goto out;
 
        table[0].data = &net->ct.sysctl_events;
-       table[1].data = &net->ct.sysctl_events_retry_timeout;
 
        /* Don't export sysctls to unprivileged users */
        if (net->user_ns != &init_user_ns)
@@ -238,12 +313,13 @@ static void nf_conntrack_event_fini_sysctl(struct net *net)
 int nf_conntrack_ecache_pernet_init(struct net *net)
 {
        net->ct.sysctl_events = nf_ct_events;
-       net->ct.sysctl_events_retry_timeout = nf_ct_events_retry_timeout;
+       INIT_DELAYED_WORK(&net->ct.ecache_dwork, ecache_work);
        return nf_conntrack_event_init_sysctl(net);
 }
 
 void nf_conntrack_ecache_pernet_fini(struct net *net)
 {
+       cancel_delayed_work_sync(&net->ct.ecache_dwork);
        nf_conntrack_event_fini_sysctl(net);
 }
 
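The worker above encodes a three-way retry policy per CPU list: congestion backs off for ECACHE_RETRY_WAIT, a full 16-entry batch reschedules immediately, and a clean pass lets the work go idle. A userspace sketch of that policy loop (names illustrative):

#include <stdio.h>

enum retry_state { STATE_CONGESTED, STATE_RESTART, STATE_DONE };

static enum retry_state evict_one_list(int cpu)
{
        (void)cpu;              /* stand-in for draining one per-cpu dying list */
        return STATE_DONE;
}

static int work_pass(int ncpus, int retry_wait)
{
        int cpu, delay = -1;    /* -1: go idle, don't reschedule */

        for (cpu = 0; cpu < ncpus; cpu++) {
                switch (evict_one_list(cpu)) {
                case STATE_CONGESTED:
                        return retry_wait;      /* event channel full: back off */
                case STATE_RESTART:
                        delay = 0;              /* batch filled: rerun at once */
                        break;
                case STATE_DONE:
                        break;
                }
        }
        return delay;
}

int main(void)
{
        printf("delay=%d\n", work_pass(4, 10));
        return 0;
}
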
index 5857963..355a5c4 100644 (file)
@@ -596,6 +596,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
 #endif
 #ifdef CONFIG_NF_CONNTRACK_MARK
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               + ctnetlink_label_size(ct)
@@ -742,8 +745,7 @@ static int ctnetlink_done(struct netlink_callback *cb)
 {
        if (cb->args[1])
                nf_ct_put((struct nf_conn *)cb->args[1]);
-       if (cb->data)
-               kfree(cb->data);
+       kfree(cb->data);
        return 0;
 }
 
@@ -1150,7 +1152,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
 static int
 ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
 {
-       struct nf_conn *ct, *last = NULL;
+       struct nf_conn *ct, *last;
        struct nf_conntrack_tuple_hash *h;
        struct hlist_nulls_node *n;
        struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1165,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
        if (cb->args[2])
                return 0;
 
-       if (cb->args[0] == nr_cpu_ids)
-               return 0;
+       last = (struct nf_conn *)cb->args[1];
 
        for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
                struct ct_pcpu *pcpu;
@@ -1174,7 +1175,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
 
                pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
                spin_lock_bh(&pcpu->lock);
-               last = (struct nf_conn *)cb->args[1];
                list = dying ? &pcpu->dying : &pcpu->unconfirmed;
 restart:
                hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1193,9 @@ restart:
                                                  ct);
                        rcu_read_unlock();
                        if (res < 0) {
-                               nf_conntrack_get(&ct->ct_general);
+                               if (!atomic_inc_not_zero(&ct->ct_general.use))
+                                       continue;
+                               cb->args[0] = cpu;
                                cb->args[1] = (unsigned long)ct;
                                spin_unlock_bh(&pcpu->lock);
                                goto out;
@@ -1202,10 +1204,10 @@ restart:
                if (cb->args[1]) {
                        cb->args[1] = 0;
                        goto restart;
-               } else
-                       cb->args[2] = 1;
+               }
                spin_unlock_bh(&pcpu->lock);
        }
+       cb->args[2] = 1;
 out:
        if (last)
                nf_ct_put(last);
@@ -2039,6 +2041,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
 #endif
 #ifdef CONFIG_NF_CONNTRACK_MARK
               + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
+#endif
+#ifdef CONFIG_NF_CONNTRACK_ZONES
+              + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
 #endif
               + ctnetlink_proto_size(ct)
               ;
index 85296d4..daad602 100644 (file)
 #define NF_LOG_PREFIXLEN               128
 #define NFLOGGER_NAME_LEN              64
 
-static struct list_head nf_loggers_l[NFPROTO_NUMPROTO] __read_mostly;
+static struct nf_logger __rcu *loggers[NFPROTO_NUMPROTO][NF_LOG_TYPE_MAX] __read_mostly;
 static DEFINE_MUTEX(nf_log_mutex);
 
 static struct nf_logger *__find_logger(int pf, const char *str_logger)
 {
-       struct nf_logger *t;
+       struct nf_logger *log;
+       int i;
+
+       for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
+               if (loggers[pf][i] == NULL)
+                       continue;
 
-       list_for_each_entry(t, &nf_loggers_l[pf], list[pf]) {
-               if (!strnicmp(str_logger, t->name, strlen(t->name)))
-                       return t;
+               log = rcu_dereference_protected(loggers[pf][i],
+                                               lockdep_is_held(&nf_log_mutex));
+               if (!strnicmp(str_logger, log->name, strlen(log->name)))
+                       return log;
        }
 
        return NULL;
@@ -73,17 +79,14 @@ int nf_log_register(u_int8_t pf, struct nf_logger *logger)
        if (pf >= ARRAY_SIZE(init_net.nf.nf_loggers))
                return -EINVAL;
 
-       for (i = 0; i < ARRAY_SIZE(logger->list); i++)
-               INIT_LIST_HEAD(&logger->list[i]);
-
        mutex_lock(&nf_log_mutex);
 
        if (pf == NFPROTO_UNSPEC) {
                for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
-                       list_add_tail(&(logger->list[i]), &(nf_loggers_l[i]));
+                       rcu_assign_pointer(loggers[i][logger->type], logger);
        } else {
                /* register at end of list to honor first register win */
-               list_add_tail(&logger->list[pf], &nf_loggers_l[pf]);
+               rcu_assign_pointer(loggers[pf][logger->type], logger);
        }
 
        mutex_unlock(&nf_log_mutex);
@@ -98,7 +101,7 @@ void nf_log_unregister(struct nf_logger *logger)
 
        mutex_lock(&nf_log_mutex);
        for (i = 0; i < NFPROTO_NUMPROTO; i++)
-               list_del(&logger->list[i]);
+               RCU_INIT_POINTER(loggers[i][logger->type], NULL);
        mutex_unlock(&nf_log_mutex);
 }
 EXPORT_SYMBOL(nf_log_unregister);
@@ -129,6 +132,48 @@ void nf_log_unbind_pf(struct net *net, u_int8_t pf)
 }
 EXPORT_SYMBOL(nf_log_unbind_pf);
 
+void nf_logger_request_module(int pf, enum nf_log_type type)
+{
+       if (loggers[pf][type] == NULL)
+               request_module("nf-logger-%u-%u", pf, type);
+}
+EXPORT_SYMBOL_GPL(nf_logger_request_module);
+
+int nf_logger_find_get(int pf, enum nf_log_type type)
+{
+       struct nf_logger *logger;
+       int ret = -ENOENT;
+
+       logger = loggers[pf][type];
+       if (logger == NULL)
+               request_module("nf-logger-%u-%u", pf, type);
+
+       rcu_read_lock();
+       logger = rcu_dereference(loggers[pf][type]);
+       if (logger == NULL)
+               goto out;
+
+       if (logger && try_module_get(logger->me))
+               ret = 0;
+out:
+       rcu_read_unlock();
+       return ret;
+}
+EXPORT_SYMBOL_GPL(nf_logger_find_get);
+
+void nf_logger_put(int pf, enum nf_log_type type)
+{
+       struct nf_logger *logger;
+
+       BUG_ON(loggers[pf][type] == NULL);
+
+       rcu_read_lock();
+       logger = rcu_dereference(loggers[pf][type]);
+       module_put(logger->me);
+       rcu_read_unlock();
+}
+EXPORT_SYMBOL_GPL(nf_logger_put);
+
 void nf_log_packet(struct net *net,
                   u_int8_t pf,
                   unsigned int hooknum,
@@ -143,7 +188,11 @@ void nf_log_packet(struct net *net,
        const struct nf_logger *logger;
 
        rcu_read_lock();
-       logger = rcu_dereference(net->nf.nf_loggers[pf]);
+       if (loginfo != NULL)
+               logger = rcu_dereference(loggers[pf][loginfo->type]);
+       else
+               logger = rcu_dereference(net->nf.nf_loggers[pf]);
+
        if (logger) {
                va_start(args, fmt);
                vsnprintf(prefix, sizeof(prefix), fmt, args);
@@ -154,6 +203,63 @@ void nf_log_packet(struct net *net,
 }
 EXPORT_SYMBOL(nf_log_packet);
 
+#define S_SIZE (1024 - (sizeof(unsigned int) + 1))
+
+struct nf_log_buf {
+       unsigned int    count;
+       char            buf[S_SIZE + 1];
+};
+static struct nf_log_buf emergency, *emergency_ptr = &emergency;
+
+__printf(2, 3) int nf_log_buf_add(struct nf_log_buf *m, const char *f, ...)
+{
+       va_list args;
+       int len;
+
+       if (likely(m->count < S_SIZE)) {
+               va_start(args, f);
+               len = vsnprintf(m->buf + m->count, S_SIZE - m->count, f, args);
+               va_end(args);
+               if (likely(m->count + len < S_SIZE)) {
+                       m->count += len;
+                       return 0;
+               }
+       }
+       m->count = S_SIZE;
+       printk_once(KERN_ERR KBUILD_MODNAME " please increase S_SIZE\n");
+       return -1;
+}
+EXPORT_SYMBOL_GPL(nf_log_buf_add);
+
+struct nf_log_buf *nf_log_buf_open(void)
+{
+       struct nf_log_buf *m = kmalloc(sizeof(*m), GFP_ATOMIC);
+
+       if (unlikely(!m)) {
+               local_bh_disable();
+               do {
+                       m = xchg(&emergency_ptr, NULL);
+               } while (!m);
+       }
+       m->count = 0;
+       return m;
+}
+EXPORT_SYMBOL_GPL(nf_log_buf_open);
+
+void nf_log_buf_close(struct nf_log_buf *m)
+{
+       m->buf[m->count] = 0;
+       printk("%s\n", m->buf);
+
+       if (likely(m != &emergency))
+               kfree(m);
+       else {
+               emergency_ptr = m;
+               local_bh_enable();
+       }
+}
+EXPORT_SYMBOL_GPL(nf_log_buf_close);
+
 #ifdef CONFIG_PROC_FS
 static void *seq_start(struct seq_file *seq, loff_t *pos)
 {
@@ -188,8 +294,7 @@ static int seq_show(struct seq_file *s, void *v)
 {
        loff_t *pos = v;
        const struct nf_logger *logger;
-       struct nf_logger *t;
-       int ret;
+       int i, ret;
        struct net *net = seq_file_net(s);
 
        logger = rcu_dereference_protected(net->nf.nf_loggers[*pos],
@@ -203,11 +308,16 @@ static int seq_show(struct seq_file *s, void *v)
        if (ret < 0)
                return ret;
 
-       list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
-               ret = seq_printf(s, "%s", t->name);
+       for (i = 0; i < NF_LOG_TYPE_MAX; i++) {
+               if (loggers[*pos][i] == NULL)
+                       continue;
+
+               logger = rcu_dereference_protected(loggers[*pos][i],
+                                          lockdep_is_held(&nf_log_mutex));
+               ret = seq_printf(s, "%s", logger->name);
                if (ret < 0)
                        return ret;
-               if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
+               if (i == 0 && loggers[*pos][i + 1] != NULL) {
                        ret = seq_printf(s, ",");
                        if (ret < 0)
                                return ret;
@@ -389,14 +499,5 @@ static struct pernet_operations nf_log_net_ops = {
 
 int __init netfilter_log_init(void)
 {
-       int i, ret;
-
-       ret = register_pernet_subsys(&nf_log_net_ops);
-       if (ret < 0)
-               return ret;
-
-       for (i = NFPROTO_UNSPEC; i < NFPROTO_NUMPROTO; i++)
-               INIT_LIST_HEAD(&(nf_loggers_l[i]));
-
-       return 0;
+       return register_pernet_subsys(&nf_log_net_ops);
 }
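
nf_log_buf_open() above guarantees a buffer even when GFP_ATOMIC allocation fails, by spinning to claim one static emergency buffer through an atomic exchange and handing it back in nf_log_buf_close(). A userspace sketch of the fallback using C11 atomics (simplified; the kernel version also disables bottom halves while holding the emergency buffer):

#include <stdatomic.h>
#include <stdlib.h>

struct log_buf {
        unsigned int count;
        char data[1024];
};

static struct log_buf emergency;
static _Atomic(struct log_buf *) emergency_ptr = &emergency;

static struct log_buf *buf_open(void)
{
        struct log_buf *m = malloc(sizeof(*m));

        if (!m)         /* allocation failed: spin to claim the one */
                while (!(m = atomic_exchange(&emergency_ptr, NULL)))
                        ;       /* emergency buffer until it is free */
        m->count = 0;
        return m;
}

static void buf_close(struct log_buf *m)
{
        if (m != &emergency)
                free(m);
        else
                atomic_store(&emergency_ptr, m);        /* hand it back */
}

int main(void)
{
        buf_close(buf_open());
        return 0;
}
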
diff --git a/net/netfilter/nf_log_common.c b/net/netfilter/nf_log_common.c
new file mode 100644 (file)
index 0000000..eeb8ef4
--- /dev/null
+++ b/net/netfilter/nf_log_common.c
@@ -0,0 +1,187 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/ip.h>
+#include <net/icmp.h>
+#include <net/udp.h>
+#include <net/tcp.h>
+#include <net/route.h>
+
+#include <linux/netfilter.h>
+#include <linux/netfilter/xt_LOG.h>
+#include <net/netfilter/nf_log.h>
+
+int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset)
+{
+       struct udphdr _udph;
+       const struct udphdr *uh;
+
+       if (proto == IPPROTO_UDP)
+               /* Max length: 10 "PROTO=UDP "     */
+               nf_log_buf_add(m, "PROTO=UDP ");
+       else    /* Max length: 14 "PROTO=UDPLITE " */
+               nf_log_buf_add(m, "PROTO=UDPLITE ");
+
+       if (fragment)
+               goto out;
+
+       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+       uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
+       if (uh == NULL) {
+               nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+
+               return 1;
+       }
+
+       /* Max length: 20 "SPT=65535 DPT=65535 " */
+       nf_log_buf_add(m, "SPT=%u DPT=%u LEN=%u ",
+                      ntohs(uh->source), ntohs(uh->dest), ntohs(uh->len));
+
+out:
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_udp_header);
+
+int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
+                          u8 proto, int fragment, unsigned int offset,
+                          unsigned int logflags)
+{
+       struct tcphdr _tcph;
+       const struct tcphdr *th;
+
+       /* Max length: 10 "PROTO=TCP " */
+       nf_log_buf_add(m, "PROTO=TCP ");
+
+       if (fragment)
+               return 0;
+
+       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
+       th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
+       if (th == NULL) {
+               nf_log_buf_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
+               return 1;
+       }
+
+       /* Max length: 20 "SPT=65535 DPT=65535 " */
+       nf_log_buf_add(m, "SPT=%u DPT=%u ",
+                      ntohs(th->source), ntohs(th->dest));
+       /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
+       if (logflags & XT_LOG_TCPSEQ) {
+               nf_log_buf_add(m, "SEQ=%u ACK=%u ",
+                              ntohl(th->seq), ntohl(th->ack_seq));
+       }
+
+       /* Max length: 13 "WINDOW=65535 " */
+       nf_log_buf_add(m, "WINDOW=%u ", ntohs(th->window));
+       /* Max length: 9 "RES=0x3C " */
+       nf_log_buf_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
+                                           TCP_RESERVED_BITS) >> 22));
+       /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
+       if (th->cwr)
+               nf_log_buf_add(m, "CWR ");
+       if (th->ece)
+               nf_log_buf_add(m, "ECE ");
+       if (th->urg)
+               nf_log_buf_add(m, "URG ");
+       if (th->ack)
+               nf_log_buf_add(m, "ACK ");
+       if (th->psh)
+               nf_log_buf_add(m, "PSH ");
+       if (th->rst)
+               nf_log_buf_add(m, "RST ");
+       if (th->syn)
+               nf_log_buf_add(m, "SYN ");
+       if (th->fin)
+               nf_log_buf_add(m, "FIN ");
+       /* Max length: 11 "URGP=65535 " */
+       nf_log_buf_add(m, "URGP=%u ", ntohs(th->urg_ptr));
+
+       if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
+               u_int8_t _opt[60 - sizeof(struct tcphdr)];
+               const u_int8_t *op;
+               unsigned int i;
+               unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
+
+               op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
+                                       optsize, _opt);
+               if (op == NULL) {
+                       nf_log_buf_add(m, "OPT (TRUNCATED)");
+                       return 1;
+               }
+
+               /* Max length: 127 "OPT (" 15*4*2chars ") " */
+               nf_log_buf_add(m, "OPT (");
+               for (i = 0; i < optsize; i++)
+                       nf_log_buf_add(m, "%02X", op[i]);
+
+               nf_log_buf_add(m, ") ");
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_tcp_header);
+
+void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk)
+{
+       if (!sk || sk->sk_state == TCP_TIME_WAIT)
+               return;
+
+       read_lock_bh(&sk->sk_callback_lock);
+       if (sk->sk_socket && sk->sk_socket->file) {
+               const struct cred *cred = sk->sk_socket->file->f_cred;
+               nf_log_buf_add(m, "UID=%u GID=%u ",
+                       from_kuid_munged(&init_user_ns, cred->fsuid),
+                       from_kgid_munged(&init_user_ns, cred->fsgid));
+       }
+       read_unlock_bh(&sk->sk_callback_lock);
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_sk_uid_gid);
+
+void
+nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
+                         unsigned int hooknum, const struct sk_buff *skb,
+                         const struct net_device *in,
+                         const struct net_device *out,
+                         const struct nf_loginfo *loginfo, const char *prefix)
+{
+       nf_log_buf_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
+              '0' + loginfo->u.log.level, prefix,
+              in ? in->name : "",
+              out ? out->name : "");
+#ifdef CONFIG_BRIDGE_NETFILTER
+       if (skb->nf_bridge) {
+               const struct net_device *physindev;
+               const struct net_device *physoutdev;
+
+               physindev = skb->nf_bridge->physindev;
+               if (physindev && in != physindev)
+                       nf_log_buf_add(m, "PHYSIN=%s ", physindev->name);
+               physoutdev = skb->nf_bridge->physoutdev;
+               if (physoutdev && out != physoutdev)
+                       nf_log_buf_add(m, "PHYSOUT=%s ", physoutdev->name);
+       }
+#endif
+}
+EXPORT_SYMBOL_GPL(nf_log_dump_packet_common);
+
+static int __init nf_log_common_init(void)
+{
+       return 0;
+}
+
+static void __exit nf_log_common_exit(void) {}
+
+module_init(nf_log_common_init);
+module_exit(nf_log_common_exit);
+
+MODULE_LICENSE("GPL");
index 09096a6..552f97c 100644 (file)
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
        return i->status & IPS_NAT_MASK ? 1 : 0;
 }
 
+static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
+{
+       struct nf_conn_nat *nat = nfct_nat(ct);
+
+       if (nf_nat_proto_remove(ct, data))
+               return 1;
+
+       if (!nat || !nat->ct)
+               return 0;
+
+       /* This netns is being destroyed, and conntrack has nat null binding.
+        * Remove it from bysource hash, as the table will be freed soon.
+        *
+        * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
+        * will delete entry from already-freed table.
+        */
+       if (!del_timer(&ct->timeout))
+               return 1;
+
+       spin_lock_bh(&nf_nat_lock);
+       hlist_del_rcu(&nat->bysource);
+       ct->status &= ~IPS_NAT_DONE_MASK;
+       nat->ct = NULL;
+       spin_unlock_bh(&nf_nat_lock);
+
+       add_timer(&ct->timeout);
+
+       /* don't delete conntrack.  Although that would make things a lot
+        * simpler, we'd end up flushing all conntracks on nat rmmod.
+        */
+       return 0;
+}
+
 static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
 {
        struct nf_nat_proto_clean clean = {
@@ -677,7 +710,7 @@ static struct nf_ct_ext_type nat_extend __read_mostly = {
        .flags          = NF_CT_EXT_F_PREALLOC,
 };
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
 {
        struct nf_nat_proto_clean clean = {};
 
-       nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0);
+       nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
        synchronize_rcu();
        nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
 }
index 83a72a2..fbce552 100644 (file)
@@ -95,7 +95,7 @@ void nf_nat_l4proto_unique_tuple(const struct nf_nat_l3proto *l3proto,
 }
 EXPORT_SYMBOL_GPL(nf_nat_l4proto_unique_tuple);
 
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
 int nf_nat_l4proto_nlattr_to_range(struct nlattr *tb[],
                                   struct nf_nat_range *range)
 {
index c8be2cd..b8067b5 100644 (file)
@@ -78,7 +78,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_dccp = {
        .manip_pkt              = dccp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = dccp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 754536f..cbc7ade 100644 (file)
@@ -59,7 +59,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_sctp = {
        .manip_pkt              = sctp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = sctp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 83ec8a6..37f5505 100644 (file)
@@ -79,7 +79,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_tcp = {
        .manip_pkt              = tcp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = tcp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 7df613f..b0ede2f 100644 (file)
@@ -70,7 +70,7 @@ const struct nf_nat_l4proto nf_nat_l4proto_udp = {
        .manip_pkt              = udp_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = udp_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 776a0d1..368f14e 100644 (file)
@@ -69,7 +69,7 @@ static const struct nf_nat_l4proto nf_nat_l4proto_udplite = {
        .manip_pkt              = udplite_manip_pkt,
        .in_range               = nf_nat_l4proto_in_range,
        .unique_tuple           = udplite_unique_tuple,
-#if defined(CONFIG_NF_CT_NETLINK) || defined(CONFIG_NF_CT_NETLINK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
        .nlattr_to_range        = nf_nat_l4proto_nlattr_to_range,
 #endif
 };
index 624e083..93692d6 100644 (file)
@@ -35,7 +35,7 @@ int nft_register_afinfo(struct net *net, struct nft_af_info *afi)
 {
        INIT_LIST_HEAD(&afi->tables);
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&afi->list, &net->nft.af_info);
+       list_add_tail_rcu(&afi->list, &net->nft.af_info);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -51,7 +51,7 @@ EXPORT_SYMBOL_GPL(nft_register_afinfo);
 void nft_unregister_afinfo(struct nft_af_info *afi)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&afi->list);
+       list_del_rcu(&afi->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_afinfo);
@@ -277,11 +277,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
                        if (idx < s_idx)
                                goto cont;
                        if (idx > s_idx)
@@ -294,11 +297,14 @@ static int nf_tables_dump_tables(struct sk_buff *skb,
                                                      NLM_F_MULTI,
                                                      afi->family, table) < 0)
                                goto done;
+
+                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                        idx++;
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
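
The dump path above trades the genctr/EBUSY invalidation for RCU list walks plus nl_dump_check_consistent(): the dump records net->nft.base_seq up front and the netlink core flags the result inconsistent if a writer bumps it mid-walk. A toy sketch of the generation check (names illustrative):

#include <stdio.h>

static unsigned int base_seq = 1;       /* bumped by every committed change */

struct dump_ctx {
        unsigned int seq;               /* generation sampled at dump start */
        int inconsistent;
};

static void dump_check_consistent(struct dump_ctx *cb)
{
        if (cb->seq != base_seq)
                cb->inconsistent = 1;   /* reader should restart the dump */
}

int main(void)
{
        struct dump_ctx cb = { .seq = base_seq };

        base_seq++;                     /* a writer commits mid-dump */
        dump_check_consistent(&cb);
        printf("inconsistent=%d\n", cb.inconsistent);
        return 0;
}
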
@@ -407,6 +413,9 @@ static int nf_tables_updtable(struct nft_ctx *ctx)
        if (flags & ~NFT_TABLE_F_DORMANT)
                return -EINVAL;
 
+       if (flags == ctx->table->flags)
+               return 0;
+
        trans = nft_trans_alloc(ctx, NFT_MSG_NEWTABLE,
                                sizeof(struct nft_trans_table));
        if (trans == NULL)
@@ -514,7 +523,7 @@ static int nf_tables_newtable(struct sock *nlsk, struct sk_buff *skb,
                module_put(afi->owner);
                return err;
        }
-       list_add_tail(&table->list, &afi->tables);
+       list_add_tail_rcu(&table->list, &afi->tables);
        return 0;
 }
 
@@ -546,7 +555,7 @@ static int nf_tables_deltable(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&table->list);
+       list_del_rcu(&table->list);
        return 0;
 }
 
@@ -635,13 +644,20 @@ static int nft_dump_stats(struct sk_buff *skb, struct nft_stats __percpu *stats)
 {
        struct nft_stats *cpu_stats, total;
        struct nlattr *nest;
+       unsigned int seq;
+       u64 pkts, bytes;
        int cpu;
 
        memset(&total, 0, sizeof(total));
        for_each_possible_cpu(cpu) {
                cpu_stats = per_cpu_ptr(stats, cpu);
-               total.pkts += cpu_stats->pkts;
-               total.bytes += cpu_stats->bytes;
+               do {
+                       seq = u64_stats_fetch_begin_irq(&cpu_stats->syncp);
+                       pkts = cpu_stats->pkts;
+                       bytes = cpu_stats->bytes;
+               } while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
+               total.pkts += pkts;
+               total.bytes += bytes;
        }
        nest = nla_nest_start(skb, NFTA_CHAIN_COUNTERS);
        if (nest == NULL)
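
The stats-folding loop above wraps each per-cpu read in a u64_stats sequence-counter retry so pkts and bytes are sampled consistently against concurrent writers. A userspace approximation of that read side (simplified seqcount, names illustrative):

#include <stdatomic.h>
#include <stdio.h>

struct stats {
        atomic_uint seq;                /* odd while a writer is mid-update */
        unsigned long long pkts, bytes;
};

static void snapshot(struct stats *s,
                     unsigned long long *p, unsigned long long *b)
{
        unsigned int begin;

        do {
                begin = atomic_load(&s->seq);
                *p = s->pkts;
                *b = s->bytes;
        } while ((begin & 1) || begin != atomic_load(&s->seq));
}

int main(void)
{
        struct stats s = { 0 };
        unsigned long long p, b;

        s.pkts = 1;
        s.bytes = 64;
        snapshot(&s, &p, &b);
        printf("%llu pkts, %llu bytes\n", p, b);
        return 0;
}
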
@@ -761,12 +777,15 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
                                if (idx < s_idx)
                                        goto cont;
                                if (idx > s_idx)
@@ -778,17 +797,19 @@ static int nf_tables_dump_chains(struct sk_buff *skb,
                                                              NLM_F_MULTI,
                                                              afi->family, table, chain) < 0)
                                        goto done;
+
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
                }
        }
 done:
+       rcu_read_unlock();
        cb->args[0] = idx;
        return skb->len;
 }
 
-
 static int nf_tables_getchain(struct sock *nlsk, struct sk_buff *skb,
                              const struct nlmsghdr *nlh,
                              const struct nlattr * const nla[])
@@ -861,7 +882,7 @@ static struct nft_stats __percpu *nft_stats_alloc(const struct nlattr *attr)
        if (!tb[NFTA_COUNTER_BYTES] || !tb[NFTA_COUNTER_PACKETS])
                return ERR_PTR(-EINVAL);
 
-       newstats = alloc_percpu(struct nft_stats);
+       newstats = netdev_alloc_pcpu_stats(struct nft_stats);
        if (newstats == NULL)
                return ERR_PTR(-ENOMEM);
 
@@ -1077,7 +1098,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                        }
                        basechain->stats = stats;
                } else {
-                       stats = alloc_percpu(struct nft_stats);
+                       stats = netdev_alloc_pcpu_stats(struct nft_stats);
                        if (IS_ERR(stats)) {
                                module_put(type->owner);
                                kfree(basechain);
@@ -1130,7 +1151,7 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
                goto err2;
 
        table->use++;
-       list_add_tail(&chain->list, &table->chains);
+       list_add_tail_rcu(&chain->list, &table->chains);
        return 0;
 err2:
        if (!(table->flags & NFT_TABLE_F_DORMANT) &&
@@ -1180,7 +1201,7 @@ static int nf_tables_delchain(struct sock *nlsk, struct sk_buff *skb,
                return err;
 
        table->use--;
-       list_del(&chain->list);
+       list_del_rcu(&chain->list);
        return 0;
 }
 
@@ -1199,9 +1220,9 @@ int nft_register_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
        if (type->family == NFPROTO_UNSPEC)
-               list_add_tail(&type->list, &nf_tables_expressions);
+               list_add_tail_rcu(&type->list, &nf_tables_expressions);
        else
-               list_add(&type->list, &nf_tables_expressions);
+               list_add_rcu(&type->list, &nf_tables_expressions);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1216,7 +1237,7 @@ EXPORT_SYMBOL_GPL(nft_register_expr);
 void nft_unregister_expr(struct nft_expr_type *type)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&type->list);
+       list_del_rcu(&type->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_expr);
@@ -1549,16 +1570,17 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
        unsigned int idx = 0, s_idx = cb->args[0];
        struct net *net = sock_net(skb->sk);
        int family = nfmsg->nfgen_family;
-       u8 genctr = ACCESS_ONCE(net->nft.genctr);
-       u8 gencursor = ACCESS_ONCE(net->nft.gencursor);
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
                if (family != NFPROTO_UNSPEC && family != afi->family)
                        continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
-                       list_for_each_entry(chain, &table->chains, list) {
-                               list_for_each_entry(rule, &chain->rules, list) {
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       list_for_each_entry_rcu(chain, &table->chains, list) {
+                               list_for_each_entry_rcu(rule, &chain->rules, list) {
                                        if (!nft_rule_is_active(net, rule))
                                                goto cont;
                                        if (idx < s_idx)
@@ -1572,6 +1594,8 @@ static int nf_tables_dump_rules(struct sk_buff *skb,
                                                                      NLM_F_MULTI | NLM_F_APPEND,
                                                                      afi->family, table, chain, rule) < 0)
                                                goto done;
+
+                                       nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                        idx++;
                                }
@@ -1579,9 +1603,7 @@ cont:
                }
        }
 done:
-       /* Invalidate this dump, a transition to the new generation happened */
-       if (gencursor != net->nft.gencursor || genctr != net->nft.genctr)
-               return -EBUSY;
+       rcu_read_unlock();
 
        cb->args[0] = idx;
        return skb->len;
@@ -1730,6 +1752,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
                if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
                        return -EINVAL;
                handle = nf_tables_alloc_handle(table);
+
+               if (chain->use == UINT_MAX)
+                       return -EOVERFLOW;
        }
 
        if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1814,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
 
        if (nlh->nlmsg_flags & NLM_F_REPLACE) {
                if (nft_rule_is_active_next(net, old_rule)) {
-                       trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE,
+                       trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
                                                   old_rule);
                        if (trans == NULL) {
                                err = -ENOMEM;
                                goto err2;
                        }
                        nft_rule_disactivate_next(net, old_rule);
-                       list_add_tail(&rule->list, &old_rule->list);
+                       chain->use--;
+                       list_add_tail_rcu(&rule->list, &old_rule->list);
                } else {
                        err = -ENOENT;
                        goto err2;
@@ -1826,6 +1852,7 @@ err3:
                list_del_rcu(&nft_trans_rule(trans)->list);
                nft_rule_clear(net, nft_trans_rule(trans));
                nft_trans_destroy(trans);
+               chain->use++;
        }
 err2:
        nf_tables_rule_destroy(&ctx, rule);
@@ -1927,7 +1954,7 @@ static LIST_HEAD(nf_tables_set_ops);
 int nft_register_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_add_tail(&ops->list, &nf_tables_set_ops);
+       list_add_tail_rcu(&ops->list, &nf_tables_set_ops);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
        return 0;
 }
@@ -1936,7 +1963,7 @@ EXPORT_SYMBOL_GPL(nft_register_set);
 void nft_unregister_set(struct nft_set_ops *ops)
 {
        nfnl_lock(NFNL_SUBSYS_NFTABLES);
-       list_del(&ops->list);
+       list_del_rcu(&ops->list);
        nfnl_unlock(NFNL_SUBSYS_NFTABLES);
 }
 EXPORT_SYMBOL_GPL(nft_unregister_set);
@@ -2220,70 +2247,7 @@ err:
        return err;
 }
 
-static int nf_tables_dump_sets_table(struct nft_ctx *ctx, struct sk_buff *skb,
-                                    struct netlink_callback *cb)
-{
-       const struct nft_set *set;
-       unsigned int idx = 0, s_idx = cb->args[0];
-
-       if (cb->args[1])
-               return skb->len;
-
-       list_for_each_entry(set, &ctx->table->sets, list) {
-               if (idx < s_idx)
-                       goto cont;
-               if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
-                                      NLM_F_MULTI) < 0) {
-                       cb->args[0] = idx;
-                       goto done;
-               }
-cont:
-               idx++;
-       }
-       cb->args[1] = 1;
-done:
-       return skb->len;
-}
-
-static int nf_tables_dump_sets_family(struct nft_ctx *ctx, struct sk_buff *skb,
-                                     struct netlink_callback *cb)
-{
-       const struct nft_set *set;
-       unsigned int idx, s_idx = cb->args[0];
-       struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
-
-       if (cb->args[1])
-               return skb->len;
-
-       list_for_each_entry(table, &ctx->afi->tables, list) {
-               if (cur_table) {
-                       if (cur_table != table)
-                               continue;
-
-                       cur_table = NULL;
-               }
-               ctx->table = table;
-               idx = 0;
-               list_for_each_entry(set, &ctx->table->sets, list) {
-                       if (idx < s_idx)
-                               goto cont;
-                       if (nf_tables_fill_set(skb, ctx, set, NFT_MSG_NEWSET,
-                                              NLM_F_MULTI) < 0) {
-                               cb->args[0] = idx;
-                               cb->args[2] = (unsigned long) table;
-                               goto done;
-                       }
-cont:
-                       idx++;
-               }
-       }
-       cb->args[1] = 1;
-done:
-       return skb->len;
-}
-
-static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
-                                  struct netlink_callback *cb)
+static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
 {
        const struct nft_set *set;
        unsigned int idx, s_idx = cb->args[0];
@@ -2291,33 +2255,43 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
        struct nft_table *table, *cur_table = (struct nft_table *)cb->args[2];
        struct net *net = sock_net(skb->sk);
        int cur_family = cb->args[3];
+       struct nft_ctx *ctx = cb->data, ctx_set;
 
        if (cb->args[1])
                return skb->len;
 
-       list_for_each_entry(afi, &net->nft.af_info, list) {
+       rcu_read_lock();
+       cb->seq = net->nft.base_seq;
+
+       list_for_each_entry_rcu(afi, &net->nft.af_info, list) {
+               if (ctx->afi && ctx->afi != afi)
+                       continue;
+
                if (cur_family) {
                        if (afi->family != cur_family)
                                continue;
 
                        cur_family = 0;
                }
+               list_for_each_entry_rcu(table, &afi->tables, list) {
+                       if (ctx->table && ctx->table != table)
+                               continue;
 
-               list_for_each_entry(table, &afi->tables, list) {
                        if (cur_table) {
                                if (cur_table != table)
                                        continue;
 
                                cur_table = NULL;
                        }
-
-                       ctx->table = table;
-                       ctx->afi = afi;
                        idx = 0;
-                       list_for_each_entry(set, &ctx->table->sets, list) {
+                       list_for_each_entry_rcu(set, &table->sets, list) {
                                if (idx < s_idx)
                                        goto cont;
-                               if (nf_tables_fill_set(skb, ctx, set,
+
+                               ctx_set = *ctx;
+                               ctx_set.table = table;
+                               ctx_set.afi = afi;
+                               if (nf_tables_fill_set(skb, &ctx_set, set,
                                                       NFT_MSG_NEWSET,
                                                       NLM_F_MULTI) < 0) {
                                        cb->args[0] = idx;
@@ -2325,6 +2299,7 @@ static int nf_tables_dump_sets_all(struct nft_ctx *ctx, struct sk_buff *skb,
                                        cb->args[3] = afi->family;
                                        goto done;
                                }
+                               nl_dump_check_consistent(cb, nlmsg_hdr(skb));
 cont:
                                idx++;
                        }
@@ -2334,34 +2309,14 @@ cont:
        }
        cb->args[1] = 1;
 done:
+       rcu_read_unlock();
        return skb->len;
 }
 
-static int nf_tables_dump_sets(struct sk_buff *skb, struct netlink_callback *cb)
+static int nf_tables_dump_sets_done(struct netlink_callback *cb)
 {
-       const struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
-       struct nlattr *nla[NFTA_SET_MAX + 1];
-       struct nft_ctx ctx;
-       int err, ret;
-
-       err = nlmsg_parse(cb->nlh, sizeof(*nfmsg), nla, NFTA_SET_MAX,
-                         nft_set_policy);
-       if (err < 0)
-               return err;
-
-       err = nft_ctx_init_from_setattr(&ctx, cb->skb, cb->nlh, (void *)nla);
-       if (err < 0)
-               return err;
-
-       if (ctx.table == NULL) {
-               if (ctx.afi == NULL)
-                       ret = nf_tables_dump_sets_all(&ctx, skb, cb);
-               else
-                       ret = nf_tables_dump_sets_family(&ctx, skb, cb);
-       } else
-               ret = nf_tables_dump_sets_table(&ctx, skb, cb);
-
-       return ret;
+       kfree(cb->data);
+       return 0;
 }
 
 #define NFT_SET_INACTIVE       (1 << 15)       /* Internal set flag */
@@ -2384,7 +2339,17 @@ static int nf_tables_getset(struct sock *nlsk, struct sk_buff *skb,
        if (nlh->nlmsg_flags & NLM_F_DUMP) {
                struct netlink_dump_control c = {
                        .dump = nf_tables_dump_sets,
+                       .done = nf_tables_dump_sets_done,
                };
+               struct nft_ctx *ctx_dump;
+
+               ctx_dump = kmalloc(sizeof(*ctx_dump), GFP_KERNEL);
+               if (ctx_dump == NULL)
+                       return -ENOMEM;
+
+               *ctx_dump = ctx;
+               c.data = ctx_dump;
+
                return netlink_dump_start(nlsk, skb, nlh, &c);
        }
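
The set dump now carries its filtering context across dump continuations:
the context is kmalloc()ed once, handed to the netlink core through
netlink_dump_control.data, shows up in each invocation as cb->data, and is
freed by the .done callback when the dump completes or is aborted. A sketch
of the general shape (the my_* names are hypothetical):

struct my_dump_ctx {
	int family;		/* filter state kept between invocations */
};

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct my_dump_ctx *ctx = cb->data;

	/* fill skb with messages matching ctx->family here */
	return skb->len;	/* non-zero: call again for the next batch */
}

static int my_done(struct netlink_callback *cb)
{
	kfree(cb->data);	/* pairs with the kmalloc() performed
				 * before netlink_dump_start() */
	return 0;
}
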
 
@@ -2592,7 +2557,7 @@ static int nf_tables_newset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                goto err2;
 
-       list_add_tail(&set->list, &table->sets);
+       list_add_tail_rcu(&set->list, &table->sets);
        table->use++;
        return 0;
 
@@ -2612,7 +2577,7 @@ static void nft_set_destroy(struct nft_set *set)
 
 static void nf_tables_set_destroy(const struct nft_ctx *ctx, struct nft_set *set)
 {
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        nf_tables_set_notify(ctx, set, NFT_MSG_DELSET, GFP_ATOMIC);
        nft_set_destroy(set);
 }
@@ -2647,7 +2612,7 @@ static int nf_tables_delset(struct sock *nlsk, struct sk_buff *skb,
        if (err < 0)
                return err;
 
-       list_del(&set->list);
+       list_del_rcu(&set->list);
        ctx.table->use--;
        return 0;
 }
@@ -2699,14 +2664,14 @@ int nf_tables_bind_set(const struct nft_ctx *ctx, struct nft_set *set,
        }
 bind:
        binding->chain = ctx->chain;
-       list_add_tail(&binding->list, &set->bindings);
+       list_add_tail_rcu(&binding->list, &set->bindings);
        return 0;
 }
 
 void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
                          struct nft_set_binding *binding)
 {
-       list_del(&binding->list);
+       list_del_rcu(&binding->list);
 
        if (list_empty(&set->bindings) && set->flags & NFT_SET_ANONYMOUS &&
            !(set->flags & NFT_SET_INACTIVE))
@@ -2845,7 +2810,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
                goto nla_put_failure;
 
        nfmsg = nlmsg_data(nlh);
-       nfmsg->nfgen_family = NFPROTO_UNSPEC;
+       nfmsg->nfgen_family = ctx.afi->family;
        nfmsg->version      = NFNETLINK_V0;
        nfmsg->res_id       = 0;
 
@@ -3108,6 +3073,9 @@ static int nf_tables_newsetelem(struct sock *nlsk, struct sk_buff *skb,
        struct nft_ctx ctx;
        int rem, err = 0;
 
+       if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
+               return -EINVAL;
+
        err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, true);
        if (err < 0)
                return err;
@@ -3191,6 +3159,9 @@ static int nf_tables_delsetelem(struct sock *nlsk, struct sk_buff *skb,
        struct nft_ctx ctx;
        int rem, err = 0;
 
+       if (nla[NFTA_SET_ELEM_LIST_ELEMENTS] == NULL)
+               return -EINVAL;
+
        err = nft_ctx_init_from_elemattr(&ctx, skb, nlh, nla, false);
        if (err < 0)
                return err;
@@ -3341,7 +3312,7 @@ static int nf_tables_commit(struct sk_buff *skb)
        struct nft_set *set;
 
        /* Bump generation counter, invalidate any dump in progress */
-       net->nft.genctr++;
+       while (++net->nft.base_seq == 0);
 
        /* A new generation has just started */
        net->nft.gencursor = gencursor_next(net);
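
The bump deliberately skips zero: nl_dump_check_consistent() treats a
prev_seq of 0 as "nothing recorded yet", so generation 0 must never be
handed out (nf_tables_init_net() below starts base_seq at 1 for the same
reason). Spelled out, the one-liner is:

do {
	net->nft.base_seq++;
} while (net->nft.base_seq == 0);	/* 0 is reserved; wrap past it */
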
@@ -3486,12 +3457,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                                }
                                nft_trans_destroy(trans);
                        } else {
-                               list_del(&trans->ctx.table->list);
+                               list_del_rcu(&trans->ctx.table->list);
                        }
                        break;
                case NFT_MSG_DELTABLE:
-                       list_add_tail(&trans->ctx.table->list,
-                                     &trans->ctx.afi->tables);
+                       list_add_tail_rcu(&trans->ctx.table->list,
+                                         &trans->ctx.afi->tables);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWCHAIN:
@@ -3502,7 +3473,7 @@ static int nf_tables_abort(struct sk_buff *skb)
                                nft_trans_destroy(trans);
                        } else {
                                trans->ctx.table->use--;
-                               list_del(&trans->ctx.chain->list);
+                               list_del_rcu(&trans->ctx.chain->list);
                                if (!(trans->ctx.table->flags & NFT_TABLE_F_DORMANT) &&
                                    trans->ctx.chain->flags & NFT_BASE_CHAIN) {
                                        nf_unregister_hooks(nft_base_chain(trans->ctx.chain)->ops,
@@ -3512,8 +3483,8 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_DELCHAIN:
                        trans->ctx.table->use++;
-                       list_add_tail(&trans->ctx.chain->list,
-                                     &trans->ctx.table->chains);
+                       list_add_tail_rcu(&trans->ctx.chain->list,
+                                         &trans->ctx.table->chains);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWRULE:
@@ -3527,12 +3498,12 @@ static int nf_tables_abort(struct sk_buff *skb)
                        break;
                case NFT_MSG_NEWSET:
                        trans->ctx.table->use--;
-                       list_del(&nft_trans_set(trans)->list);
+                       list_del_rcu(&nft_trans_set(trans)->list);
                        break;
                case NFT_MSG_DELSET:
                        trans->ctx.table->use++;
-                       list_add_tail(&nft_trans_set(trans)->list,
-                                     &trans->ctx.table->sets);
+                       list_add_tail_rcu(&nft_trans_set(trans)->list,
+                                         &trans->ctx.table->sets);
                        nft_trans_destroy(trans);
                        break;
                case NFT_MSG_NEWSETELEM:
@@ -3946,6 +3917,7 @@ static int nf_tables_init_net(struct net *net)
 {
        INIT_LIST_HEAD(&net->nft.af_info);
        INIT_LIST_HEAD(&net->nft.commit_list);
+       net->nft.base_seq = 1;
        return 0;
 }
 
diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c
index 345acfb..3b90eb2 100644 (file)
@@ -109,7 +109,7 @@ nft_do_chain(struct nft_pktinfo *pkt, const struct nf_hook_ops *ops)
        struct nft_data data[NFT_REG_MAX + 1];
        unsigned int stackptr = 0;
        struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE];
-       struct nft_stats __percpu *stats;
+       struct nft_stats *stats;
        int rulenum;
        /*
         * Cache cursor to avoid problems in case that the cursor is updated
@@ -205,9 +205,11 @@ next_rule:
                nft_trace_packet(pkt, basechain, -1, NFT_TRACE_POLICY);
 
        rcu_read_lock_bh();
-       stats = rcu_dereference(nft_base_chain(basechain)->stats);
-       __this_cpu_inc(stats->pkts);
-       __this_cpu_add(stats->bytes, pkt->skb->len);
+       stats = this_cpu_ptr(rcu_dereference(nft_base_chain(basechain)->stats));
+       u64_stats_update_begin(&stats->syncp);
+       stats->pkts++;
+       stats->bytes += pkt->skb->len;
+       u64_stats_update_end(&stats->syncp);
        rcu_read_unlock_bh();
 
        return nft_base_chain(basechain)->policy;
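
This is the writer half of the scheme read by nft_dump_stats() above: the
per-cpu slot comes from this_cpu_ptr() on the RCU-protected pointer, and
both increments sit inside u64_stats_update_begin()/end(). On 64-bit the
brackets compile away; on 32-bit SMP they drive the seqcount that the
reader retries on (simplified from include/linux/u64_stats_sync.h):

static inline void u64_stats_update_begin(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_begin(&syncp->seq);
#endif
}

static inline void u64_stats_update_end(struct u64_stats_sync *syncp)
{
#if BITS_PER_LONG == 32 && defined(CONFIG_SMP)
	write_seqcount_end(&syncp->seq);
#endif
}
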
diff --git a/net/netfilter/nfnetlink_acct.c b/net/netfilter/nfnetlink_acct.c
index 2baa125..3ea0eac 100644 (file)
@@ -41,6 +41,7 @@ struct nf_acct {
 };
 
 #define NFACCT_F_QUOTA (NFACCT_F_QUOTA_PKTS | NFACCT_F_QUOTA_BYTES)
+#define NFACCT_OVERQUOTA_BIT   2       /* NFACCT_F_OVERQUOTA */
 
 static int
 nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
@@ -77,7 +78,8 @@ nfnl_acct_new(struct sock *nfnl, struct sk_buff *skb,
                        smp_mb__before_atomic();
                        /* reset overquota flag if quota is enabled. */
                        if ((matching->flags & NFACCT_F_QUOTA))
-                               clear_bit(NFACCT_F_OVERQUOTA, &matching->flags);
+                               clear_bit(NFACCT_OVERQUOTA_BIT,
+                                         &matching->flags);
                        return 0;
                }
                return -EBUSY;
@@ -129,6 +131,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
        struct nfgenmsg *nfmsg;
        unsigned int flags = portid ? NLM_F_MULTI : 0;
        u64 pkts, bytes;
+       u32 old_flags;
 
        event |= NFNL_SUBSYS_ACCT << 8;
        nlh = nlmsg_put(skb, portid, seq, event, sizeof(*nfmsg), flags);
@@ -143,12 +146,13 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
        if (nla_put_string(skb, NFACCT_NAME, acct->name))
                goto nla_put_failure;
 
+       old_flags = acct->flags;
        if (type == NFNL_MSG_ACCT_GET_CTRZERO) {
                pkts = atomic64_xchg(&acct->pkts, 0);
                bytes = atomic64_xchg(&acct->bytes, 0);
                smp_mb__before_atomic();
                if (acct->flags & NFACCT_F_QUOTA)
-                       clear_bit(NFACCT_F_OVERQUOTA, &acct->flags);
+                       clear_bit(NFACCT_OVERQUOTA_BIT, &acct->flags);
        } else {
                pkts = atomic64_read(&acct->pkts);
                bytes = atomic64_read(&acct->bytes);
@@ -160,7 +164,7 @@ nfnl_acct_fill_info(struct sk_buff *skb, u32 portid, u32 seq, u32 type,
        if (acct->flags & NFACCT_F_QUOTA) {
                u64 *quota = (u64 *)acct->data;
 
-               if (nla_put_be32(skb, NFACCT_FLAGS, htonl(acct->flags)) ||
+               if (nla_put_be32(skb, NFACCT_FLAGS, htonl(old_flags)) ||
                    nla_put_be64(skb, NFACCT_QUOTA, cpu_to_be64(*quota)))
                        goto nla_put_failure;
        }
@@ -412,7 +416,7 @@ int nfnl_acct_overquota(const struct sk_buff *skb, struct nf_acct *nfacct)
        ret = now > *quota;
 
        if (now >= *quota &&
-           !test_and_set_bit(NFACCT_F_OVERQUOTA, &nfacct->flags)) {
+           !test_and_set_bit(NFACCT_OVERQUOTA_BIT, &nfacct->flags)) {
                nfnl_overquota_report(nfacct);
        }
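
The renamed constant fixes a classic bitops mixup: clear_bit() and
test_and_set_bit() take a bit index, while NFACCT_F_OVERQUOTA is a mask
(1 << 2). Passing the mask made the code toggle bit 4 (0x10) instead of
bit 2, so the overquota state never lined up with what NFACCT_FLAGS
reported. In miniature:

unsigned long flags = 0;

set_bit(NFACCT_F_OVERQUOTA, &flags);	/* wrong: mask 4 as index, sets 0x10 */
set_bit(NFACCT_OVERQUOTA_BIT, &flags);	/* right: index 2, sets 0x04 */
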
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index d292c8d..a11c5ff 100644 (file)
@@ -773,6 +773,7 @@ nfulnl_recv_unsupp(struct sock *ctnl, struct sk_buff *skb,
 
 static struct nf_logger nfulnl_logger __read_mostly = {
        .name   = "nfnetlink_log",
+       .type   = NF_LOG_TYPE_ULOG,
        .logfn  = &nfulnl_log_packet,
        .me     = THIS_MODULE,
 };
@@ -1105,6 +1106,9 @@ MODULE_DESCRIPTION("netfilter userspace logging");
 MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_ULOG);
+MODULE_ALIAS_NF_LOGGER(AF_INET, 1);
+MODULE_ALIAS_NF_LOGGER(AF_INET6, 1);
+MODULE_ALIAS_NF_LOGGER(AF_BRIDGE, 1);
 
 module_init(nfnetlink_log_init);
 module_exit(nfnetlink_log_fini);
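
Loggers now register with an explicit type (NF_LOG_TYPE_ULOG here,
NF_LOG_TYPE_LOG for the printk-style backend), which lets
nf_logger_find_get() select a backend per (family, type) pair and lets
request_module() autoload it through the aliases above. The alias macro
introduced by this series simply encodes that pair into a module alias
string (from include/net/netfilter/nf_log.h):

#define MODULE_ALIAS_NF_LOGGER(family, type) \
	MODULE_ALIAS("nf-logger-" __stringify(family) "-" __stringify(type))
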
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8a779be..1840989 100644 (file)
@@ -195,6 +195,15 @@ static void
 nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
        struct xt_target *target = expr->ops->data;
+       void *info = nft_expr_priv(expr);
+       struct xt_tgdtor_param par;
+
+       par.net = ctx->net;
+       par.target = target;
+       par.targinfo = info;
+       par.family = ctx->afi->family;
+       if (par.target->destroy != NULL)
+               par.target->destroy(&par);
 
        module_put(target->me);
 }
@@ -382,6 +391,15 @@ static void
 nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
 {
        struct xt_match *match = expr->ops->data;
+       void *info = nft_expr_priv(expr);
+       struct xt_mtdtor_param par;
+
+       par.net = ctx->net;
+       par.match = match;
+       par.matchinfo = info;
+       par.family = ctx->afi->family;
+       if (par.match->destroy != NULL)
+               par.match->destroy(&par);
 
        module_put(match->me);
 }
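
nft_compat wraps legacy xtables extensions, and some of those release
resources in their destructor (dropping a conntrack module reference,
undoing work done in checkentry); until this change the hook was never
invoked from nftables, so such resources leaked. The destructor side of a
legacy extension looks roughly like this (hypothetical example):

static void my_match_destroy(const struct xt_mtdtor_param *par)
{
	/* e.g. drop the conntrack module reference taken in checkentry */
	nf_ct_l3proto_module_put(par->family);
}
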
diff --git a/net/netfilter/nft_hash.c b/net/netfilter/nft_hash.c
index 4080ed6..28fb8f3 100644 (file)
 #include <linux/log2.h>
 #include <linux/jhash.h>
 #include <linux/netlink.h>
-#include <linux/vmalloc.h>
+#include <linux/rhashtable.h>
 #include <linux/netfilter.h>
 #include <linux/netfilter/nf_tables.h>
 #include <net/netfilter/nf_tables.h>
 
-#define NFT_HASH_MIN_SIZE      4UL
-
-struct nft_hash {
-       struct nft_hash_table __rcu     *tbl;
-};
-
-struct nft_hash_table {
-       unsigned int                    size;
-       struct nft_hash_elem __rcu      *buckets[];
-};
+/* We target a hash table size of 4, element hint is 75% of final size */
+#define NFT_HASH_ELEMENT_HINT 3
 
 struct nft_hash_elem {
-       struct nft_hash_elem __rcu      *next;
+       struct rhash_head               node;
        struct nft_data                 key;
        struct nft_data                 data[];
 };
 
-#define nft_hash_for_each_entry(i, head) \
-       for (i = nft_dereference(head); i != NULL; i = nft_dereference(i->next))
-#define nft_hash_for_each_entry_rcu(i, head) \
-       for (i = rcu_dereference(head); i != NULL; i = rcu_dereference(i->next))
-
-static u32 nft_hash_rnd __read_mostly;
-static bool nft_hash_rnd_initted __read_mostly;
-
-static unsigned int nft_hash_data(const struct nft_data *data,
-                                 unsigned int hsize, unsigned int len)
-{
-       unsigned int h;
-
-       h = jhash(data->data, len, nft_hash_rnd);
-       return h & (hsize - 1);
-}
-
 static bool nft_hash_lookup(const struct nft_set *set,
                            const struct nft_data *key,
                            struct nft_data *data)
 {
-       const struct nft_hash *priv = nft_set_priv(set);
-       const struct nft_hash_table *tbl = rcu_dereference(priv->tbl);
+       const struct rhashtable *priv = nft_set_priv(set);
        const struct nft_hash_elem *he;
-       unsigned int h;
-
-       h = nft_hash_data(key, tbl->size, set->klen);
-       nft_hash_for_each_entry_rcu(he, tbl->buckets[h]) {
-               if (nft_data_cmp(&he->key, key, set->klen))
-                       continue;
-               if (set->flags & NFT_SET_MAP)
-                       nft_data_copy(data, he->data);
-               return true;
-       }
-       return false;
-}
-
-static void nft_hash_tbl_free(const struct nft_hash_table *tbl)
-{
-       kvfree(tbl);
-}
-
-static unsigned int nft_hash_tbl_size(unsigned int nelem)
-{
-       return max(roundup_pow_of_two(nelem * 4 / 3), NFT_HASH_MIN_SIZE);
-}
-
-static struct nft_hash_table *nft_hash_tbl_alloc(unsigned int nbuckets)
-{
-       struct nft_hash_table *tbl;
-       size_t size;
-
-       size = sizeof(*tbl) + nbuckets * sizeof(tbl->buckets[0]);
-       tbl = kzalloc(size, GFP_KERNEL | __GFP_REPEAT | __GFP_NOWARN);
-       if (tbl == NULL)
-               tbl = vzalloc(size);
-       if (tbl == NULL)
-               return NULL;
-       tbl->size = nbuckets;
-
-       return tbl;
-}
-
-static void nft_hash_chain_unzip(const struct nft_set *set,
-                                const struct nft_hash_table *ntbl,
-                                struct nft_hash_table *tbl, unsigned int n)
-{
-       struct nft_hash_elem *he, *last, *next;
-       unsigned int h;
-
-       he = nft_dereference(tbl->buckets[n]);
-       if (he == NULL)
-               return;
-       h = nft_hash_data(&he->key, ntbl->size, set->klen);
-
-       /* Find last element of first chain hashing to bucket h */
-       last = he;
-       nft_hash_for_each_entry(he, he->next) {
-               if (nft_hash_data(&he->key, ntbl->size, set->klen) != h)
-                       break;
-               last = he;
-       }
-
-       /* Unlink first chain from the old table */
-       RCU_INIT_POINTER(tbl->buckets[n], last->next);
 
-       /* If end of chain reached, done */
-       if (he == NULL)
-               return;
+       he = rhashtable_lookup(priv, key);
+       if (he && set->flags & NFT_SET_MAP)
+               nft_data_copy(data, he->data);
 
-       /* Find first element of second chain hashing to bucket h */
-       next = NULL;
-       nft_hash_for_each_entry(he, he->next) {
-               if (nft_hash_data(&he->key, ntbl->size, set->klen) != h)
-                       continue;
-               next = he;
-               break;
-       }
-
-       /* Link the two chains */
-       RCU_INIT_POINTER(last->next, next);
-}
-
-static int nft_hash_tbl_expand(const struct nft_set *set, struct nft_hash *priv)
-{
-       struct nft_hash_table *tbl = nft_dereference(priv->tbl), *ntbl;
-       struct nft_hash_elem *he;
-       unsigned int i, h;
-       bool complete;
-
-       ntbl = nft_hash_tbl_alloc(tbl->size * 2);
-       if (ntbl == NULL)
-               return -ENOMEM;
-
-       /* Link new table's buckets to first element in the old table
-        * hashing to the new bucket.
-        */
-       for (i = 0; i < ntbl->size; i++) {
-               h = i < tbl->size ? i : i - tbl->size;
-               nft_hash_for_each_entry(he, tbl->buckets[h]) {
-                       if (nft_hash_data(&he->key, ntbl->size, set->klen) != i)
-                               continue;
-                       RCU_INIT_POINTER(ntbl->buckets[i], he);
-                       break;
-               }
-       }
-
-       /* Publish new table */
-       rcu_assign_pointer(priv->tbl, ntbl);
-
-       /* Unzip interleaved hash chains */
-       do {
-               /* Wait for readers to use new table/unzipped chains */
-               synchronize_rcu();
-
-               complete = true;
-               for (i = 0; i < tbl->size; i++) {
-                       nft_hash_chain_unzip(set, ntbl, tbl, i);
-                       if (tbl->buckets[i] != NULL)
-                               complete = false;
-               }
-       } while (!complete);
-
-       nft_hash_tbl_free(tbl);
-       return 0;
-}
-
-static int nft_hash_tbl_shrink(const struct nft_set *set, struct nft_hash *priv)
-{
-       struct nft_hash_table *tbl = nft_dereference(priv->tbl), *ntbl;
-       struct nft_hash_elem __rcu **pprev;
-       unsigned int i;
-
-       ntbl = nft_hash_tbl_alloc(tbl->size / 2);
-       if (ntbl == NULL)
-               return -ENOMEM;
-
-       for (i = 0; i < ntbl->size; i++) {
-               ntbl->buckets[i] = tbl->buckets[i];
-
-               for (pprev = &ntbl->buckets[i]; *pprev != NULL;
-                    pprev = &nft_dereference(*pprev)->next)
-                       ;
-               RCU_INIT_POINTER(*pprev, tbl->buckets[i + ntbl->size]);
-       }
-
-       /* Publish new table */
-       rcu_assign_pointer(priv->tbl, ntbl);
-       synchronize_rcu();
-
-       nft_hash_tbl_free(tbl);
-       return 0;
+       return !!he;
 }
 
 static int nft_hash_insert(const struct nft_set *set,
                           const struct nft_set_elem *elem)
 {
-       struct nft_hash *priv = nft_set_priv(set);
-       struct nft_hash_table *tbl = nft_dereference(priv->tbl);
+       struct rhashtable *priv = nft_set_priv(set);
        struct nft_hash_elem *he;
-       unsigned int size, h;
+       unsigned int size;
 
        if (elem->flags != 0)
                return -EINVAL;
@@ -234,13 +65,7 @@ static int nft_hash_insert(const struct nft_set *set,
        if (set->flags & NFT_SET_MAP)
                nft_data_copy(he->data, &elem->data);
 
-       h = nft_hash_data(&he->key, tbl->size, set->klen);
-       RCU_INIT_POINTER(he->next, tbl->buckets[h]);
-       rcu_assign_pointer(tbl->buckets[h], he);
-
-       /* Expand table when exceeding 75% load */
-       if (set->nelems + 1 > tbl->size / 4 * 3)
-               nft_hash_tbl_expand(set, priv);
+       rhashtable_insert(priv, &he->node, GFP_KERNEL);
 
        return 0;
 }
@@ -257,36 +82,31 @@ static void nft_hash_elem_destroy(const struct nft_set *set,
 static void nft_hash_remove(const struct nft_set *set,
                            const struct nft_set_elem *elem)
 {
-       struct nft_hash *priv = nft_set_priv(set);
-       struct nft_hash_table *tbl = nft_dereference(priv->tbl);
-       struct nft_hash_elem *he, __rcu **pprev;
+       struct rhashtable *priv = nft_set_priv(set);
+       struct rhash_head *he, __rcu **pprev;
 
        pprev = elem->cookie;
-       he = nft_dereference((*pprev));
+       he = rht_dereference((*pprev), priv);
+
+       rhashtable_remove_pprev(priv, he, pprev, GFP_KERNEL);
 
-       RCU_INIT_POINTER(*pprev, he->next);
        synchronize_rcu();
        kfree(he);
-
-       /* Shrink table beneath 30% load */
-       if (set->nelems - 1 < tbl->size * 3 / 10 &&
-           tbl->size > NFT_HASH_MIN_SIZE)
-               nft_hash_tbl_shrink(set, priv);
 }
 
 static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 {
-       const struct nft_hash *priv = nft_set_priv(set);
-       const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
-       struct nft_hash_elem __rcu * const *pprev;
+       const struct rhashtable *priv = nft_set_priv(set);
+       const struct bucket_table *tbl = rht_dereference_rcu(priv->tbl, priv);
+       struct rhash_head __rcu * const *pprev;
        struct nft_hash_elem *he;
-       unsigned int h;
+       u32 h;
 
-       h = nft_hash_data(&elem->key, tbl->size, set->klen);
+       h = rhashtable_hashfn(priv, &elem->key, set->klen);
        pprev = &tbl->buckets[h];
-       nft_hash_for_each_entry(he, tbl->buckets[h]) {
+       rht_for_each_entry_rcu(he, tbl->buckets[h], node) {
                if (nft_data_cmp(&he->key, &elem->key, set->klen)) {
-                       pprev = &he->next;
+                       pprev = &he->node.next;
                        continue;
                }
 
@@ -302,14 +122,15 @@ static int nft_hash_get(const struct nft_set *set, struct nft_set_elem *elem)
 static void nft_hash_walk(const struct nft_ctx *ctx, const struct nft_set *set,
                          struct nft_set_iter *iter)
 {
-       const struct nft_hash *priv = nft_set_priv(set);
-       const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
+       const struct rhashtable *priv = nft_set_priv(set);
+       const struct bucket_table *tbl;
        const struct nft_hash_elem *he;
        struct nft_set_elem elem;
        unsigned int i;
 
+       tbl = rht_dereference_rcu(priv->tbl, priv);
        for (i = 0; i < tbl->size; i++) {
-               nft_hash_for_each_entry(he, tbl->buckets[i]) {
+               rht_for_each_entry_rcu(he, tbl->buckets[i], node) {
                        if (iter->count < iter->skip)
                                goto cont;
 
@@ -329,48 +150,46 @@ cont:
 
 static unsigned int nft_hash_privsize(const struct nlattr * const nla[])
 {
-       return sizeof(struct nft_hash);
+       return sizeof(struct rhashtable);
+}
+
+static int lockdep_nfnl_lock_is_held(void)
+{
+       return lockdep_nfnl_is_held(NFNL_SUBSYS_NFTABLES);
 }
 
 static int nft_hash_init(const struct nft_set *set,
                         const struct nft_set_desc *desc,
                         const struct nlattr * const tb[])
 {
-       struct nft_hash *priv = nft_set_priv(set);
-       struct nft_hash_table *tbl;
-       unsigned int size;
+       struct rhashtable *priv = nft_set_priv(set);
+       struct rhashtable_params params = {
+               .nelem_hint = desc->size ? : NFT_HASH_ELEMENT_HINT,
+               .head_offset = offsetof(struct nft_hash_elem, node),
+               .key_offset = offsetof(struct nft_hash_elem, key),
+               .key_len = set->klen,
+               .hashfn = jhash,
+               .grow_decision = rht_grow_above_75,
+               .shrink_decision = rht_shrink_below_30,
+               .mutex_is_held = lockdep_nfnl_lock_is_held,
+       };
 
-       if (unlikely(!nft_hash_rnd_initted)) {
-               get_random_bytes(&nft_hash_rnd, 4);
-               nft_hash_rnd_initted = true;
-       }
-
-       size = NFT_HASH_MIN_SIZE;
-       if (desc->size)
-               size = nft_hash_tbl_size(desc->size);
-
-       tbl = nft_hash_tbl_alloc(size);
-       if (tbl == NULL)
-               return -ENOMEM;
-       RCU_INIT_POINTER(priv->tbl, tbl);
-       return 0;
+       return rhashtable_init(priv, &params);
 }
 
 static void nft_hash_destroy(const struct nft_set *set)
 {
-       const struct nft_hash *priv = nft_set_priv(set);
-       const struct nft_hash_table *tbl = nft_dereference(priv->tbl);
+       const struct rhashtable *priv = nft_set_priv(set);
+       const struct bucket_table *tbl;
        struct nft_hash_elem *he, *next;
        unsigned int i;
 
-       for (i = 0; i < tbl->size; i++) {
-               for (he = nft_dereference(tbl->buckets[i]); he != NULL;
-                    he = next) {
-                       next = nft_dereference(he->next);
+       tbl = rht_dereference(priv->tbl, priv);
+       for (i = 0; i < tbl->size; i++)
+               rht_for_each_entry_safe(he, next, tbl->buckets[i], priv, node)
                        nft_hash_elem_destroy(set, he);
-               }
-       }
-       kfree(tbl);
+
+       rhashtable_destroy(priv);
 }
 
 static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
@@ -383,8 +202,8 @@ static bool nft_hash_estimate(const struct nft_set_desc *desc, u32 features,
                esize += FIELD_SIZEOF(struct nft_hash_elem, data[0]);
 
        if (desc->size) {
-               est->size = sizeof(struct nft_hash) +
-                           nft_hash_tbl_size(desc->size) *
+               est->size = sizeof(struct rhashtable) +
+                           roundup_pow_of_two(desc->size * 4 / 3) *
                            sizeof(struct nft_hash_elem *) +
                            desc->size * esize;
        } else {
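
The conversion retires the hand-rolled expand/shrink/unzip machinery in
favour of the then-new generic resizable hash table (lib/rhashtable.c):
the parameters tell the library where the hash head and key live inside
each element, and growing/shrinking happens behind the API. A minimal
standalone user of the same 3.17-era interface (struct my_elem and
my_table_init() are hypothetical; the lockdep hook .mutex_is_held is
omitted for brevity):

#include <linux/jhash.h>
#include <linux/rhashtable.h>

struct my_elem {
	struct rhash_head node;		/* hash chain linkage */
	u32 key;
};

static struct rhashtable my_ht;

static int my_table_init(void)
{
	struct rhashtable_params params = {
		.nelem_hint	 = 8,	/* expected element count */
		.head_offset	 = offsetof(struct my_elem, node),
		.key_offset	 = offsetof(struct my_elem, key),
		.key_len	 = sizeof(u32),
		.hashfn		 = jhash,
		.grow_decision	 = rht_grow_above_75,
		.shrink_decision = rht_shrink_below_30,
	};

	return rhashtable_init(&my_ht, &params);
}
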
diff --git a/net/netfilter/nft_log.c b/net/netfilter/nft_log.c
index 10cfb15..bde05f2 100644 (file)
@@ -1,5 +1,6 @@
 /*
  * Copyright (c) 2008-2009 Patrick McHardy <kaber@trash.net>
+ * Copyright (c) 2012-2014 Pablo Neira Ayuso <pablo@netfilter.org>
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
@@ -41,6 +42,8 @@ static const struct nla_policy nft_log_policy[NFTA_LOG_MAX + 1] = {
        [NFTA_LOG_PREFIX]       = { .type = NLA_STRING },
        [NFTA_LOG_SNAPLEN]      = { .type = NLA_U32 },
        [NFTA_LOG_QTHRESHOLD]   = { .type = NLA_U16 },
+       [NFTA_LOG_LEVEL]        = { .type = NLA_U32 },
+       [NFTA_LOG_FLAGS]        = { .type = NLA_U32 },
 };
 
 static int nft_log_init(const struct nft_ctx *ctx,
@@ -50,6 +53,7 @@ static int nft_log_init(const struct nft_ctx *ctx,
        struct nft_log *priv = nft_expr_priv(expr);
        struct nf_loginfo *li = &priv->loginfo;
        const struct nlattr *nla;
+       int ret;
 
        nla = tb[NFTA_LOG_PREFIX];
        if (nla != NULL) {
@@ -57,30 +61,74 @@ static int nft_log_init(const struct nft_ctx *ctx,
                if (priv->prefix == NULL)
                        return -ENOMEM;
                nla_strlcpy(priv->prefix, nla, nla_len(nla) + 1);
-       } else
+       } else {
                priv->prefix = (char *)nft_log_null_prefix;
+       }
 
-       li->type = NF_LOG_TYPE_ULOG;
+       li->type = NF_LOG_TYPE_LOG;
+       if (tb[NFTA_LOG_LEVEL] != NULL &&
+           tb[NFTA_LOG_GROUP] != NULL)
+               return -EINVAL;
        if (tb[NFTA_LOG_GROUP] != NULL)
+               li->type = NF_LOG_TYPE_ULOG;
+
+       switch (li->type) {
+       case NF_LOG_TYPE_LOG:
+               if (tb[NFTA_LOG_LEVEL] != NULL) {
+                       li->u.log.level =
+                               ntohl(nla_get_be32(tb[NFTA_LOG_LEVEL]));
+               } else {
+                       li->u.log.level = 4;
+               }
+               if (tb[NFTA_LOG_FLAGS] != NULL) {
+                       li->u.log.logflags =
+                               ntohl(nla_get_be32(tb[NFTA_LOG_FLAGS]));
+               }
+               break;
+       case NF_LOG_TYPE_ULOG:
                li->u.ulog.group = ntohs(nla_get_be16(tb[NFTA_LOG_GROUP]));
+               if (tb[NFTA_LOG_SNAPLEN] != NULL) {
+                       li->u.ulog.copy_len =
+                               ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
+               }
+               if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
+                       li->u.ulog.qthreshold =
+                               ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+               }
+               break;
+       }
 
-       if (tb[NFTA_LOG_SNAPLEN] != NULL)
-               li->u.ulog.copy_len = ntohl(nla_get_be32(tb[NFTA_LOG_SNAPLEN]));
-       if (tb[NFTA_LOG_QTHRESHOLD] != NULL) {
-               li->u.ulog.qthreshold =
-                       ntohs(nla_get_be16(tb[NFTA_LOG_QTHRESHOLD]));
+       if (ctx->afi->family == NFPROTO_INET) {
+               ret = nf_logger_find_get(NFPROTO_IPV4, li->type);
+               if (ret < 0)
+                       return ret;
+
+               ret = nf_logger_find_get(NFPROTO_IPV6, li->type);
+               if (ret < 0) {
+                       nf_logger_put(NFPROTO_IPV4, li->type);
+                       return ret;
+               }
+               return 0;
        }
 
-       return 0;
+       return nf_logger_find_get(ctx->afi->family, li->type);
 }
 
 static void nft_log_destroy(const struct nft_ctx *ctx,
                            const struct nft_expr *expr)
 {
        struct nft_log *priv = nft_expr_priv(expr);
+       struct nf_loginfo *li = &priv->loginfo;
 
        if (priv->prefix != nft_log_null_prefix)
                kfree(priv->prefix);
+
+       if (ctx->afi->family == NFPROTO_INET) {
+               nf_logger_put(NFPROTO_IPV4, li->type);
+               nf_logger_put(NFPROTO_IPV6, li->type);
+       } else {
+               nf_logger_put(ctx->afi->family, li->type);
+       }
 }
 
 static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
@@ -91,17 +139,33 @@ static int nft_log_dump(struct sk_buff *skb, const struct nft_expr *expr)
        if (priv->prefix != nft_log_null_prefix)
                if (nla_put_string(skb, NFTA_LOG_PREFIX, priv->prefix))
                        goto nla_put_failure;
-       if (li->u.ulog.group)
-               if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
-                       goto nla_put_failure;
-       if (li->u.ulog.copy_len)
-               if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
-                                htonl(li->u.ulog.copy_len)))
+       switch (li->type) {
+       case NF_LOG_TYPE_LOG:
+               if (nla_put_be32(skb, NFTA_LOG_LEVEL, htonl(li->u.log.level)))
                        goto nla_put_failure;
-       if (li->u.ulog.qthreshold)
-               if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
-                                htons(li->u.ulog.qthreshold)))
+
+               if (li->u.log.logflags) {
+                       if (nla_put_be32(skb, NFTA_LOG_FLAGS,
+                                        htonl(li->u.log.logflags)))
+                               goto nla_put_failure;
+               }
+               break;
+       case NF_LOG_TYPE_ULOG:
+               if (nla_put_be16(skb, NFTA_LOG_GROUP, htons(li->u.ulog.group)))
                        goto nla_put_failure;
+
+               if (li->u.ulog.copy_len) {
+                       if (nla_put_be32(skb, NFTA_LOG_SNAPLEN,
+                                        htonl(li->u.ulog.copy_len)))
+                               goto nla_put_failure;
+               }
+               if (li->u.ulog.qthreshold) {
+                       if (nla_put_be16(skb, NFTA_LOG_QTHRESHOLD,
+                                        htons(li->u.ulog.qthreshold)))
+                               goto nla_put_failure;
+               }
+               break;
+       }
        return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index a0195d2..79ff58c 100644 (file)
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
        if (nla_put_be32(skb,
                         NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
                goto nla_put_failure;
-       if (nla_put_be32(skb,
-                        NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min)))
-               goto nla_put_failure;
-       if (nla_put_be32(skb,
-                        NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max)))
-               goto nla_put_failure;
+       if (priv->sreg_proto_min) {
+               if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
+                                htonl(priv->sreg_proto_min)))
+                       goto nla_put_failure;
+               if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
+                                htonl(priv->sreg_proto_max)))
+                       goto nla_put_failure;
+       }
        return 0;
 
 nla_put_failure:
diff --git a/net/netfilter/x_tables.c b/net/netfilter/x_tables.c
index 227aa11..47b978b 100644 (file)
@@ -711,28 +711,15 @@ void xt_free_table_info(struct xt_table_info *info)
 {
        int cpu;
 
-       for_each_possible_cpu(cpu) {
-               if (info->size <= PAGE_SIZE)
-                       kfree(info->entries[cpu]);
-               else
-                       vfree(info->entries[cpu]);
-       }
+       for_each_possible_cpu(cpu)
+               kvfree(info->entries[cpu]);
 
        if (info->jumpstack != NULL) {
-               if (sizeof(void *) * info->stacksize > PAGE_SIZE) {
-                       for_each_possible_cpu(cpu)
-                               vfree(info->jumpstack[cpu]);
-               } else {
-                       for_each_possible_cpu(cpu)
-                               kfree(info->jumpstack[cpu]);
-               }
+               for_each_possible_cpu(cpu)
+                       kvfree(info->jumpstack[cpu]);
+               kvfree(info->jumpstack);
        }
 
-       if (sizeof(void **) * nr_cpu_ids > PAGE_SIZE)
-               vfree(info->jumpstack);
-       else
-               kfree(info->jumpstack);
-
        free_percpu(info->stackptr);
 
        kfree(info);
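
kvfree() dispatches on the pointer itself, so callers no longer need to
remember whether an allocation came from kmalloc() or vmalloc(); that is
what allows the paired size checks above to go away. Its implementation in
mm/util.c is just:

void kvfree(const void *addr)
{
	if (is_vmalloc_addr(addr))
		vfree(addr);
	else
		kfree(addr);
}
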
diff --git a/net/netfilter/xt_LED.c b/net/netfilter/xt_LED.c
index 993de2b..f14bcf2 100644 (file)
@@ -133,9 +133,7 @@ static int led_tg_check(const struct xt_tgchk_param *par)
 
        err = led_trigger_register(&ledinternal->netfilter_led_trigger);
        if (err) {
-               pr_warning("led_trigger_register() failed\n");
-               if (err == -EEXIST)
-                       pr_warning("Trigger name is already in use.\n");
+               pr_err("Trigger name is already in use.\n");
                goto exit_alloc;
        }
 
diff --git a/net/netfilter/xt_LOG.c b/net/netfilter/xt_LOG.c
index 5ab2484..c13b794 100644 (file)
 #include <linux/netfilter/xt_LOG.h>
 #include <linux/netfilter_ipv6/ip6_tables.h>
 #include <net/netfilter/nf_log.h>
-#include <net/netfilter/xt_log.h>
-
-static struct nf_loginfo default_loginfo = {
-       .type   = NF_LOG_TYPE_LOG,
-       .u = {
-               .log = {
-                       .level    = 5,
-                       .logflags = NF_LOG_MASK,
-               },
-       },
-};
-
-static int dump_udp_header(struct sbuff *m, const struct sk_buff *skb,
-                          u8 proto, int fragment, unsigned int offset)
-{
-       struct udphdr _udph;
-       const struct udphdr *uh;
-
-       if (proto == IPPROTO_UDP)
-               /* Max length: 10 "PROTO=UDP "     */
-               sb_add(m, "PROTO=UDP ");
-       else    /* Max length: 14 "PROTO=UDPLITE " */
-               sb_add(m, "PROTO=UDPLITE ");
-
-       if (fragment)
-               goto out;
-
-       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-       uh = skb_header_pointer(skb, offset, sizeof(_udph), &_udph);
-       if (uh == NULL) {
-               sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
-
-               return 1;
-       }
-
-       /* Max length: 20 "SPT=65535 DPT=65535 " */
-       sb_add(m, "SPT=%u DPT=%u LEN=%u ", ntohs(uh->source), ntohs(uh->dest),
-               ntohs(uh->len));
-
-out:
-       return 0;
-}
-
-static int dump_tcp_header(struct sbuff *m, const struct sk_buff *skb,
-                          u8 proto, int fragment, unsigned int offset,
-                          unsigned int logflags)
-{
-       struct tcphdr _tcph;
-       const struct tcphdr *th;
-
-       /* Max length: 10 "PROTO=TCP " */
-       sb_add(m, "PROTO=TCP ");
-
-       if (fragment)
-               return 0;
-
-       /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-       th = skb_header_pointer(skb, offset, sizeof(_tcph), &_tcph);
-       if (th == NULL) {
-               sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - offset);
-               return 1;
-       }
-
-       /* Max length: 20 "SPT=65535 DPT=65535 " */
-       sb_add(m, "SPT=%u DPT=%u ", ntohs(th->source), ntohs(th->dest));
-       /* Max length: 30 "SEQ=4294967295 ACK=4294967295 " */
-       if (logflags & XT_LOG_TCPSEQ)
-               sb_add(m, "SEQ=%u ACK=%u ", ntohl(th->seq), ntohl(th->ack_seq));
-
-       /* Max length: 13 "WINDOW=65535 " */
-       sb_add(m, "WINDOW=%u ", ntohs(th->window));
-       /* Max length: 9 "RES=0x3C " */
-       sb_add(m, "RES=0x%02x ", (u_int8_t)(ntohl(tcp_flag_word(th) &
-                                           TCP_RESERVED_BITS) >> 22));
-       /* Max length: 32 "CWR ECE URG ACK PSH RST SYN FIN " */
-       if (th->cwr)
-               sb_add(m, "CWR ");
-       if (th->ece)
-               sb_add(m, "ECE ");
-       if (th->urg)
-               sb_add(m, "URG ");
-       if (th->ack)
-               sb_add(m, "ACK ");
-       if (th->psh)
-               sb_add(m, "PSH ");
-       if (th->rst)
-               sb_add(m, "RST ");
-       if (th->syn)
-               sb_add(m, "SYN ");
-       if (th->fin)
-               sb_add(m, "FIN ");
-       /* Max length: 11 "URGP=65535 " */
-       sb_add(m, "URGP=%u ", ntohs(th->urg_ptr));
-
-       if ((logflags & XT_LOG_TCPOPT) && th->doff*4 > sizeof(struct tcphdr)) {
-               u_int8_t _opt[60 - sizeof(struct tcphdr)];
-               const u_int8_t *op;
-               unsigned int i;
-               unsigned int optsize = th->doff*4 - sizeof(struct tcphdr);
-
-               op = skb_header_pointer(skb, offset + sizeof(struct tcphdr),
-                                       optsize, _opt);
-               if (op == NULL) {
-                       sb_add(m, "OPT (TRUNCATED)");
-                       return 1;
-               }
-
-               /* Max length: 127 "OPT (" 15*4*2chars ") " */
-               sb_add(m, "OPT (");
-               for (i = 0; i < optsize; i++)
-                       sb_add(m, "%02X", op[i]);
-
-               sb_add(m, ") ");
-       }
-
-       return 0;
-}
-
-static void dump_sk_uid_gid(struct sbuff *m, struct sock *sk)
-{
-       if (!sk || sk->sk_state == TCP_TIME_WAIT)
-               return;
-
-       read_lock_bh(&sk->sk_callback_lock);
-       if (sk->sk_socket && sk->sk_socket->file) {
-               const struct cred *cred = sk->sk_socket->file->f_cred;
-               sb_add(m, "UID=%u GID=%u ",
-                       from_kuid_munged(&init_user_ns, cred->fsuid),
-                       from_kgid_munged(&init_user_ns, cred->fsgid));
-       }
-       read_unlock_bh(&sk->sk_callback_lock);
-}
-
-/* One level of recursion won't kill us */
-static void dump_ipv4_packet(struct sbuff *m,
-                       const struct nf_loginfo *info,
-                       const struct sk_buff *skb,
-                       unsigned int iphoff)
-{
-       struct iphdr _iph;
-       const struct iphdr *ih;
-       unsigned int logflags;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-       else
-               logflags = NF_LOG_MASK;
-
-       ih = skb_header_pointer(skb, iphoff, sizeof(_iph), &_iph);
-       if (ih == NULL) {
-               sb_add(m, "TRUNCATED");
-               return;
-       }
-
-       /* Important fields:
-        * TOS, len, DF/MF, fragment offset, TTL, src, dst, options. */
-       /* Max length: 40 "SRC=255.255.255.255 DST=255.255.255.255 " */
-       sb_add(m, "SRC=%pI4 DST=%pI4 ",
-              &ih->saddr, &ih->daddr);
-
-       /* Max length: 46 "LEN=65535 TOS=0xFF PREC=0xFF TTL=255 ID=65535 " */
-       sb_add(m, "LEN=%u TOS=0x%02X PREC=0x%02X TTL=%u ID=%u ",
-              ntohs(ih->tot_len), ih->tos & IPTOS_TOS_MASK,
-              ih->tos & IPTOS_PREC_MASK, ih->ttl, ntohs(ih->id));
-
-       /* Max length: 6 "CE DF MF " */
-       if (ntohs(ih->frag_off) & IP_CE)
-               sb_add(m, "CE ");
-       if (ntohs(ih->frag_off) & IP_DF)
-               sb_add(m, "DF ");
-       if (ntohs(ih->frag_off) & IP_MF)
-               sb_add(m, "MF ");
-
-       /* Max length: 11 "FRAG:65535 " */
-       if (ntohs(ih->frag_off) & IP_OFFSET)
-               sb_add(m, "FRAG:%u ", ntohs(ih->frag_off) & IP_OFFSET);
-
-       if ((logflags & XT_LOG_IPOPT) &&
-           ih->ihl * 4 > sizeof(struct iphdr)) {
-               const unsigned char *op;
-               unsigned char _opt[4 * 15 - sizeof(struct iphdr)];
-               unsigned int i, optsize;
-
-               optsize = ih->ihl * 4 - sizeof(struct iphdr);
-               op = skb_header_pointer(skb, iphoff+sizeof(_iph),
-                                       optsize, _opt);
-               if (op == NULL) {
-                       sb_add(m, "TRUNCATED");
-                       return;
-               }
-
-               /* Max length: 127 "OPT (" 15*4*2chars ") " */
-               sb_add(m, "OPT (");
-               for (i = 0; i < optsize; i++)
-                       sb_add(m, "%02X", op[i]);
-               sb_add(m, ") ");
-       }
-
-       switch (ih->protocol) {
-       case IPPROTO_TCP:
-               if (dump_tcp_header(m, skb, ih->protocol,
-                                   ntohs(ih->frag_off) & IP_OFFSET,
-                                   iphoff+ih->ihl*4, logflags))
-                       return;
-               break;
-       case IPPROTO_UDP:
-       case IPPROTO_UDPLITE:
-               if (dump_udp_header(m, skb, ih->protocol,
-                                   ntohs(ih->frag_off) & IP_OFFSET,
-                                   iphoff+ih->ihl*4))
-                       return;
-               break;
-       case IPPROTO_ICMP: {
-               struct icmphdr _icmph;
-               const struct icmphdr *ich;
-               static const size_t required_len[NR_ICMP_TYPES+1]
-                       = { [ICMP_ECHOREPLY] = 4,
-                           [ICMP_DEST_UNREACH]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_SOURCE_QUENCH]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_REDIRECT]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_ECHO] = 4,
-                           [ICMP_TIME_EXCEEDED]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_PARAMETERPROB]
-                           = 8 + sizeof(struct iphdr),
-                           [ICMP_TIMESTAMP] = 20,
-                           [ICMP_TIMESTAMPREPLY] = 20,
-                           [ICMP_ADDRESS] = 12,
-                           [ICMP_ADDRESSREPLY] = 12 };
-
-               /* Max length: 11 "PROTO=ICMP " */
-               sb_add(m, "PROTO=ICMP ");
-
-               if (ntohs(ih->frag_off) & IP_OFFSET)
-                       break;
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               ich = skb_header_pointer(skb, iphoff + ih->ihl * 4,
-                                        sizeof(_icmph), &_icmph);
-               if (ich == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               /* Max length: 18 "TYPE=255 CODE=255 " */
-               sb_add(m, "TYPE=%u CODE=%u ", ich->type, ich->code);
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               if (ich->type <= NR_ICMP_TYPES &&
-                   required_len[ich->type] &&
-                   skb->len-iphoff-ih->ihl*4 < required_len[ich->type]) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               switch (ich->type) {
-               case ICMP_ECHOREPLY:
-               case ICMP_ECHO:
-                       /* Max length: 19 "ID=65535 SEQ=65535 " */
-                       sb_add(m, "ID=%u SEQ=%u ",
-                              ntohs(ich->un.echo.id),
-                              ntohs(ich->un.echo.sequence));
-                       break;
-
-               case ICMP_PARAMETERPROB:
-                       /* Max length: 14 "PARAMETER=255 " */
-                       sb_add(m, "PARAMETER=%u ",
-                              ntohl(ich->un.gateway) >> 24);
-                       break;
-               case ICMP_REDIRECT:
-                       /* Max length: 24 "GATEWAY=255.255.255.255 " */
-                       sb_add(m, "GATEWAY=%pI4 ", &ich->un.gateway);
-                       /* Fall through */
-               case ICMP_DEST_UNREACH:
-               case ICMP_SOURCE_QUENCH:
-               case ICMP_TIME_EXCEEDED:
-                       /* Max length: 3+maxlen */
-                       if (!iphoff) { /* Only recurse once. */
-                               sb_add(m, "[");
-                               dump_ipv4_packet(m, info, skb,
-                                           iphoff + ih->ihl*4+sizeof(_icmph));
-                               sb_add(m, "] ");
-                       }
-
-                       /* Max length: 10 "MTU=65535 " */
-                       if (ich->type == ICMP_DEST_UNREACH &&
-                           ich->code == ICMP_FRAG_NEEDED)
-                               sb_add(m, "MTU=%u ", ntohs(ich->un.frag.mtu));
-               }
-               break;
-       }
-       /* Max Length */
-       case IPPROTO_AH: {
-               struct ip_auth_hdr _ahdr;
-               const struct ip_auth_hdr *ah;
-
-               if (ntohs(ih->frag_off) & IP_OFFSET)
-                       break;
-
-               /* Max length: 9 "PROTO=AH " */
-               sb_add(m, "PROTO=AH ");
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               ah = skb_header_pointer(skb, iphoff+ih->ihl*4,
-                                       sizeof(_ahdr), &_ahdr);
-               if (ah == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               /* Length: 15 "SPI=0xF1234567 " */
-               sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
-               break;
-       }
-       case IPPROTO_ESP: {
-               struct ip_esp_hdr _esph;
-               const struct ip_esp_hdr *eh;
-
-               /* Max length: 10 "PROTO=ESP " */
-               sb_add(m, "PROTO=ESP ");
-
-               if (ntohs(ih->frag_off) & IP_OFFSET)
-                       break;
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               eh = skb_header_pointer(skb, iphoff+ih->ihl*4,
-                                       sizeof(_esph), &_esph);
-               if (eh == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ",
-                              skb->len - iphoff - ih->ihl*4);
-                       break;
-               }
-
-               /* Length: 15 "SPI=0xF1234567 " */
-               sb_add(m, "SPI=0x%x ", ntohl(eh->spi));
-               break;
-       }
-       /* Max length: 10 "PROTO=255 " */
-       default:
-               sb_add(m, "PROTO=%u ", ih->protocol);
-       }
-
-       /* Max length: 15 "UID=4294967295 " */
-       if ((logflags & XT_LOG_UID) && !iphoff)
-               dump_sk_uid_gid(m, skb->sk);
-
-       /* Max length: 16 "MARK=0xFFFFFFFF " */
-       if (!iphoff && skb->mark)
-               sb_add(m, "MARK=0x%x ", skb->mark);
-
-       /* Proto    Max log string length */
-       /* IP:      40+46+6+11+127 = 230 */
-       /* TCP:     10+max(25,20+30+13+9+32+11+127) = 252 */
-       /* UDP:     10+max(25,20) = 35 */
-       /* UDPLITE: 14+max(25,20) = 39 */
-       /* ICMP:    11+max(25, 18+25+max(19,14,24+3+n+10,3+n+10)) = 91+n */
-       /* ESP:     10+max(25)+15 = 50 */
-       /* AH:      9+max(25)+15 = 49 */
-       /* unknown: 10 */
-
-       /* (ICMP allows recursion one level deep) */
-       /* maxlen =  IP + ICMP +  IP + max(TCP,UDP,ICMP,unknown) */
-       /* maxlen = 230+   91  + 230 + 252 = 803 */
-}
-
-static void dump_ipv4_mac_header(struct sbuff *m,
-                           const struct nf_loginfo *info,
-                           const struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       unsigned int logflags = 0;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-
-       if (!(logflags & XT_LOG_MACDECODE))
-               goto fallback;
-
-       switch (dev->type) {
-       case ARPHRD_ETHER:
-               sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
-                      eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
-                      ntohs(eth_hdr(skb)->h_proto));
-               return;
-       default:
-               break;
-       }
-
-fallback:
-       sb_add(m, "MAC=");
-       if (dev->hard_header_len &&
-           skb->mac_header != skb->network_header) {
-               const unsigned char *p = skb_mac_header(skb);
-               unsigned int i;
-
-               sb_add(m, "%02x", *p++);
-               for (i = 1; i < dev->hard_header_len; i++, p++)
-                       sb_add(m, ":%02x", *p);
-       }
-       sb_add(m, " ");
-}
-
-static void
-log_packet_common(struct sbuff *m,
-                 u_int8_t pf,
-                 unsigned int hooknum,
-                 const struct sk_buff *skb,
-                 const struct net_device *in,
-                 const struct net_device *out,
-                 const struct nf_loginfo *loginfo,
-                 const char *prefix)
-{
-       sb_add(m, KERN_SOH "%c%sIN=%s OUT=%s ",
-              '0' + loginfo->u.log.level, prefix,
-              in ? in->name : "",
-              out ? out->name : "");
-#ifdef CONFIG_BRIDGE_NETFILTER
-       if (skb->nf_bridge) {
-               const struct net_device *physindev;
-               const struct net_device *physoutdev;
-
-               physindev = skb->nf_bridge->physindev;
-               if (physindev && in != physindev)
-                       sb_add(m, "PHYSIN=%s ", physindev->name);
-               physoutdev = skb->nf_bridge->physoutdev;
-               if (physoutdev && out != physoutdev)
-                       sb_add(m, "PHYSOUT=%s ", physoutdev->name);
-       }
-#endif
-}
-
-
-static void
-ipt_log_packet(struct net *net,
-              u_int8_t pf,
-              unsigned int hooknum,
-              const struct sk_buff *skb,
-              const struct net_device *in,
-              const struct net_device *out,
-              const struct nf_loginfo *loginfo,
-              const char *prefix)
-{
-       struct sbuff *m;
-
-       /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
-               return;
-
-       m = sb_open();
-
-       if (!loginfo)
-               loginfo = &default_loginfo;
-
-       log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
-
-       if (in != NULL)
-               dump_ipv4_mac_header(m, loginfo, skb);
-
-       dump_ipv4_packet(m, loginfo, skb, 0);
-
-       sb_close(m);
-}
-
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-/* One level of recursion won't kill us */
-static void dump_ipv6_packet(struct sbuff *m,
-                       const struct nf_loginfo *info,
-                       const struct sk_buff *skb, unsigned int ip6hoff,
-                       int recurse)
-{
-       u_int8_t currenthdr;
-       int fragment;
-       struct ipv6hdr _ip6h;
-       const struct ipv6hdr *ih;
-       unsigned int ptr;
-       unsigned int hdrlen = 0;
-       unsigned int logflags;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-       else
-               logflags = NF_LOG_MASK;
-
-       ih = skb_header_pointer(skb, ip6hoff, sizeof(_ip6h), &_ip6h);
-       if (ih == NULL) {
-               sb_add(m, "TRUNCATED");
-               return;
-       }
-
-       /* Max length: 88 "SRC=0000.0000.0000.0000.0000.0000.0000.0000 DST=0000.0000.0000.0000.0000.0000.0000.0000 " */
-       sb_add(m, "SRC=%pI6 DST=%pI6 ", &ih->saddr, &ih->daddr);
-
-       /* Max length: 44 "LEN=65535 TC=255 HOPLIMIT=255 FLOWLBL=FFFFF " */
-       sb_add(m, "LEN=%Zu TC=%u HOPLIMIT=%u FLOWLBL=%u ",
-              ntohs(ih->payload_len) + sizeof(struct ipv6hdr),
-              (ntohl(*(__be32 *)ih) & 0x0ff00000) >> 20,
-              ih->hop_limit,
-              (ntohl(*(__be32 *)ih) & 0x000fffff));
-
-       fragment = 0;
-       ptr = ip6hoff + sizeof(struct ipv6hdr);
-       currenthdr = ih->nexthdr;
-       while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
-               struct ipv6_opt_hdr _hdr;
-               const struct ipv6_opt_hdr *hp;
-
-               hp = skb_header_pointer(skb, ptr, sizeof(_hdr), &_hdr);
-               if (hp == NULL) {
-                       sb_add(m, "TRUNCATED");
-                       return;
-               }
-
-               /* Max length: 48 "OPT (...) " */
-               if (logflags & XT_LOG_IPOPT)
-                       sb_add(m, "OPT ( ");
-
-               switch (currenthdr) {
-               case IPPROTO_FRAGMENT: {
-                       struct frag_hdr _fhdr;
-                       const struct frag_hdr *fh;
-
-                       sb_add(m, "FRAG:");
-                       fh = skb_header_pointer(skb, ptr, sizeof(_fhdr),
-                                               &_fhdr);
-                       if (fh == NULL) {
-                               sb_add(m, "TRUNCATED ");
-                               return;
-                       }
-
-                       /* Max length: 6 "65535 " */
-                       sb_add(m, "%u ", ntohs(fh->frag_off) & 0xFFF8);
-
-                       /* Max length: 11 "INCOMPLETE " */
-                       if (fh->frag_off & htons(0x0001))
-                               sb_add(m, "INCOMPLETE ");
-
-                       sb_add(m, "ID:%08x ", ntohl(fh->identification));
-
-                       if (ntohs(fh->frag_off) & 0xFFF8)
-                               fragment = 1;
-
-                       hdrlen = 8;
-
-                       break;
-               }
-               case IPPROTO_DSTOPTS:
-               case IPPROTO_ROUTING:
-               case IPPROTO_HOPOPTS:
-                       if (fragment) {
-                               if (logflags & XT_LOG_IPOPT)
-                                       sb_add(m, ")");
-                               return;
-                       }
-                       hdrlen = ipv6_optlen(hp);
-                       break;
-               /* Max Length */
-               case IPPROTO_AH:
-                       if (logflags & XT_LOG_IPOPT) {
-                               struct ip_auth_hdr _ahdr;
-                               const struct ip_auth_hdr *ah;
-
-                               /* Max length: 3 "AH " */
-                               sb_add(m, "AH ");
-
-                               if (fragment) {
-                                       sb_add(m, ")");
-                                       return;
-                               }
-
-                               ah = skb_header_pointer(skb, ptr, sizeof(_ahdr),
-                                                       &_ahdr);
-                               if (ah == NULL) {
-                                       /*
-                                        * Max length: 26 "INCOMPLETE [65535
-                                        *  bytes] )"
-                                        */
-                                       sb_add(m, "INCOMPLETE [%u bytes] )",
-                                              skb->len - ptr);
-                                       return;
-                               }
-
-                               /* Length: 15 "SPI=0xF1234567 " */
-                               sb_add(m, "SPI=0x%x ", ntohl(ah->spi));
-
-                       }
-
-                       hdrlen = (hp->hdrlen+2)<<2;
-                       break;
-               case IPPROTO_ESP:
-                       if (logflags & XT_LOG_IPOPT) {
-                               struct ip_esp_hdr _esph;
-                               const struct ip_esp_hdr *eh;
-
-                               /* Max length: 4 "ESP " */
-                               sb_add(m, "ESP ");
-
-                               if (fragment) {
-                                       sb_add(m, ")");
-                                       return;
-                               }
-
-                               /*
-                                * Max length: 26 "INCOMPLETE [65535 bytes] )"
-                                */
-                               eh = skb_header_pointer(skb, ptr, sizeof(_esph),
-                                                       &_esph);
-                               if (eh == NULL) {
-                                       sb_add(m, "INCOMPLETE [%u bytes] )",
-                                              skb->len - ptr);
-                                       return;
-                               }
-
-                               /* Length: 16 "SPI=0xF1234567 )" */
-                               sb_add(m, "SPI=0x%x )", ntohl(eh->spi));
-
-                       }
-                       return;
-               default:
-                       /* Max length: 20 "Unknown Ext Hdr 255" */
-                       sb_add(m, "Unknown Ext Hdr %u", currenthdr);
-                       return;
-               }
-               if (logflags & XT_LOG_IPOPT)
-                       sb_add(m, ") ");
-
-               currenthdr = hp->nexthdr;
-               ptr += hdrlen;
-       }
-
-       switch (currenthdr) {
-       case IPPROTO_TCP:
-               if (dump_tcp_header(m, skb, currenthdr, fragment, ptr,
-                   logflags))
-                       return;
-               break;
-       case IPPROTO_UDP:
-       case IPPROTO_UDPLITE:
-               if (dump_udp_header(m, skb, currenthdr, fragment, ptr))
-                       return;
-               break;
-       case IPPROTO_ICMPV6: {
-               struct icmp6hdr _icmp6h;
-               const struct icmp6hdr *ic;
-
-               /* Max length: 13 "PROTO=ICMPv6 " */
-               sb_add(m, "PROTO=ICMPv6 ");
-
-               if (fragment)
-                       break;
-
-               /* Max length: 25 "INCOMPLETE [65535 bytes] " */
-               ic = skb_header_pointer(skb, ptr, sizeof(_icmp6h), &_icmp6h);
-               if (ic == NULL) {
-                       sb_add(m, "INCOMPLETE [%u bytes] ", skb->len - ptr);
-                       return;
-               }
-
-               /* Max length: 18 "TYPE=255 CODE=255 " */
-               sb_add(m, "TYPE=%u CODE=%u ", ic->icmp6_type, ic->icmp6_code);
-
-               switch (ic->icmp6_type) {
-               case ICMPV6_ECHO_REQUEST:
-               case ICMPV6_ECHO_REPLY:
-                       /* Max length: 19 "ID=65535 SEQ=65535 " */
-                       sb_add(m, "ID=%u SEQ=%u ",
-                               ntohs(ic->icmp6_identifier),
-                               ntohs(ic->icmp6_sequence));
-                       break;
-               case ICMPV6_MGM_QUERY:
-               case ICMPV6_MGM_REPORT:
-               case ICMPV6_MGM_REDUCTION:
-                       break;
-
-               case ICMPV6_PARAMPROB:
-                       /* Max length: 17 "POINTER=ffffffff " */
-                       sb_add(m, "POINTER=%08x ", ntohl(ic->icmp6_pointer));
-                       /* Fall through */
-               case ICMPV6_DEST_UNREACH:
-               case ICMPV6_PKT_TOOBIG:
-               case ICMPV6_TIME_EXCEED:
-                       /* Max length: 3+maxlen */
-                       if (recurse) {
-                               sb_add(m, "[");
-                               dump_ipv6_packet(m, info, skb,
-                                           ptr + sizeof(_icmp6h), 0);
-                               sb_add(m, "] ");
-                       }
-
-                       /* Max length: 10 "MTU=65535 " */
-                       if (ic->icmp6_type == ICMPV6_PKT_TOOBIG)
-                               sb_add(m, "MTU=%u ", ntohl(ic->icmp6_mtu));
-               }
-               break;
-       }
-       /* Max length: 10 "PROTO=255 " */
-       default:
-               sb_add(m, "PROTO=%u ", currenthdr);
-       }
-
-       /* Max length: 15 "UID=4294967295 " */
-       if ((logflags & XT_LOG_UID) && recurse)
-               dump_sk_uid_gid(m, skb->sk);
-
-       /* Max length: 16 "MARK=0xFFFFFFFF " */
-       if (recurse && skb->mark)
-               sb_add(m, "MARK=0x%x ", skb->mark);
-}
-
-static void dump_ipv6_mac_header(struct sbuff *m,
-                           const struct nf_loginfo *info,
-                           const struct sk_buff *skb)
-{
-       struct net_device *dev = skb->dev;
-       unsigned int logflags = 0;
-
-       if (info->type == NF_LOG_TYPE_LOG)
-               logflags = info->u.log.logflags;
-
-       if (!(logflags & XT_LOG_MACDECODE))
-               goto fallback;
-
-       switch (dev->type) {
-       case ARPHRD_ETHER:
-               sb_add(m, "MACSRC=%pM MACDST=%pM MACPROTO=%04x ",
-                      eth_hdr(skb)->h_source, eth_hdr(skb)->h_dest,
-                      ntohs(eth_hdr(skb)->h_proto));
-               return;
-       default:
-               break;
-       }
-
-fallback:
-       sb_add(m, "MAC=");
-       if (dev->hard_header_len &&
-           skb->mac_header != skb->network_header) {
-               const unsigned char *p = skb_mac_header(skb);
-               unsigned int len = dev->hard_header_len;
-               unsigned int i;
-
-               if (dev->type == ARPHRD_SIT) {
-                       p -= ETH_HLEN;
-
-                       if (p < skb->head)
-                               p = NULL;
-               }
-
-               if (p != NULL) {
-                       sb_add(m, "%02x", *p++);
-                       for (i = 1; i < len; i++)
-                               sb_add(m, ":%02x", *p++);
-               }
-               sb_add(m, " ");
-
-               if (dev->type == ARPHRD_SIT) {
-                       const struct iphdr *iph =
-                               (struct iphdr *)skb_mac_header(skb);
-                       sb_add(m, "TUNNEL=%pI4->%pI4 ", &iph->saddr,
-                              &iph->daddr);
-               }
-       } else
-               sb_add(m, " ");
-}
-
-static void
-ip6t_log_packet(struct net *net,
-               u_int8_t pf,
-               unsigned int hooknum,
-               const struct sk_buff *skb,
-               const struct net_device *in,
-               const struct net_device *out,
-               const struct nf_loginfo *loginfo,
-               const char *prefix)
-{
-       struct sbuff *m;
-
-       /* FIXME: Disabled from containers until syslog ns is supported */
-       if (!net_eq(net, &init_net))
-               return;
-
-       m = sb_open();
-
-       if (!loginfo)
-               loginfo = &default_loginfo;
-
-       log_packet_common(m, pf, hooknum, skb, in, out, loginfo, prefix);
-
-       if (in != NULL)
-               dump_ipv6_mac_header(m, loginfo, skb);
-
-       dump_ipv6_packet(m, loginfo, skb, skb_network_offset(skb), 1);
-
-       sb_close(m);
-}
-#endif
 
 static unsigned int
 log_tg(struct sk_buff *skb, const struct xt_action_param *par)
@@ -839,17 +39,8 @@ log_tg(struct sk_buff *skb, const struct xt_action_param *par)
        li.u.log.level = loginfo->level;
        li.u.log.logflags = loginfo->logflags;
 
-       if (par->family == NFPROTO_IPV4)
-               ipt_log_packet(net, NFPROTO_IPV4, par->hooknum, skb, par->in,
-                              par->out, &li, loginfo->prefix);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       else if (par->family == NFPROTO_IPV6)
-               ip6t_log_packet(net, NFPROTO_IPV6, par->hooknum, skb, par->in,
-                               par->out, &li, loginfo->prefix);
-#endif
-       else
-               WARN_ON_ONCE(1);
-
+       nf_log_packet(net, par->family, par->hooknum, skb, par->in, par->out,
+                     &li, "%s", loginfo->prefix);
        return XT_CONTINUE;
 }
 
@@ -870,7 +61,12 @@ static int log_tg_check(const struct xt_tgchk_param *par)
                return -EINVAL;
        }
 
-       return 0;
+       return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
+}
+
+static void log_tg_destroy(const struct xt_tgdtor_param *par)
+{
+       nf_logger_put(par->family, NF_LOG_TYPE_LOG);
 }
 
 static struct xt_target log_tg_regs[] __read_mostly = {
@@ -880,6 +76,7 @@ static struct xt_target log_tg_regs[] __read_mostly = {
                .target         = log_tg,
                .targetsize     = sizeof(struct xt_log_info),
                .checkentry     = log_tg_check,
+               .destroy        = log_tg_destroy,
                .me             = THIS_MODULE,
        },
 #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -889,78 +86,19 @@ static struct xt_target log_tg_regs[] __read_mostly = {
                .target         = log_tg,
                .targetsize     = sizeof(struct xt_log_info),
                .checkentry     = log_tg_check,
+               .destroy        = log_tg_destroy,
                .me             = THIS_MODULE,
        },
 #endif
 };
 
-static struct nf_logger ipt_log_logger __read_mostly = {
-       .name           = "ipt_LOG",
-       .logfn          = &ipt_log_packet,
-       .me             = THIS_MODULE,
-};
-
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-static struct nf_logger ip6t_log_logger __read_mostly = {
-       .name           = "ip6t_LOG",
-       .logfn          = &ip6t_log_packet,
-       .me             = THIS_MODULE,
-};
-#endif
-
-static int __net_init log_net_init(struct net *net)
-{
-       nf_log_set(net, NFPROTO_IPV4, &ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_set(net, NFPROTO_IPV6, &ip6t_log_logger);
-#endif
-       return 0;
-}
-
-static void __net_exit log_net_exit(struct net *net)
-{
-       nf_log_unset(net, &ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_unset(net, &ip6t_log_logger);
-#endif
-}
-
-static struct pernet_operations log_net_ops = {
-       .init = log_net_init,
-       .exit = log_net_exit,
-};
-
 static int __init log_tg_init(void)
 {
-       int ret;
-
-       ret = register_pernet_subsys(&log_net_ops);
-       if (ret < 0)
-               goto err_pernet;
-
-       ret = xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
-       if (ret < 0)
-               goto err_target;
-
-       nf_log_register(NFPROTO_IPV4, &ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_register(NFPROTO_IPV6, &ip6t_log_logger);
-#endif
-       return 0;
-
-err_target:
-       unregister_pernet_subsys(&log_net_ops);
-err_pernet:
-       return ret;
+       return xt_register_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
 }
 
 static void __exit log_tg_exit(void)
 {
-       unregister_pernet_subsys(&log_net_ops);
-       nf_log_unregister(&ipt_log_logger);
-#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
-       nf_log_unregister(&ip6t_log_logger);
-#endif
        xt_unregister_targets(log_tg_regs, ARRAY_SIZE(log_tg_regs));
 }
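
With the dump helpers removed, xt_LOG is reduced to glue around the shared
nf_log core: checkentry pins the per-family NF_LOG_TYPE_LOG backend, the
target hands each skb to nf_log_packet(), and the new destroy hook drops the
reference. A minimal sketch of that delegation pattern, assembled from the
calls visible in the hunks above; the sketch_ names are illustrative, and
the netns derivation is an assumption, not quoted from the patch:

    #include <linux/netfilter/x_tables.h>
    #include <net/netfilter/nf_log.h>

    static int sketch_check(const struct xt_tgchk_param *par)
    {
            /* Pin the NF_LOG_TYPE_LOG backend for this family; fails if
             * no logger module (nf_log_ipv4/ipv6) can be loaded. */
            return nf_logger_find_get(par->family, NF_LOG_TYPE_LOG);
    }

    static unsigned int sketch_target(struct sk_buff *skb,
                                      const struct xt_action_param *par)
    {
            struct net *net = dev_net(par->in ? par->in : par->out);
            struct nf_loginfo li = {
                    .type        = NF_LOG_TYPE_LOG,
                    .u.log.level = 4,       /* illustrative syslog level */
            };

            /* The backend logger now does all of the packet dumping. */
            nf_log_packet(net, par->family, par->hooknum, skb,
                          par->in, par->out, &li, "%s", "sketch: ");
            return XT_CONTINUE;
    }

    static void sketch_destroy(const struct xt_tgdtor_param *par)
    {
            nf_logger_put(par->family, NF_LOG_TYPE_LOG);
    }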
 
index bbffdbd..dffee9d 100644
@@ -28,7 +28,7 @@ static int bpf_mt_check(const struct xt_mtchk_param *par)
        program.len = info->bpf_program_num_elem;
        program.filter = info->bpf_program;
 
-       if (sk_unattached_filter_create(&info->filter, &program)) {
+       if (bpf_prog_create(&info->filter, &program)) {
                pr_info("bpf: check failed: parse error\n");
                return -EINVAL;
        }
@@ -40,13 +40,13 @@ static bool bpf_mt(const struct sk_buff *skb, struct xt_action_param *par)
 {
        const struct xt_bpf_info *info = par->matchinfo;
 
-       return SK_RUN_FILTER(info->filter, skb);
+       return BPF_PROG_RUN(info->filter, skb);
 }
 
 static void bpf_mt_destroy(const struct xt_mtdtor_param *par)
 {
        const struct xt_bpf_info *info = par->matchinfo;
-       sk_unattached_filter_destroy(info->filter);
+       bpf_prog_destroy(info->filter);
 }
 
 static struct xt_match bpf_mt_reg __read_mostly = {
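
The xt_bpf hunks above track the rename of the unattached socket-filter API:
sk_unattached_filter_create()/SK_RUN_FILTER()/sk_unattached_filter_destroy()
become bpf_prog_create()/BPF_PROG_RUN()/bpf_prog_destroy(), with no change
in behaviour. A minimal sketch of the renamed API with a trivial accept-all
classic BPF program; the program and the sketch_ names are illustrative, not
from the patch:

    #include <linux/filter.h>

    static struct bpf_prog *sketch_prog;

    static int sketch_attach(void)
    {
            /* Classic BPF: unconditionally return non-zero ("match"). */
            struct sock_filter insns[] = {
                    BPF_STMT(BPF_RET | BPF_K, 0xffffffff),
            };
            struct sock_fprog_kern fprog = {
                    .len    = ARRAY_SIZE(insns),
                    .filter = insns,
            };

            /* Copies and translates the program, so insns may live on
             * the stack. */
            return bpf_prog_create(&sketch_prog, &fprog);
    }

    static bool sketch_match(const struct sk_buff *skb)
    {
            /* Same call shape as bpf_mt() above. */
            return BPF_PROG_RUN(sketch_prog, skb);
    }

    static void sketch_detach(void)
    {
            bpf_prog_destroy(sketch_prog);
    }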
index a3910fc..47dc683 100644
@@ -104,7 +104,7 @@ struct xt_hashlimit_htable {
        spinlock_t lock;                /* lock for list_head */
        u_int32_t rnd;                  /* random seed for hash */
        unsigned int count;             /* number entries in table */
-       struct timer_list timer;        /* timer for gc */
+       struct delayed_work gc_work;
 
        /* seq_file stuff */
        struct proc_dir_entry *pde;
@@ -213,7 +213,7 @@ dsthash_free(struct xt_hashlimit_htable *ht, struct dsthash_ent *ent)
        call_rcu_bh(&ent->rcu, dsthash_free_rcu);
        ht->count--;
 }
-static void htable_gc(unsigned long htlong);
+static void htable_gc(struct work_struct *work);
 
 static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
                         u_int8_t family)
@@ -273,9 +273,9 @@ static int htable_create(struct net *net, struct xt_hashlimit_mtinfo1 *minfo,
        }
        hinfo->net = net;
 
-       setup_timer(&hinfo->timer, htable_gc, (unsigned long)hinfo);
-       hinfo->timer.expires = jiffies + msecs_to_jiffies(hinfo->cfg.gc_interval);
-       add_timer(&hinfo->timer);
+       INIT_DEFERRABLE_WORK(&hinfo->gc_work, htable_gc);
+       queue_delayed_work(system_power_efficient_wq, &hinfo->gc_work,
+                          msecs_to_jiffies(hinfo->cfg.gc_interval));
 
        hlist_add_head(&hinfo->node, &hashlimit_net->htables);
 
@@ -300,29 +300,30 @@ static void htable_selective_cleanup(struct xt_hashlimit_htable *ht,
 {
        unsigned int i;
 
-       /* lock hash table and iterate over it */
-       spin_lock_bh(&ht->lock);
        for (i = 0; i < ht->cfg.size; i++) {
                struct dsthash_ent *dh;
                struct hlist_node *n;
+
+               spin_lock_bh(&ht->lock);
                hlist_for_each_entry_safe(dh, n, &ht->hash[i], node) {
                        if ((*select)(ht, dh))
                                dsthash_free(ht, dh);
                }
+               spin_unlock_bh(&ht->lock);
+               cond_resched();
        }
-       spin_unlock_bh(&ht->lock);
 }
 
-/* hash table garbage collector, run by timer */
-static void htable_gc(unsigned long htlong)
+static void htable_gc(struct work_struct *work)
 {
-       struct xt_hashlimit_htable *ht = (struct xt_hashlimit_htable *)htlong;
+       struct xt_hashlimit_htable *ht;
+
+       ht = container_of(work, struct xt_hashlimit_htable, gc_work.work);
 
        htable_selective_cleanup(ht, select_gc);
 
-       /* re-add the timer accordingly */
-       ht->timer.expires = jiffies + msecs_to_jiffies(ht->cfg.gc_interval);
-       add_timer(&ht->timer);
+       queue_delayed_work(system_power_efficient_wq,
+                          &ht->gc_work, msecs_to_jiffies(ht->cfg.gc_interval));
 }
 
 static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
@@ -341,7 +342,7 @@ static void htable_remove_proc_entry(struct xt_hashlimit_htable *hinfo)
 
 static void htable_destroy(struct xt_hashlimit_htable *hinfo)
 {
-       del_timer_sync(&hinfo->timer);
+       cancel_delayed_work_sync(&hinfo->gc_work);
        htable_remove_proc_entry(hinfo);
        htable_selective_cleanup(hinfo, select_all);
        kfree(hinfo->name);
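
The hashlimit change above swaps a self-rearming kernel timer for a
deferrable delayed work item on the power-efficient workqueue: garbage
collection now runs in process context, which is what lets
htable_selective_cleanup() take the bucket lock per chain and cond_resched()
between buckets, and an idle CPU is no longer woken just to expire entries.
A minimal sketch of the same timer-to-workqueue pattern, with hypothetical
sketch_ names:

    #include <linux/workqueue.h>

    struct sketch_table {
            struct delayed_work gc_work;
            unsigned long gc_interval;      /* in msecs, illustrative */
    };

    static void sketch_gc(struct work_struct *work)
    {
            struct sketch_table *t =
                    container_of(work, struct sketch_table, gc_work.work);

            /* ... reap expired entries; may sleep or cond_resched() ... */

            /* Re-arm, exactly as htable_gc() does above. */
            queue_delayed_work(system_power_efficient_wq, &t->gc_work,
                               msecs_to_jiffies(t->gc_interval));
    }

    static void sketch_start(struct sketch_table *t)
    {
            INIT_DEFERRABLE_WORK(&t->gc_work, sketch_gc);
            queue_delayed_work(system_power_efficient_wq, &t->gc_work,
                               msecs_to_jiffies(t->gc_interval));
    }

    static void sketch_stop(struct sketch_table *t)
    {
            /* Waits for an in-flight GC pass, like htable_destroy(). */
            cancel_delayed_work_sync(&t->gc_work);
    }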
index 3045a96..fe9415e 100644
@@ -170,7 +170,6 @@ int netlbl_cfg_unlbl_map_add(const char *domain,
 #endif /* IPv6 */
                default:
                        goto cfg_unlbl_map_add_failure;
-                       break;
                }
 
                entry->def.addrsel = addrmap;
index 15c731f..479a344 100644
@@ -58,7 +58,9 @@
 #include <linux/mutex.h>
 #include <linux/vmalloc.h>
 #include <linux/if_arp.h>
+#include <linux/rhashtable.h>
 #include <asm/cacheflush.h>
+#include <linux/hash.h>
 
 #include <net/net_namespace.h>
 #include <net/sock.h>
@@ -100,6 +102,18 @@ static atomic_t nl_table_users = ATOMIC_INIT(0);
 
 #define nl_deref_protected(X) rcu_dereference_protected(X, lockdep_is_held(&nl_table_lock));
 
+/* Protects netlink socket hash table mutations */
+DEFINE_MUTEX(nl_sk_hash_lock);
+
+static int lockdep_nl_sk_hash_is_held(void)
+{
+#ifdef CONFIG_LOCKDEP
+       return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1;
+#else
+       return 1;
+#endif
+}
+
 static ATOMIC_NOTIFIER_HEAD(netlink_chain);
 
 static DEFINE_SPINLOCK(netlink_tap_lock);
@@ -110,11 +124,6 @@ static inline u32 netlink_group_mask(u32 group)
        return group ? 1 << (group - 1) : 0;
 }
 
-static inline struct hlist_head *nl_portid_hashfn(struct nl_portid_hash *hash, u32 portid)
-{
-       return &hash->table[jhash_1word(portid, hash->rnd) & hash->mask];
-}
-
 int netlink_add_tap(struct netlink_tap *nt)
 {
        if (unlikely(nt->dev->type != ARPHRD_NETLINK))
@@ -170,7 +179,6 @@ EXPORT_SYMBOL_GPL(netlink_remove_tap);
 static bool netlink_filter_tap(const struct sk_buff *skb)
 {
        struct sock *sk = skb->sk;
-       bool pass = false;
 
        /* We take the more conservative approach and
         * whitelist socket protocols that may pass.
@@ -184,11 +192,10 @@ static bool netlink_filter_tap(const struct sk_buff *skb)
        case NETLINK_FIB_LOOKUP:
        case NETLINK_NETFILTER:
        case NETLINK_GENERIC:
-               pass = true;
-               break;
+               return true;
        }
 
-       return pass;
+       return false;
 }
 
 static int __netlink_deliver_tap_skb(struct sk_buff *skb,
@@ -376,7 +383,7 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
 
                if ((int)req->nm_block_size <= 0)
                        return -EINVAL;
-               if (!IS_ALIGNED(req->nm_block_size, PAGE_SIZE))
+               if (!PAGE_ALIGNED(req->nm_block_size))
                        return -EINVAL;
                if (req->nm_frame_size < NL_MMAP_HDRLEN)
                        return -EINVAL;
@@ -636,7 +643,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
                while (nlk->cb_running && netlink_dump_space(nlk)) {
                        err = netlink_dump(sk);
                        if (err < 0) {
-                               sk->sk_err = err;
+                               sk->sk_err = -err;
                                sk->sk_error_report(sk);
                                break;
                        }
@@ -985,105 +992,48 @@ netlink_unlock_table(void)
                wake_up(&nl_table_wait);
 }
 
-static bool netlink_compare(struct net *net, struct sock *sk)
+struct netlink_compare_arg
 {
-       return net_eq(sock_net(sk), net);
-}
+       struct net *net;
+       u32 portid;
+};
 
-static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
+static bool netlink_compare(void *ptr, void *arg)
 {
-       struct netlink_table *table = &nl_table[protocol];
-       struct nl_portid_hash *hash = &table->hash;
-       struct hlist_head *head;
-       struct sock *sk;
+       struct netlink_compare_arg *x = arg;
+       struct sock *sk = ptr;
 
-       read_lock(&nl_table_lock);
-       head = nl_portid_hashfn(hash, portid);
-       sk_for_each(sk, head) {
-               if (table->compare(net, sk) &&
-                   (nlk_sk(sk)->portid == portid)) {
-                       sock_hold(sk);
-                       goto found;
-               }
-       }
-       sk = NULL;
-found:
-       read_unlock(&nl_table_lock);
-       return sk;
+       return nlk_sk(sk)->portid == x->portid &&
+              net_eq(sock_net(sk), x->net);
 }
 
-static struct hlist_head *nl_portid_hash_zalloc(size_t size)
+static struct sock *__netlink_lookup(struct netlink_table *table, u32 portid,
+                                    struct net *net)
 {
-       if (size <= PAGE_SIZE)
-               return kzalloc(size, GFP_ATOMIC);
-       else
-               return (struct hlist_head *)
-                       __get_free_pages(GFP_ATOMIC | __GFP_ZERO,
-                                        get_order(size));
-}
+       struct netlink_compare_arg arg = {
+               .net = net,
+               .portid = portid,
+       };
+       u32 hash;
 
-static void nl_portid_hash_free(struct hlist_head *table, size_t size)
-{
-       if (size <= PAGE_SIZE)
-               kfree(table);
-       else
-               free_pages((unsigned long)table, get_order(size));
-}
-
-static int nl_portid_hash_rehash(struct nl_portid_hash *hash, int grow)
-{
-       unsigned int omask, mask, shift;
-       size_t osize, size;
-       struct hlist_head *otable, *table;
-       int i;
-
-       omask = mask = hash->mask;
-       osize = size = (mask + 1) * sizeof(*table);
-       shift = hash->shift;
-
-       if (grow) {
-               if (++shift > hash->max_shift)
-                       return 0;
-               mask = mask * 2 + 1;
-               size *= 2;
-       }
-
-       table = nl_portid_hash_zalloc(size);
-       if (!table)
-               return 0;
-
-       otable = hash->table;
-       hash->table = table;
-       hash->mask = mask;
-       hash->shift = shift;
-       get_random_bytes(&hash->rnd, sizeof(hash->rnd));
-
-       for (i = 0; i <= omask; i++) {
-               struct sock *sk;
-               struct hlist_node *tmp;
+       hash = rhashtable_hashfn(&table->hash, &portid, sizeof(portid));
 
-               sk_for_each_safe(sk, tmp, &otable[i])
-                       __sk_add_node(sk, nl_portid_hashfn(hash, nlk_sk(sk)->portid));
-       }
-
-       nl_portid_hash_free(otable, osize);
-       hash->rehash_time = jiffies + 10 * 60 * HZ;
-       return 1;
+       return rhashtable_lookup_compare(&table->hash, hash,
+                                        &netlink_compare, &arg);
 }
 
-static inline int nl_portid_hash_dilute(struct nl_portid_hash *hash, int len)
+static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
 {
-       int avg = hash->entries >> hash->shift;
-
-       if (unlikely(avg > 1) && nl_portid_hash_rehash(hash, 1))
-               return 1;
+       struct netlink_table *table = &nl_table[protocol];
+       struct sock *sk;
 
-       if (unlikely(len > avg) && time_after(jiffies, hash->rehash_time)) {
-               nl_portid_hash_rehash(hash, 0);
-               return 1;
-       }
+       rcu_read_lock();
+       sk = __netlink_lookup(table, portid, net);
+       if (sk)
+               sock_hold(sk);
+       rcu_read_unlock();
 
-       return 0;
+       return sk;
 }
 
 static const struct proto_ops netlink_ops;
@@ -1115,22 +1065,10 @@ netlink_update_listeners(struct sock *sk)
 static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
 {
        struct netlink_table *table = &nl_table[sk->sk_protocol];
-       struct nl_portid_hash *hash = &table->hash;
-       struct hlist_head *head;
        int err = -EADDRINUSE;
-       struct sock *osk;
-       int len;
 
-       netlink_table_grab();
-       head = nl_portid_hashfn(hash, portid);
-       len = 0;
-       sk_for_each(osk, head) {
-               if (table->compare(net, osk) &&
-                   (nlk_sk(osk)->portid == portid))
-                       break;
-               len++;
-       }
-       if (osk)
+       mutex_lock(&nl_sk_hash_lock);
+       if (__netlink_lookup(table, portid, net))
                goto err;
 
        err = -EBUSY;
@@ -1138,26 +1076,31 @@ static int netlink_insert(struct sock *sk, struct net *net, u32 portid)
                goto err;
 
        err = -ENOMEM;
-       if (BITS_PER_LONG > 32 && unlikely(hash->entries >= UINT_MAX))
+       if (BITS_PER_LONG > 32 && unlikely(table->hash.nelems >= UINT_MAX))
                goto err;
 
-       if (len && nl_portid_hash_dilute(hash, len))
-               head = nl_portid_hashfn(hash, portid);
-       hash->entries++;
        nlk_sk(sk)->portid = portid;
-       sk_add_node(sk, head);
+       sock_hold(sk);
+       rhashtable_insert(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL);
        err = 0;
-
 err:
-       netlink_table_ungrab();
+       mutex_unlock(&nl_sk_hash_lock);
        return err;
 }
 
 static void netlink_remove(struct sock *sk)
 {
+       struct netlink_table *table;
+
+       mutex_lock(&nl_sk_hash_lock);
+       table = &nl_table[sk->sk_protocol];
+       if (rhashtable_remove(&table->hash, &nlk_sk(sk)->node, GFP_KERNEL)) {
+               WARN_ON(atomic_read(&sk->sk_refcnt) == 1);
+               __sock_put(sk);
+       }
+       mutex_unlock(&nl_sk_hash_lock);
+
        netlink_table_grab();
-       if (sk_del_node_init(sk))
-               nl_table[sk->sk_protocol].hash.entries--;
        if (nlk_sk(sk)->subscriptions)
                __sk_del_bind_node(sk);
        netlink_table_ungrab();
@@ -1313,6 +1256,9 @@ static int netlink_release(struct socket *sock)
        }
        netlink_table_ungrab();
 
+       /* Wait for readers to complete */
+       synchronize_net();
+
        kfree(nlk->groups);
        nlk->groups = NULL;
 
@@ -1328,30 +1274,22 @@ static int netlink_autobind(struct socket *sock)
        struct sock *sk = sock->sk;
        struct net *net = sock_net(sk);
        struct netlink_table *table = &nl_table[sk->sk_protocol];
-       struct nl_portid_hash *hash = &table->hash;
-       struct hlist_head *head;
-       struct sock *osk;
        s32 portid = task_tgid_vnr(current);
        int err;
        static s32 rover = -4097;
 
 retry:
        cond_resched();
-       netlink_table_grab();
-       head = nl_portid_hashfn(hash, portid);
-       sk_for_each(osk, head) {
-               if (!table->compare(net, osk))
-                       continue;
-               if (nlk_sk(osk)->portid == portid) {
-                       /* Bind collision, search negative portid values. */
-                       portid = rover--;
-                       if (rover > -4097)
-                               rover = -4097;
-                       netlink_table_ungrab();
-                       goto retry;
-               }
+       rcu_read_lock();
+       if (__netlink_lookup(table, portid, net)) {
+               /* Bind collision, search negative portid values. */
+               portid = rover--;
+               if (rover > -4097)
+                       rover = -4097;
+               rcu_read_unlock();
+               goto retry;
        }
-       netlink_table_ungrab();
+       rcu_read_unlock();
 
        err = netlink_insert(sk, net, portid);
        if (err == -EADDRINUSE)
@@ -1961,25 +1899,25 @@ struct netlink_broadcast_data {
        void *tx_data;
 };
 
-static int do_one_broadcast(struct sock *sk,
-                                  struct netlink_broadcast_data *p)
+static void do_one_broadcast(struct sock *sk,
+                                   struct netlink_broadcast_data *p)
 {
        struct netlink_sock *nlk = nlk_sk(sk);
        int val;
 
        if (p->exclude_sk == sk)
-               goto out;
+               return;
 
        if (nlk->portid == p->portid || p->group - 1 >= nlk->ngroups ||
            !test_bit(p->group - 1, nlk->groups))
-               goto out;
+               return;
 
        if (!net_eq(sock_net(sk), p->net))
-               goto out;
+               return;
 
        if (p->failure) {
                netlink_overrun(sk);
-               goto out;
+               return;
        }
 
        sock_hold(sk);
@@ -2017,9 +1955,6 @@ static int do_one_broadcast(struct sock *sk,
                p->skb2 = NULL;
        }
        sock_put(sk);
-
-out:
-       return 0;
 }
 
 int netlink_broadcast_filtered(struct sock *ssk, struct sk_buff *skb, u32 portid,
@@ -2483,7 +2418,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
            atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
                ret = netlink_dump(sk);
                if (ret) {
-                       sk->sk_err = ret;
+                       sk->sk_err = -ret;
                        sk->sk_error_report(sk);
                }
        }
@@ -2958,14 +2893,18 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 {
        struct nl_seq_iter *iter = seq->private;
        int i, j;
+       struct netlink_sock *nlk;
        struct sock *s;
        loff_t off = 0;
 
        for (i = 0; i < MAX_LINKS; i++) {
-               struct nl_portid_hash *hash = &nl_table[i].hash;
+               struct rhashtable *ht = &nl_table[i].hash;
+               const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
+
+               for (j = 0; j < tbl->size; j++) {
+                       rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
+                               s = (struct sock *)nlk;
 
-               for (j = 0; j <= hash->mask; j++) {
-                       sk_for_each(s, &hash->table[j]) {
                                if (sock_net(s) != seq_file_net(seq))
                                        continue;
                                if (off == pos) {
@@ -2981,15 +2920,14 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
 }
 
 static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(nl_table_lock)
 {
-       read_lock(&nl_table_lock);
+       rcu_read_lock();
        return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
 }
 
 static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
-       struct sock *s;
+       struct netlink_sock *nlk;
        struct nl_seq_iter *iter;
        struct net *net;
        int i, j;
@@ -3001,28 +2939,26 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 
        net = seq_file_net(seq);
        iter = seq->private;
-       s = v;
-       do {
-               s = sk_next(s);
-       } while (s && !nl_table[s->sk_protocol].compare(net, s));
-       if (s)
-               return s;
+       nlk = v;
+
+       rht_for_each_entry_rcu(nlk, nlk->node.next, node)
+               if (net_eq(sock_net((struct sock *)nlk), net))
+                       return nlk;
 
        i = iter->link;
        j = iter->hash_idx + 1;
 
        do {
-               struct nl_portid_hash *hash = &nl_table[i].hash;
-
-               for (; j <= hash->mask; j++) {
-                       s = sk_head(&hash->table[j]);
+               struct rhashtable *ht = &nl_table[i].hash;
+               const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
 
-                       while (s && !nl_table[s->sk_protocol].compare(net, s))
-                               s = sk_next(s);
-                       if (s) {
-                               iter->link = i;
-                               iter->hash_idx = j;
-                               return s;
+               for (; j < tbl->size; j++) {
+                       rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) {
+                               if (net_eq(sock_net((struct sock *)nlk), net)) {
+                                       iter->link = i;
+                                       iter->hash_idx = j;
+                                       return nlk;
+                               }
                        }
                }
 
@@ -3033,9 +2969,8 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 }
 
 static void netlink_seq_stop(struct seq_file *seq, void *v)
-       __releases(nl_table_lock)
 {
-       read_unlock(&nl_table_lock);
+       rcu_read_unlock();
 }
 
 
@@ -3173,9 +3108,17 @@ static struct pernet_operations __net_initdata netlink_net_ops = {
 static int __init netlink_proto_init(void)
 {
        int i;
-       unsigned long limit;
-       unsigned int order;
        int err = proto_register(&netlink_proto, 0);
+       struct rhashtable_params ht_params = {
+               .head_offset = offsetof(struct netlink_sock, node),
+               .key_offset = offsetof(struct netlink_sock, portid),
+               .key_len = sizeof(u32), /* portid */
+               .hashfn = arch_fast_hash,
+               .max_shift = 16, /* 64K */
+               .grow_decision = rht_grow_above_75,
+               .shrink_decision = rht_shrink_below_30,
+               .mutex_is_held = lockdep_nl_sk_hash_is_held,
+       };
 
        if (err != 0)
                goto out;
@@ -3186,32 +3129,13 @@ static int __init netlink_proto_init(void)
        if (!nl_table)
                goto panic;
 
-       if (totalram_pages >= (128 * 1024))
-               limit = totalram_pages >> (21 - PAGE_SHIFT);
-       else
-               limit = totalram_pages >> (23 - PAGE_SHIFT);
-
-       order = get_bitmask_order(limit) - 1 + PAGE_SHIFT;
-       limit = (1UL << order) / sizeof(struct hlist_head);
-       order = get_bitmask_order(min(limit, (unsigned long)UINT_MAX)) - 1;
-
        for (i = 0; i < MAX_LINKS; i++) {
-               struct nl_portid_hash *hash = &nl_table[i].hash;
-
-               hash->table = nl_portid_hash_zalloc(1 * sizeof(*hash->table));
-               if (!hash->table) {
-                       while (i-- > 0)
-                               nl_portid_hash_free(nl_table[i].hash.table,
-                                                1 * sizeof(*hash->table));
+               if (rhashtable_init(&nl_table[i].hash, &ht_params) < 0) {
+                       while (i-- > 0)
+                               rhashtable_destroy(&nl_table[i].hash);
                        kfree(nl_table);
                        goto panic;
                }
-               hash->max_shift = order;
-               hash->shift = 0;
-               hash->mask = 0;
-               hash->rehash_time = jiffies;
-
-               nl_table[i].compare = netlink_compare;
        }
 
        INIT_LIST_HEAD(&netlink_tap_all);
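
The af_netlink.c conversion above replaces the hand-rolled, rwlock-protected
portid hash with the new resizable, RCU-based rhashtable: readers look up
under rcu_read_lock() with a caller-supplied compare callback (portid plus
owning netns), while inserts and removals serialize on nl_sk_hash_lock. A
minimal sketch of that lookup pattern against the rhashtable API as used in
the hunks above; everything except the rhashtable calls is hypothetical:

    #include <linux/rhashtable.h>

    struct sketch_obj {
            u32 key;
            struct rhash_head node;
    };

    struct sketch_cmp_arg {
            u32 key;
    };

    static bool sketch_cmp(void *ptr, void *arg)
    {
            struct sketch_obj *obj = ptr;
            struct sketch_cmp_arg *x = arg;

            return obj->key == x->key;
    }

    static struct sketch_obj *sketch_lookup(struct rhashtable *ht, u32 key)
    {
            struct sketch_cmp_arg arg = { .key = key };
            u32 hash;

            /* Hash only the key; the compare callback applies any extra
             * criteria (netlink_compare() also checks the netns). */
            hash = rhashtable_hashfn(ht, &key, sizeof(key));
            return rhashtable_lookup_compare(ht, hash, &sketch_cmp, &arg);
    }

Writers pair rhashtable_insert(ht, &obj->node, GFP_KERNEL) with
rhashtable_remove(ht, &obj->node, GFP_KERNEL) under a mutex, exactly as
netlink_insert() and netlink_remove() do above.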
index 0b59d44..60f631f 100644
@@ -1,6 +1,7 @@
 #ifndef _AF_NETLINK_H
 #define _AF_NETLINK_H
 
+#include <linux/rhashtable.h>
 #include <net/sock.h>
 
 #define NLGRPSZ(x)     (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -47,6 +48,8 @@ struct netlink_sock {
        struct netlink_ring     tx_ring;
        atomic_t                mapped;
 #endif /* CONFIG_NETLINK_MMAP */
+
+       struct rhash_head       node;
 };
 
 static inline struct netlink_sock *nlk_sk(struct sock *sk)
@@ -54,21 +57,8 @@ static inline struct netlink_sock *nlk_sk(struct sock *sk)
        return container_of(sk, struct netlink_sock, sk);
 }
 
-struct nl_portid_hash {
-       struct hlist_head       *table;
-       unsigned long           rehash_time;
-
-       unsigned int            mask;
-       unsigned int            shift;
-
-       unsigned int            entries;
-       unsigned int            max_shift;
-
-       u32                     rnd;
-};
-
 struct netlink_table {
-       struct nl_portid_hash   hash;
+       struct rhashtable       hash;
        struct hlist_head       mc_list;
        struct listeners __rcu  *listeners;
        unsigned int            flags;
index 1af2962..7301850 100644
@@ -4,6 +4,7 @@
 #include <linux/netlink.h>
 #include <linux/sock_diag.h>
 #include <linux/netlink_diag.h>
+#include <linux/rhashtable.h>
 
 #include "af_netlink.h"
 
@@ -101,16 +102,20 @@ static int __netlink_diag_dump(struct sk_buff *skb, struct netlink_callback *cb,
                                int protocol, int s_num)
 {
        struct netlink_table *tbl = &nl_table[protocol];
-       struct nl_portid_hash *hash = &tbl->hash;
+       struct rhashtable *ht = &tbl->hash;
+       const struct bucket_table *htbl = rht_dereference(ht->tbl, ht);
        struct net *net = sock_net(skb->sk);
        struct netlink_diag_req *req;
+       struct netlink_sock *nlsk;
        struct sock *sk;
        int ret = 0, num = 0, i;
 
        req = nlmsg_data(cb->nlh);
 
-       for (i = 0; i <= hash->mask; i++) {
-               sk_for_each(sk, &hash->table[i]) {
+       for (i = 0; i < htbl->size; i++) {
+               rht_for_each_entry(nlsk, htbl->buckets[i], ht, node) {
+                       sk = (struct sock *)nlsk;
+
                        if (!net_eq(sock_net(sk), net))
                                continue;
                        if (num < s_num) {
index ede50d1..71cf1bf 100644
@@ -1418,7 +1418,7 @@ static int __init nr_proto_init(void)
                struct net_device *dev;
 
                sprintf(name, "nr%d", i);
-               dev = alloc_netdev(0, name, nr_setup);
+               dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);
                if (!dev) {
                        printk(KERN_ERR "NET/ROM: nr_proto_init - unable to allocate device structure\n");
                        goto fail;
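
alloc_netdev() grew a name_assign_type parameter in this cycle, so every
caller gains an argument; NET_NAME_UNKNOWN is the conservative value for
pre-formatted names such as the "nr%d" devices here. The new four-argument
shape, shown with this caller's values:

    /* alloc_netdev(sizeof_priv, name, name_assign_type, setup) */
    dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, nr_setup);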
index 7cc1830..e1638da 100644
@@ -457,12 +457,10 @@ static void digital_tg_recv_dep_req(struct nfc_digital_dev *ddev, void *arg,
                pr_err("Received a ACK/NACK PDU\n");
                rc = -EINVAL;
                goto exit;
-               break;
        case DIGITAL_NFC_DEP_PFB_SUPERVISOR_PDU:
                pr_err("Received a SUPERVISOR PDU\n");
                rc = -EINVAL;
                goto exit;
-               break;
        }
 
        skb_pull(resp, size);
index c36856a..fe5cda0 100644
@@ -38,7 +38,7 @@
 #include "vport.h"
 
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-                       const struct nlattr *attr, int len, bool keep_skb);
+                             const struct nlattr *attr, int len);
 
 static int make_writable(struct sk_buff *skb, int write_len)
 {
@@ -434,11 +434,17 @@ static int output_userspace(struct datapath *dp, struct sk_buff *skb,
        return ovs_dp_upcall(dp, skb, &upcall);
 }
 
+static bool last_action(const struct nlattr *a, int rem)
+{
+       return a->nla_len == rem;
+}
+
 static int sample(struct datapath *dp, struct sk_buff *skb,
                  const struct nlattr *attr)
 {
        const struct nlattr *acts_list = NULL;
        const struct nlattr *a;
+       struct sk_buff *sample_skb;
        int rem;
 
        for (a = nla_data(attr), rem = nla_len(attr); rem > 0;
@@ -455,8 +461,34 @@ static int sample(struct datapath *dp, struct sk_buff *skb,
                }
        }
 
-       return do_execute_actions(dp, skb, nla_data(acts_list),
-                                                nla_len(acts_list), true);
+       rem = nla_len(acts_list);
+       a = nla_data(acts_list);
+
+       /* Actions list is either empty or only contains a single user-space
+        * action, the latter being a special case as it is the only known
+        * usage of the sample action.
+        * In these special cases don't clone the skb as there are no
+        * side-effects in the nested actions.
+        * Otherwise, clone in case the nested actions have side effects.
+        */
+       if (likely(rem == 0 || (nla_type(a) == OVS_ACTION_ATTR_USERSPACE &&
+                               last_action(a, rem)))) {
+               sample_skb = skb;
+               skb_get(skb);
+       } else {
+               sample_skb = skb_clone(skb, GFP_ATOMIC);
+               if (!sample_skb) /* Skip sample action when out of memory. */
+                       return 0;
+       }
+
+       /* Note that do_execute_actions() never consumes skb.
+        * In the case where skb has been cloned above it is the clone that
+        * is consumed.  Otherwise the skb_get(skb) call prevents
+        * consumption by do_execute_actions(). Thus, it is safe to simply
+        * return the error code and let the caller (also
+        * do_execute_actions()) free skb on error.
+        */
+       return do_execute_actions(dp, sample_skb, a, rem);
 }
 
 static int execute_set_action(struct sk_buff *skb,
@@ -507,7 +539,7 @@ static int execute_set_action(struct sk_buff *skb,
 
 /* Execute a list of actions against 'skb'. */
 static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
-                       const struct nlattr *attr, int len, bool keep_skb)
+                             const struct nlattr *attr, int len)
 {
        /* Every output action needs a separate clone of 'skb', but the common
         * case is just a single output action, so that doing a clone and
@@ -551,6 +583,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
 
                case OVS_ACTION_ATTR_SAMPLE:
                        err = sample(dp, skb, a);
+                       if (unlikely(err)) /* skb already freed. */
+                               return err;
                        break;
                }
 
@@ -560,12 +594,9 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
                }
        }
 
-       if (prev_port != -1) {
-               if (keep_skb)
-                       skb = skb_clone(skb, GFP_ATOMIC);
-
+       if (prev_port != -1)
                do_output(dp, skb, prev_port);
-       } else if (!keep_skb)
+       else
                consume_skb(skb);
 
        return 0;
@@ -577,6 +608,5 @@ int ovs_execute_actions(struct datapath *dp, struct sk_buff *skb)
        struct sw_flow_actions *acts = rcu_dereference(OVS_CB(skb)->flow->sf_acts);
 
        OVS_CB(skb)->tun_key = NULL;
-       return do_execute_actions(dp, skb, acts->actions,
-                                        acts->actions_len, false);
+       return do_execute_actions(dp, skb, acts->actions, acts->actions_len);
 }
index 0d407bc..7ad3f02 100644
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -66,16 +66,16 @@ static struct genl_family dp_packet_genl_family;
 static struct genl_family dp_flow_genl_family;
 static struct genl_family dp_datapath_genl_family;
 
-static struct genl_multicast_group ovs_dp_flow_multicast_group = {
-       .name = OVS_FLOW_MCGROUP
+static const struct genl_multicast_group ovs_dp_flow_multicast_group = {
+       .name = OVS_FLOW_MCGROUP,
 };
 
-static struct genl_multicast_group ovs_dp_datapath_multicast_group = {
-       .name = OVS_DATAPATH_MCGROUP
+static const struct genl_multicast_group ovs_dp_datapath_multicast_group = {
+       .name = OVS_DATAPATH_MCGROUP,
 };
 
-struct genl_multicast_group ovs_dp_vport_multicast_group = {
-       .name = OVS_VPORT_MCGROUP
+static const struct genl_multicast_group ovs_dp_vport_multicast_group = {
+       .name = OVS_VPORT_MCGROUP,
 };
 
 /* Check if we need to build a reply message.
@@ -266,7 +266,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
                upcall.cmd = OVS_PACKET_CMD_MISS;
                upcall.key = &key;
                upcall.userdata = NULL;
-               upcall.portid = p->upcall_portid;
+               upcall.portid = ovs_vport_find_upcall_portid(p, skb);
                ovs_dp_upcall(dp, skb, &upcall);
                consume_skb(skb);
                stats_counter = &stats->n_missed;
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
        OVS_CB(skb)->flow = flow;
        OVS_CB(skb)->pkt_key = &key;
 
-       ovs_flow_stats_update(OVS_CB(skb)->flow, skb);
+       ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
        ovs_execute_actions(dp, skb);
        stats_counter = &stats->n_hit;
 
@@ -464,7 +464,8 @@ static int queue_userspace_packet(struct datapath *dp, struct sk_buff *skb,
        upcall->dp_ifindex = dp_ifindex;
 
        nla = nla_nest_start(user_skb, OVS_PACKET_ATTR_KEY);
-       ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
+       err = ovs_nla_put_flow(upcall_info->key, upcall_info->key, user_skb);
+       BUG_ON(err);
        nla_nest_end(user_skb, nla);
 
        if (upcall_info->userdata)
@@ -780,7 +781,7 @@ static struct sk_buff *ovs_flow_cmd_build_info(const struct sw_flow *flow,
 
        skb = ovs_flow_cmd_alloc_info(ovsl_dereference(flow->sf_acts), info,
                                      always);
-       if (!skb || IS_ERR(skb))
+       if (IS_ERR_OR_NULL(skb))
                return skb;
 
        retval = ovs_flow_cmd_fill_info(flow, dp_ifindex, skb,
@@ -889,8 +890,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
                }
                /* The unmasked key has to be the same for flow updates. */
                if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-                       error = -EEXIST;
-                       goto err_unlock_ovs;
+                       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+                       if (!flow) {
+                               error = -ENOENT;
+                               goto err_unlock_ovs;
+                       }
                }
                /* Update actions. */
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +985,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
                goto err_unlock_ovs;
        }
        /* Check that the flow exists. */
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
        if (unlikely(!flow)) {
                error = -ENOENT;
                goto err_unlock_ovs;
        }
-       /* The unmasked key has to be the same for flow updates. */
-       if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
-               error = -EEXIST;
-               goto err_unlock_ovs;
-       }
+
        /* Update actions, if present. */
        if (likely(acts)) {
                old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1063,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (!flow) {
                err = -ENOENT;
                goto unlock;
        }
@@ -1113,8 +1113,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
                goto unlock;
        }
 
-       flow = ovs_flow_tbl_lookup(&dp->table, &key);
-       if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) {
+       flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
+       if (unlikely(!flow)) {
                err = -ENOENT;
                goto unlock;
        }
@@ -1190,7 +1190,7 @@ static const struct nla_policy flow_policy[OVS_FLOW_ATTR_MAX + 1] = {
        [OVS_FLOW_ATTR_CLEAR] = { .type = NLA_FLAG },
 };
 
-static struct genl_ops dp_flow_genl_ops[] = {
+static const struct genl_ops dp_flow_genl_ops[] = {
        { .cmd = OVS_FLOW_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = flow_policy,
@@ -1374,7 +1374,7 @@ static int ovs_dp_cmd_new(struct sk_buff *skb, struct genl_info *info)
        parms.options = NULL;
        parms.dp = dp;
        parms.port_no = OVSP_LOCAL;
-       parms.upcall_portid = nla_get_u32(a[OVS_DP_ATTR_UPCALL_PID]);
+       parms.upcall_portids = a[OVS_DP_ATTR_UPCALL_PID];
 
        ovs_dp_change(dp, a);
 
@@ -1578,7 +1578,7 @@ static const struct nla_policy datapath_policy[OVS_DP_ATTR_MAX + 1] = {
        [OVS_DP_ATTR_USER_FEATURES] = { .type = NLA_U32 },
 };
 
-static struct genl_ops dp_datapath_genl_ops[] = {
+static const struct genl_ops dp_datapath_genl_ops[] = {
        { .cmd = OVS_DP_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = datapath_policy,
@@ -1633,8 +1633,8 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
 
        if (nla_put_u32(skb, OVS_VPORT_ATTR_PORT_NO, vport->port_no) ||
            nla_put_u32(skb, OVS_VPORT_ATTR_TYPE, vport->ops->type) ||
-           nla_put_string(skb, OVS_VPORT_ATTR_NAME, vport->ops->get_name(vport)) ||
-           nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, vport->upcall_portid))
+           nla_put_string(skb, OVS_VPORT_ATTR_NAME,
+                          vport->ops->get_name(vport)))
                goto nla_put_failure;
 
        ovs_vport_get_stats(vport, &vport_stats);
@@ -1642,6 +1642,9 @@ static int ovs_vport_cmd_fill_info(struct vport *vport, struct sk_buff *skb,
                    &vport_stats))
                goto nla_put_failure;
 
+       if (ovs_vport_get_upcall_portids(vport, skb))
+               goto nla_put_failure;
+
        err = ovs_vport_get_options(vport, skb);
        if (err == -EMSGSIZE)
                goto error;
@@ -1763,7 +1766,7 @@ static int ovs_vport_cmd_new(struct sk_buff *skb, struct genl_info *info)
        parms.options = a[OVS_VPORT_ATTR_OPTIONS];
        parms.dp = dp;
        parms.port_no = port_no;
-       parms.upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+       parms.upcall_portids = a[OVS_VPORT_ATTR_UPCALL_PID];
 
        vport = new_vport(&parms);
        err = PTR_ERR(vport);
@@ -1813,8 +1816,14 @@ static int ovs_vport_cmd_set(struct sk_buff *skb, struct genl_info *info)
                        goto exit_unlock_free;
        }
 
-       if (a[OVS_VPORT_ATTR_UPCALL_PID])
-               vport->upcall_portid = nla_get_u32(a[OVS_VPORT_ATTR_UPCALL_PID]);
+
+       if (a[OVS_VPORT_ATTR_UPCALL_PID]) {
+               struct nlattr *ids = a[OVS_VPORT_ATTR_UPCALL_PID];
+
+               err = ovs_vport_set_upcall_portids(vport, ids);
+               if (err)
+                       goto exit_unlock_free;
+       }
 
        err = ovs_vport_cmd_fill_info(vport, reply, info->snd_portid,
                                      info->snd_seq, 0, OVS_VPORT_CMD_NEW);
@@ -1945,7 +1954,7 @@ static const struct nla_policy vport_policy[OVS_VPORT_ATTR_MAX + 1] = {
        [OVS_VPORT_ATTR_OPTIONS] = { .type = NLA_NESTED },
 };
 
-static struct genl_ops dp_vport_genl_ops[] = {
+static const struct genl_ops dp_vport_genl_ops[] = {
        { .cmd = OVS_VPORT_CMD_NEW,
          .flags = GENL_ADMIN_PERM, /* Requires CAP_NET_ADMIN privilege. */
          .policy = vport_policy,
@@ -2054,10 +2063,14 @@ static int __init dp_init(void)
 
        pr_info("Open vSwitch switching datapath\n");
 
-       err = ovs_flow_init();
+       err = ovs_internal_dev_rtnl_link_register();
        if (err)
                goto error;
 
+       err = ovs_flow_init();
+       if (err)
+               goto error_unreg_rtnl_link;
+
        err = ovs_vport_init();
        if (err)
                goto error_flow_exit;
@@ -2084,6 +2097,8 @@ error_vport_exit:
        ovs_vport_exit();
 error_flow_exit:
        ovs_flow_exit();
+error_unreg_rtnl_link:
+       ovs_internal_dev_rtnl_link_unregister();
 error:
        return err;
 }
@@ -2096,6 +2111,7 @@ static void dp_cleanup(void)
        rcu_barrier();
        ovs_vport_exit();
        ovs_flow_exit();
+       ovs_internal_dev_rtnl_link_unregister();
 }
 
 module_init(dp_init);
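
The dp_init() reordering above follows the usual mirror-image unwind rule: whatever is registered first is unregistered last, and each error label undoes exactly the steps that succeeded before it. A minimal sketch of the idiom, with hypothetical register_a()/register_b() standing in for the real init calls:

	static int __init example_init(void)
	{
		int err;

		err = register_a();		/* first in ... */
		if (err)
			goto error;

		err = register_b();
		if (err)
			goto error_unreg_a;

		return 0;

	error_unreg_a:
		unregister_a();			/* ... last out */
	error:
		return err;
	}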
index 7ede507..701b573 100644 (file)
@@ -144,7 +144,7 @@ int lockdep_ovsl_is_held(void);
 #define lockdep_ovsl_is_held() 1
 #endif
 
-#define ASSERT_OVSL()          WARN_ON(unlikely(!lockdep_ovsl_is_held()))
+#define ASSERT_OVSL()          WARN_ON(!lockdep_ovsl_is_held())
 #define ovsl_dereference(p)                                    \
        rcu_dereference_protected(p, lockdep_ovsl_is_held())
 #define rcu_dereference_ovsl(p)                                        \
index 334751c..d07ab53 100644 (file)
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
 
 #define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
 
-void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb)
+void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
+                          struct sk_buff *skb)
 {
        struct flow_stats *stats;
-       __be16 tcp_flags = flow->key.tp.flags;
        int node = numa_node_id();
 
        stats = rcu_dereference(flow->stats[node]);
index ac395d2..5e5aaed 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2007-2013 Nicira, Inc.
+ * Copyright (c) 2007-2014 Nicira, Inc.
  *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
        unsigned char       ar_tip[4];          /* target IP address        */
 } __packed;
 
-void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *);
+void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
+                          struct sk_buff *);
 void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
                        unsigned long *used, __be16 *tcp_flags);
 void ovs_flow_stats_clear(struct sw_flow *);
index 574c3ab..cf2d853 100644 (file)
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
        return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
 }
 
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+                                         struct sw_flow_match *match)
+{
+       struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
+       struct sw_flow_mask *mask;
+       struct sw_flow *flow;
+
+       /* Always called under ovs-mutex. */
+       list_for_each_entry(mask, &tbl->mask_list, list) {
+               flow = masked_flow_lookup(ti, match->key, mask);
+               if (flow && ovs_flow_cmp_unmasked_key(flow, match))  /* Found */
+                       return flow;
+       }
+       return NULL;
+}
+
 int ovs_flow_tbl_num_masks(const struct flow_table *table)
 {
        struct sw_flow_mask *mask;
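
ovs_flow_tbl_lookup() returns the first flow whose masked key matches, so two flows that differ only in bits outside the mask are indistinguishable to it; ovs_flow_tbl_lookup_exact() walks every mask and re-checks the full unmasked key. A usage sketch mirroring the datapath.c callers patched above:

	/* Under ovs_mutex: resolve exactly the flow userspace named. */
	flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
	if (unlikely(!flow)) {
		error = -ENOENT;	/* no flow with this exact unmasked key */
		goto err_unlock_ovs;
	}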
index ca8a582..5918bff 100644 (file)
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
                                    u32 *n_mask_hit);
 struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
                                    const struct sw_flow_key *);
-
+struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
+                                         struct sw_flow_match *match);
 bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
                               struct sw_flow_match *match);
 
index 35ec4fe..f49148a 100644 (file)
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
        return PACKET_RCVD;
 }
 
+/* Called with rcu_read_lock() held and BH disabled. */
+static int gre_err(struct sk_buff *skb, u32 info,
+                  const struct tnl_ptk_info *tpi)
+{
+       struct ovs_net *ovs_net;
+       struct vport *vport;
+
+       ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
+       vport = rcu_dereference(ovs_net->vport_net.gre_vport);
+
+       if (unlikely(!vport))
+               return PACKET_REJECT;
+       else
+               return PACKET_RCVD;
+}
+
 static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
 {
        struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
 
 static struct gre_cisco_protocol gre_protocol = {
        .handler        = gre_rcv,
+       .err_handler    = gre_err,
        .priority       = 1,
 };
 
index 789af92..8451612 100644 (file)
@@ -26,6 +26,7 @@
 
 #include <net/dst.h>
 #include <net/xfrm.h>
+#include <net/rtnetlink.h>
 
 #include "datapath.h"
 #include "vport-internal_dev.h"
@@ -121,6 +122,10 @@ static const struct net_device_ops internal_dev_netdev_ops = {
        .ndo_get_stats64 = internal_dev_get_stats,
 };
 
+static struct rtnl_link_ops internal_dev_link_ops __read_mostly = {
+       .kind = "openvswitch",
+};
+
 static void do_setup(struct net_device *netdev)
 {
        ether_setup(netdev);
@@ -131,14 +136,18 @@ static void do_setup(struct net_device *netdev)
        netdev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
        netdev->destructor = internal_dev_destructor;
        netdev->ethtool_ops = &internal_dev_ethtool_ops;
+       netdev->rtnl_link_ops = &internal_dev_link_ops;
        netdev->tx_queue_len = 0;
 
        netdev->features = NETIF_F_LLTX | NETIF_F_SG | NETIF_F_FRAGLIST |
-                          NETIF_F_HIGHDMA | NETIF_F_HW_CSUM | NETIF_F_GSO_SOFTWARE;
+                          NETIF_F_HIGHDMA | NETIF_F_HW_CSUM |
+                          NETIF_F_GSO_SOFTWARE | NETIF_F_GSO_ENCAP_ALL;
 
        netdev->vlan_features = netdev->features;
+       netdev->hw_enc_features = netdev->features;
        netdev->features |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features = netdev->features & ~NETIF_F_LLTX;
+
        eth_hw_addr_random(netdev);
 }
 
@@ -159,7 +168,8 @@ static struct vport *internal_dev_create(const struct vport_parms *parms)
        netdev_vport = netdev_vport_priv(vport);
 
        netdev_vport->dev = alloc_netdev(sizeof(struct internal_dev),
-                                        parms->name, do_setup);
+                                        parms->name, NET_NAME_UNKNOWN,
+                                        do_setup);
        if (!netdev_vport->dev) {
                err = -ENOMEM;
                goto error_free_vport;
@@ -248,3 +258,13 @@ struct vport *ovs_internal_dev_get_vport(struct net_device *netdev)
 
        return internal_dev_priv(netdev)->vport;
 }
+
+int ovs_internal_dev_rtnl_link_register(void)
+{
+       return rtnl_link_register(&internal_dev_link_ops);
+}
+
+void ovs_internal_dev_rtnl_link_unregister(void)
+{
+       rtnl_link_unregister(&internal_dev_link_ops);
+}
index 9a7d30e..1b179a1 100644 (file)
@@ -24,5 +24,7 @@
 
 int ovs_is_internal_dev(const struct net_device *);
 struct vport *ovs_internal_dev_get_vport(struct net_device *);
+int ovs_internal_dev_rtnl_link_register(void);
+void ovs_internal_dev_rtnl_link_unregister(void);
 
 #endif /* vport-internal_dev.h */
index 0edbd95..d8b7e24 100644 (file)
@@ -143,8 +143,6 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
        struct rtable *rt;
        struct flowi4 fl;
        __be16 src_port;
-       int port_min;
-       int port_max;
        __be16 df;
        int err;
 
@@ -172,8 +170,7 @@ static int vxlan_tnl_send(struct vport *vport, struct sk_buff *skb)
 
        skb->ignore_df = 1;
 
-       inet_get_local_port_range(net, &port_min, &port_max);
-       src_port = vxlan_src_port(port_min, port_max, skb);
+       src_port = udp_flow_src_port(net, skb, 0, 0, true);
 
        err = vxlan_xmit_skb(vxlan_port->vs, rt, skb,
                             fl.saddr, OVS_CB(skb)->tun_key->ipv4_dst,
index 42c0f4a..702fb21 100644 (file)
@@ -134,10 +134,12 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
 
        vport->dp = parms->dp;
        vport->port_no = parms->port_no;
-       vport->upcall_portid = parms->upcall_portid;
        vport->ops = ops;
        INIT_HLIST_NODE(&vport->dp_hash_node);
 
+       if (ovs_vport_set_upcall_portids(vport, parms->upcall_portids))
+               return ERR_PTR(-EINVAL);
+
        vport->percpu_stats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!vport->percpu_stats) {
                kfree(vport);
@@ -161,6 +163,10 @@ struct vport *ovs_vport_alloc(int priv_size, const struct vport_ops *ops,
  */
 void ovs_vport_free(struct vport *vport)
 {
+       /* vport is freed from an RCU callback or the error path; therefore
+        * it is safe to use a raw dereference.
+        */
+       kfree(rcu_dereference_raw(vport->upcall_portids));
        free_percpu(vport->percpu_stats);
        kfree(vport);
 }
@@ -326,6 +332,99 @@ int ovs_vport_get_options(const struct vport *vport, struct sk_buff *skb)
        return 0;
 }
 
+/**
+ *     ovs_vport_set_upcall_portids - set upcall portids of @vport.
+ *
+ * @vport: vport to modify.
+ * @ids: new configuration, an array of port ids.
+ *
+ * Sets the vport's upcall_portids to @ids.
+ *
+ * Returns 0 if successful, -EINVAL if @ids is zero length or cannot be parsed
+ * as an array of u32 values.
+ *
+ * Must be called with ovs_mutex.
+ */
+int ovs_vport_set_upcall_portids(struct vport *vport, struct nlattr *ids)
+{
+       struct vport_portids *old, *vport_portids;
+
+       if (!nla_len(ids) || nla_len(ids) % sizeof(u32))
+               return -EINVAL;
+
+       old = ovsl_dereference(vport->upcall_portids);
+
+       vport_portids = kmalloc(sizeof(*vport_portids) + nla_len(ids),
+                               GFP_KERNEL);
+       if (!vport_portids)
+               return -ENOMEM;
+
+       vport_portids->n_ids = nla_len(ids) / sizeof(u32);
+       vport_portids->rn_ids = reciprocal_value(vport_portids->n_ids);
+       nla_memcpy(vport_portids->ids, ids, nla_len(ids));
+
+       rcu_assign_pointer(vport->upcall_portids, vport_portids);
+
+       if (old)
+               kfree_rcu(old, rcu);
+       return 0;
+}
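
ovs_vport_set_upcall_portids() is an instance of the standard RCU publish/retire sequence for swapping a pointer: build the new object in private memory, publish it with rcu_assign_pointer(), and retire the old copy only after a grace period. Reduced to its skeleton (names as in the function above):

	new = kmalloc(..., GFP_KERNEL);			/* build privately */
	...
	rcu_assign_pointer(vport->upcall_portids, new);	/* publish to readers */
	if (old)
		kfree_rcu(old, rcu);			/* free after grace period */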
+
+/**
+ *     ovs_vport_get_upcall_portids - get the upcall_portids of @vport.
+ *
+ * @vport: vport from which to retrieve the portids.
+ * @skb: sk_buff where portids should be appended.
+ *
+ * Retrieves the configuration of the given vport, appending the
+ * %OVS_VPORT_ATTR_UPCALL_PID attribute (the array of upcall portids)
+ * to @skb.
+ *
+ * Returns 0 if successful, -EMSGSIZE if @skb has insufficient room.
+ * If an error occurs, @skb is left unmodified.  Must be called with
+ * ovs_mutex or rcu_read_lock.
+ */
+int ovs_vport_get_upcall_portids(const struct vport *vport,
+                                struct sk_buff *skb)
+{
+       struct vport_portids *ids;
+
+       ids = rcu_dereference_ovsl(vport->upcall_portids);
+
+       if (vport->dp->user_features & OVS_DP_F_VPORT_PIDS)
+               return nla_put(skb, OVS_VPORT_ATTR_UPCALL_PID,
+                              ids->n_ids * sizeof(u32), (void *)ids->ids);
+       else
+               return nla_put_u32(skb, OVS_VPORT_ATTR_UPCALL_PID, ids->ids[0]);
+}
+
+/**
+ *     ovs_vport_find_upcall_portid - find the portid to send the upcall to.
+ *
+ * @vport: vport on which the missed packet was received.
+ * @skb: skb carrying the missed packet.
+ *
+ * Uses skb_get_hash() to select the upcall portid to which the upcall
+ * is sent.
+ *
+ * Returns the portid of the target socket.  Must be called with rcu_read_lock.
+ */
+u32 ovs_vport_find_upcall_portid(const struct vport *p, struct sk_buff *skb)
+{
+       struct vport_portids *ids;
+       u32 ids_index;
+       u32 hash;
+
+       ids = rcu_dereference(p->upcall_portids);
+
+       if (ids->n_ids == 1 && ids->ids[0] == 0)
+               return 0;
+
+       hash = skb_get_hash(skb);
+       ids_index = hash - ids->n_ids * reciprocal_divide(hash, ids->rn_ids);
+       return ids->ids[ids_index];
+}
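
The index computation above is a division-free modulo: reciprocal_divide() approximates hash / n_ids using a multiplier precomputed by reciprocal_value(), so hash - n_ids * quotient leaves hash % n_ids. A standalone userspace illustration of the same arithmetic with hypothetical values (simplified: the kernel's <linux/reciprocal_div.h> adds shift corrections so the quotient is exact for every divisor):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t n_ids = 3;		/* number of upcall sockets    */
		uint32_t hash  = 0x9e3779b9;	/* stand-in for skb_get_hash() */
		/* simplified reciprocal: m = ceil(2^32 / n_ids) */
		uint32_t m = (uint32_t)((((uint64_t)1 << 32) + n_ids - 1) / n_ids);
		uint32_t q = (uint32_t)(((uint64_t)hash * m) >> 32);	/* ~hash/n_ids */
		uint32_t index = hash - n_ids * q;	/* == hash % n_ids here */

		printf("index=%u check=%u\n", index, hash % n_ids);
		return 0;
	}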
+
 /**
  *     ovs_vport_receive - pass up received packet to the datapath for processing
  *
index 8d721e6..35f89d8 100644 (file)
@@ -23,6 +23,7 @@
 #include <linux/list.h>
 #include <linux/netlink.h>
 #include <linux/openvswitch.h>
+#include <linux/reciprocal_div.h>
 #include <linux/skbuff.h>
 #include <linux/spinlock.h>
 #include <linux/u64_stats_sync.h>
@@ -52,6 +53,10 @@ void ovs_vport_get_stats(struct vport *, struct ovs_vport_stats *);
 int ovs_vport_set_options(struct vport *, struct nlattr *options);
 int ovs_vport_get_options(const struct vport *, struct sk_buff *);
 
+int ovs_vport_set_upcall_portids(struct vport *, struct nlattr *pids);
+int ovs_vport_get_upcall_portids(const struct vport *, struct sk_buff *);
+u32 ovs_vport_find_upcall_portid(const struct vport *, struct sk_buff *);
+
 int ovs_vport_send(struct vport *, struct sk_buff *);
 
 /* The following definitions are for implementers of vport devices: */
@@ -62,13 +67,27 @@ struct vport_err_stats {
        u64 tx_dropped;
        u64 tx_errors;
 };
+/**
+ * struct vport_portids - array of netlink portids of a vport.
+ *                        Must be protected by RCU.
+ * @rn_ids: The reciprocal value of @n_ids.
+ * @rcu: RCU callback head for deferred destruction.
+ * @n_ids: Size of @ids array.
+ * @ids: Array storing the Netlink socket pids to be used for packets received
+ * on this port that miss the flow table.
+ */
+struct vport_portids {
+       struct reciprocal_value rn_ids;
+       struct rcu_head rcu;
+       u32 n_ids;
+       u32 ids[];
+};
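
@ids is a C99 flexible array member, so the header and the portid array live in one allocation; vport.c sizes it as sizeof(*vport_portids) + nla_len(ids). The idiom in miniature (userspace C, hypothetical struct and function names):

	#include <stdint.h>
	#include <stdlib.h>
	#include <string.h>

	struct portids {		/* stand-in for struct vport_portids */
		uint32_t n_ids;
		uint32_t ids[];		/* flexible array member */
	};

	static struct portids *portids_alloc(const uint32_t *src, uint32_t n)
	{
		struct portids *p = malloc(sizeof(*p) + n * sizeof(uint32_t));

		if (!p)
			return NULL;
		p->n_ids = n;
		memcpy(p->ids, src, n * sizeof(uint32_t));	/* cf. nla_memcpy() */
		return p;
	}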
 
 /**
  * struct vport - one port within a datapath
  * @rcu: RCU callback head for deferred destruction.
  * @dp: Datapath to which this port belongs.
- * @upcall_portid: The Netlink port to use for packets received on this port that
- * miss the flow table.
+ * @upcall_portids: RCU protected 'struct vport_portids'.
  * @port_no: Index into @dp's @ports array.
  * @hash_node: Element in @dev_table hash table in vport.c.
  * @dp_hash_node: Element in @datapath->ports hash table in datapath.c.
@@ -80,7 +99,7 @@ struct vport_err_stats {
 struct vport {
        struct rcu_head rcu;
        struct datapath *dp;
-       u32 upcall_portid;
+       struct vport_portids __rcu *upcall_portids;
        u16 port_no;
 
        struct hlist_node hash_node;
@@ -111,7 +130,7 @@ struct vport_parms {
        /* For ovs_vport_alloc(). */
        struct datapath *dp;
        u16 port_no;
-       u32 upcall_portid;
+       struct nlattr *upcall_portids;
 };
 
 /**
index b85c67c..8d9f804 100644 (file)
@@ -441,14 +441,10 @@ static __u32 tpacket_get_timestamp(struct sk_buff *skb, struct timespec *ts,
 {
        struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb);
 
-       if (shhwtstamps) {
-               if ((flags & SOF_TIMESTAMPING_SYS_HARDWARE) &&
-                   ktime_to_timespec_cond(shhwtstamps->syststamp, ts))
-                       return TP_STATUS_TS_SYS_HARDWARE;
-               if ((flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
-                   ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
-                       return TP_STATUS_TS_RAW_HARDWARE;
-       }
+       if (shhwtstamps &&
+           (flags & SOF_TIMESTAMPING_RAW_HARDWARE) &&
+           ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts))
+               return TP_STATUS_TS_RAW_HARDWARE;
 
        if (ktime_to_timespec_cond(skb->tstamp, ts))
                return TP_STATUS_TS_SOFTWARE;
@@ -3071,10 +3067,8 @@ static int packet_dev_mc(struct net_device *dev, struct packet_mclist *i,
                break;
        case PACKET_MR_PROMISC:
                return dev_set_promiscuity(dev, what);
-               break;
        case PACKET_MR_ALLMULTI:
                return dev_set_allmulti(dev, what);
-               break;
        case PACKET_MR_UNICAST:
                if (i->alen != dev->addr_len)
                        return -EINVAL;
index 66dc65e..e9a83a6 100644 (file)
@@ -267,7 +267,7 @@ int gprs_attach(struct sock *sk)
                return -EINVAL; /* need packet boundaries */
 
        /* Create net device */
-       dev = alloc_netdev(sizeof(*gp), ifname, gprs_setup);
+       dev = alloc_netdev(sizeof(*gp), ifname, NET_NAME_UNKNOWN, gprs_setup);
        if (!dev)
                return -ENOMEM;
        gp = netdev_priv(dev);
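
This and the alloc_netdev() hunks elsewhere in the pull track a signature change in this cycle: the allocator now takes a name-assignment-type argument, and NET_NAME_UNKNOWN keeps the old behaviour for callers that don't advertise how the name was chosen. The updated call shape, with hypothetical my_priv/my_setup names:

	dev = alloc_netdev(sizeof(struct my_priv),	/* private area size          */
			   "myif%d",			/* printf-style name template */
			   NET_NAME_UNKNOWN,		/* name origin: unspecified   */
			   my_setup);			/* setup callback             */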
index 8451c8c..a85c1a0 100644 (file)
@@ -1538,7 +1538,7 @@ static int __init rose_proto_init(void)
                char name[IFNAMSIZ];
 
                sprintf(name, "rose%d", i);
-               dev = alloc_netdev(0, name, rose_setup);
+               dev = alloc_netdev(0, name, NET_NAME_UNKNOWN, rose_setup);
                if (!dev) {
                        printk(KERN_ERR "ROSE: rose_proto_init - unable to allocate memory\n");
                        rc = -ENOMEM;
index 0ad0807..eec998e 100644 (file)
@@ -346,7 +346,7 @@ static int rxrpc_krb5_decode_tagged_array(struct krb5_tagged_data **_td,
 
        n_elem = ntohl(*xdr++);
        toklen -= 4;
-       if (n_elem < 0 || n_elem > max_n_elem)
+       if (n_elem > max_n_elem)
                return -EINVAL;
        *_n_elem = n_elem;
        if (n_elem > 0) {
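
n_elem holds the result of ntohl() and is unsigned here, so the removed n_elem < 0 test could never fire; the range check collapses to the upper bound alone. The pitfall in two lines (hypothetical fragment):

	uint32_t n = ntohl(*xdr);	/* unsigned: (n < 0) is always false     */
	if (n > max_n_elem)		/* the only check that can actually fail */
		return -EINVAL;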
index 4f912c0..eb48306 100644 (file)
@@ -218,10 +218,12 @@ static int mirred_device_event(struct notifier_block *unused,
 
        if (event == NETDEV_UNREGISTER)
                list_for_each_entry(m, &mirred_list, tcfm_list) {
+                       spin_lock_bh(&m->tcf_lock);
                        if (m->tcfm_dev == dev) {
                                dev_put(dev);
                                m->tcfm_dev = NULL;
                        }
+                       spin_unlock_bh(&m->tcf_lock);
                }
 
        return NOTIFY_DONE;
index 45527e6..c28b0d3 100644 (file)
@@ -561,13 +561,14 @@ EXPORT_SYMBOL(tcf_exts_change);
 int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
 {
 #ifdef CONFIG_NET_CLS_ACT
+       struct nlattr *nest;
+
        if (exts->action && !list_empty(&exts->actions)) {
                /*
                 * again for backward compatible mode - we want
                 * to work with both old and new modes of entering
                 * tc data even if iproute2  was newer - jhs
                 */
-               struct nlattr *nest;
                if (exts->type != TCA_OLD_COMPAT) {
                        nest = nla_nest_start(skb, exts->action);
                        if (nest == NULL)
@@ -585,10 +586,14 @@ int tcf_exts_dump(struct sk_buff *skb, struct tcf_exts *exts)
                        nla_nest_end(skb, nest);
                }
        }
-#endif
        return 0;
-nla_put_failure: __attribute__ ((unused))
+
+nla_put_failure:
+       nla_nest_cancel(skb, nest);
        return -1;
+#else
+       return 0;
+#endif
 }
 EXPORT_SYMBOL(tcf_exts_dump);
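
The fix pairs nla_nest_start() with nla_nest_end() on success and nla_nest_cancel() on failure, so a partially built nested attribute is trimmed off the skb instead of leaking into the dump. The idiom in skeleton form (SOME_ATTR is a placeholder attribute type):

	nest = nla_nest_start(skb, attrtype);
	if (nest == NULL)
		return -1;
	if (nla_put_u32(skb, SOME_ATTR, val))
		goto nla_put_failure;
	nla_nest_end(skb, nest);	/* commit: patches up the nest length */
	return 0;

nla_put_failure:
	nla_nest_cancel(skb, nest);	/* roll the skb back to before the nest */
	return -1;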
 
index 13f64df..0e30d58 100644 (file)
@@ -30,7 +30,7 @@ struct cls_bpf_head {
 };
 
 struct cls_bpf_prog {
-       struct sk_filter *filter;
+       struct bpf_prog *filter;
        struct sock_filter *bpf_ops;
        struct tcf_exts exts;
        struct tcf_result res;
@@ -54,7 +54,7 @@ static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
        int ret;
 
        list_for_each_entry(prog, &head->plist, link) {
-               int filter_res = SK_RUN_FILTER(prog->filter, skb);
+               int filter_res = BPF_PROG_RUN(prog->filter, skb);
 
                if (filter_res == 0)
                        continue;
@@ -92,7 +92,7 @@ static void cls_bpf_delete_prog(struct tcf_proto *tp, struct cls_bpf_prog *prog)
        tcf_unbind_filter(tp, &prog->res);
        tcf_exts_destroy(tp, &prog->exts);
 
-       sk_unattached_filter_destroy(prog->filter);
+       bpf_prog_destroy(prog->filter);
 
        kfree(prog->bpf_ops);
        kfree(prog);
@@ -161,7 +161,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        struct sock_filter *bpf_ops, *bpf_old;
        struct tcf_exts exts;
        struct sock_fprog_kern tmp;
-       struct sk_filter *fp, *fp_old;
+       struct bpf_prog *fp, *fp_old;
        u16 bpf_size, bpf_len;
        u32 classid;
        int ret;
@@ -193,7 +193,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        tmp.len = bpf_len;
        tmp.filter = bpf_ops;
 
-       ret = sk_unattached_filter_create(&fp, &tmp);
+       ret = bpf_prog_create(&fp, &tmp);
        if (ret)
                goto errout_free;
 
@@ -211,7 +211,7 @@ static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
        tcf_exts_change(tp, &prog->exts, &exts);
 
        if (fp_old)
-               sk_unattached_filter_destroy(fp_old);
+               bpf_prog_destroy(fp_old);
        if (bpf_old)
                kfree(bpf_old);
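
The cls_bpf churn in this file is the rename of the unattached socket-filter API to the bpf_prog naming; behaviour is unchanged. The mapping as it appears in these hunks:

	struct sk_filter                 ->  struct bpf_prog
	SK_RUN_FILTER(prog, skb)         ->  BPF_PROG_RUN(prog, skb)
	sk_unattached_filter_create()    ->  bpf_prog_create()
	sk_unattached_filter_destroy()   ->  bpf_prog_destroy()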
 
index c721cd4..3e9f764 100644 (file)
@@ -420,7 +420,7 @@ static void tcindex_destroy(struct tcf_proto *tp)
        pr_debug("tcindex_destroy(tp %p),p %p\n", tp, p);
        walker.count = 0;
        walker.skip = 0;
-       walker.fn = &tcindex_destroy_element;
+       walker.fn = tcindex_destroy_element;
        tcindex_walk(tp, &walker);
        kfree(p->perfect);
        kfree(p->h);
index c39b583..70c0be8 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/errno.h>
 #include <linux/rtnetlink.h>
 #include <linux/skbuff.h>
+#include <linux/bitmap.h>
 #include <net/netlink.h>
 #include <net/act_api.h>
 #include <net/pkt_cls.h>
@@ -460,17 +461,25 @@ static int u32_delete(struct tcf_proto *tp, unsigned long arg)
        return 0;
 }
 
+#define NR_U32_NODE (1<<12)
 static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
 {
        struct tc_u_knode *n;
-       unsigned int i = 0x7FF;
+       unsigned long i;
+       unsigned long *bitmap = kzalloc(BITS_TO_LONGS(NR_U32_NODE) * sizeof(unsigned long),
+                                       GFP_KERNEL);
+       if (!bitmap)
+               return handle | 0xFFF;
 
        for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
-               if (i < TC_U32_NODE(n->handle))
-                       i = TC_U32_NODE(n->handle);
-       i++;
+               set_bit(TC_U32_NODE(n->handle), bitmap);
 
-       return handle | (i > 0xFFF ? 0xFFF : i);
+       i = find_next_zero_bit(bitmap, NR_U32_NODE, 0x800);
+       if (i >= NR_U32_NODE)
+               i = find_next_zero_bit(bitmap, NR_U32_NODE, 1);
+
+       kfree(bitmap);
+       return handle | (i >= NR_U32_NODE ? 0xFFF : i);
 }
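
The old gen_new_kid() handed out max(existing)+1 and pinned itself at 0xFFF once the top was reached, even with plenty of freed handles below; the bitmap version finds any free slot, preferring 0x800 and up, then wrapping to 1. The same search in miniature (standalone C, 16 hypothetical slots in place of NR_U32_NODE):

	#include <stdint.h>
	#include <stdio.h>

	#define NR_SLOTS 16

	/* Lowest free slot at or above 'from', else wrap to the slots below. */
	static int find_free(const uint8_t *used, int from)
	{
		for (int i = from; i < NR_SLOTS; i++)
			if (!used[i])
				return i;
		for (int i = 1; i < from; i++)	/* slot 0 stays reserved */
			if (!used[i])
				return i;
		return -1;
	}

	int main(void)
	{
		uint8_t used[NR_SLOTS] = {0};

		used[8] = used[9] = 1;		/* pretend these are taken */
		printf("next id: %d\n", find_free(used, 8));	/* -> 10 */
		return 0;
	}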
 
 static const struct nla_policy u32_policy[TCA_U32_MAX + 1] = {
index bfd34e4..7c292d4 100644 (file)
@@ -125,7 +125,6 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
 {
        struct can_filter *conf = data; /* Array with rules */
        struct canid_match *cm;
-       struct canid_match *cm_old = (struct canid_match *)m->data;
        int i;
 
        if (!len)
@@ -181,12 +180,6 @@ static int em_canid_change(struct tcf_proto *tp, void *data, int len,
 
        m->datalen = sizeof(struct canid_match) + len;
        m->data = (unsigned long)cm;
-
-       if (cm_old != NULL) {
-               pr_err("canid: Configuring an existing ematch!\n");
-               kfree(cm_old);
-       }
-
        return 0;
 }
 
index e1543b0..fc04fe9 100644 (file)
@@ -108,7 +108,7 @@ static inline int handle_dev_cpu_collision(struct sk_buff *skb,
 
 /*
  * Transmit one skb, and handle the return status as required. Holding the
- * __QDISC_STATE_RUNNING bit guarantees that only one CPU can execute this
+ * __QDISC___STATE_RUNNING bit guarantees that only one CPU can execute this
  * function.
  *
  * Returns to the caller:
@@ -156,7 +156,7 @@ int sch_direct_xmit(struct sk_buff *skb, struct Qdisc *q,
 /*
  * NOTE: Called under qdisc_lock(q) with locally disabled BH.
  *
- * __QDISC_STATE_RUNNING guarantees only one CPU can process
+ * __QDISC___STATE_RUNNING guarantees only one CPU can process
  * this qdisc at a time. qdisc_lock(q) serializes queue accesses for
  * this queue.
  *
index 4741671..bd33793 100644 (file)
@@ -485,8 +485,8 @@ static int __init teql_init(void)
                struct net_device *dev;
                struct teql_master *master;
 
-               dev = alloc_netdev(sizeof(struct teql_master),
-                                 "teql%d", teql_master_setup);
+               dev = alloc_netdev(sizeof(struct teql_master), "teql%d",
+                                  NET_NAME_UNKNOWN, teql_master_setup);
                if (!dev) {
                        err = -ENOMEM;
                        break;
index 5c30b7a..3b4ffb0 100644 (file)
@@ -8,7 +8,7 @@ obj-$(CONFIG_NET_SCTPPROBE) += sctp_probe.o
 sctp-y := sm_statetable.o sm_statefuns.o sm_sideeffect.o \
          protocol.o endpointola.o associola.o \
          transport.o chunk.o sm_make_chunk.o ulpevent.o \
-         inqueue.o outqueue.o ulpqueue.o command.o \
+         inqueue.o outqueue.o ulpqueue.o \
          tsnmap.o bind_addr.o socket.o primitive.o \
          output.o input.o debug.o ssnmap.o auth.o
 
index 9de23a2..06a9ee6 100644 (file)
@@ -1097,6 +1097,7 @@ void sctp_assoc_update(struct sctp_association *asoc,
        asoc->c = new->c;
        asoc->peer.rwnd = new->peer.rwnd;
        asoc->peer.sack_needed = new->peer.sack_needed;
+       asoc->peer.auth_capable = new->peer.auth_capable;
        asoc->peer.i = new->peer.i;
        sctp_tsnmap_init(&asoc->peer.tsn_map, SCTP_TSN_MAP_INITIAL,
                         asoc->peer.i.initial_tsn, GFP_ATOMIC);
diff --git a/net/sctp/command.c b/net/sctp/command.c
deleted file mode 100644 (file)
index dd73758..0000000
+++ /dev/null
@@ -1,68 +0,0 @@
-/* SCTP kernel implementation Copyright (C) 1999-2001
- * Cisco, Motorola, and IBM
- * Copyright 2001 La Monte H.P. Yarroll
- *
- * This file is part of the SCTP kernel implementation
- *
- * These functions manipulate sctp command sequences.
- *
- * This SCTP implementation is free software;
- * you can redistribute it and/or modify it under the terms of
- * the GNU General Public License as published by
- * the Free Software Foundation; either version 2, or (at your option)
- * any later version.
- *
- * This SCTP implementation is distributed in the hope that it
- * will be useful, but WITHOUT ANY WARRANTY; without even the implied
- *                 ************************
- * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
- * See the GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with GNU CC; see the file COPYING.  If not, see
- * <http://www.gnu.org/licenses/>.
- *
- * Please send any bug reports or fixes you make to the
- * email address(es):
- *    lksctp developers <linux-sctp@vger.kernel.org>
- *
- * Written or modified by:
- *    La Monte H.P. Yarroll <piggy@acm.org>
- *    Karl Knutson <karl@athena.chicago.il.us>
- */
-
-#include <linux/types.h>
-#include <net/sctp/sctp.h>
-#include <net/sctp/sm.h>
-
-/* Initialize a block of memory as a command sequence. */
-int sctp_init_cmd_seq(sctp_cmd_seq_t *seq)
-{
-       memset(seq, 0, sizeof(sctp_cmd_seq_t));
-       return 1;               /* We always succeed.  */
-}
-
-/* Add a command to a sctp_cmd_seq_t.
- * Return 0 if the command sequence is full.
- */
-void sctp_add_cmd_sf(sctp_cmd_seq_t *seq, sctp_verb_t verb, sctp_arg_t obj)
-{
-       BUG_ON(seq->next_free_slot >= SCTP_MAX_NUM_COMMANDS);
-
-       seq->cmds[seq->next_free_slot].verb = verb;
-       seq->cmds[seq->next_free_slot++].obj = obj;
-}
-
-/* Return the next command structure in a sctp_cmd_seq.
- * Returns NULL at the end of the sequence.
- */
-sctp_cmd_t *sctp_next_cmd(sctp_cmd_seq_t *seq)
-{
-       sctp_cmd_t *retval = NULL;
-
-       if (seq->next_cmd < seq->next_free_slot)
-               retval = &seq->cmds[seq->next_cmd++];
-
-       return retval;
-}
-
index f2e2cbd..c1b9912 100644 (file)
@@ -575,11 +575,6 @@ void sctp_v4_err(struct sk_buff *skb, __u32 info)
        int err;
        struct net *net = dev_net(skb->dev);
 
-       if (skb->len < ihlen + 8) {
-               ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
-               return;
-       }
-
        /* Fix up skb to look at the embedded net header. */
        saveip = skb->network_header;
        savesctp = skb->transport_header;
index 1999592..0e4198e 100644 (file)
@@ -434,7 +434,7 @@ static void sctp_v6_from_sk(union sctp_addr *addr, struct sock *sk)
 /* Initialize sk->sk_rcv_saddr from sctp_addr. */
 static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 {
-       if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
+       if (addr->sa.sa_family == AF_INET) {
                sk->sk_v6_rcv_saddr.s6_addr32[0] = 0;
                sk->sk_v6_rcv_saddr.s6_addr32[1] = 0;
                sk->sk_v6_rcv_saddr.s6_addr32[2] = htonl(0x0000ffff);
@@ -448,7 +448,7 @@ static void sctp_v6_to_sk_saddr(union sctp_addr *addr, struct sock *sk)
 /* Initialize sk->sk_daddr from sctp_addr. */
 static void sctp_v6_to_sk_daddr(union sctp_addr *addr, struct sock *sk)
 {
-       if (addr->sa.sa_family == AF_INET && sctp_sk(sk)->v4mapped) {
+       if (addr->sa.sa_family == AF_INET) {
                sk->sk_v6_daddr.s6_addr32[0] = 0;
                sk->sk_v6_daddr.s6_addr32[1] = 0;
                sk->sk_v6_daddr.s6_addr32[2] = htonl(0x0000ffff);
@@ -556,8 +556,6 @@ static int sctp_v6_available(union sctp_addr *addr, struct sctp_sock *sp)
        if (IPV6_ADDR_ANY == type)
                return 1;
        if (type == IPV6_ADDR_MAPPED) {
-               if (sp && !sp->v4mapped)
-                       return 0;
                if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
                        return 0;
                sctp_v6_map_v4(addr);
@@ -587,8 +585,6 @@ static int sctp_v6_addr_valid(union sctp_addr *addr,
                /* Note: This routine is used in input, so v4-mapped-v6
                 * are disallowed here when there is no sctp_sock.
                 */
-               if (!sp || !sp->v4mapped)
-                       return 0;
                if (sp && ipv6_only_sock(sctp_opt2sk(sp)))
                        return 0;
                sctp_v6_map_v4(addr);
@@ -675,11 +671,23 @@ out:
        return newsk;
 }
 
-/* Map v4 address to mapped v6 address */
-static void sctp_v6_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr)
+/* Format a sockaddr for return to user space. This makes sure the return is
+ * AF_INET or AF_INET6 depending on the SCTP_I_WANT_MAPPED_V4_ADDR option.
+ */
+static int sctp_v6_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
-       if (sp->v4mapped && AF_INET == addr->sa.sa_family)
-               sctp_v4_map_v6(addr);
+       if (sp->v4mapped) {
+               if (addr->sa.sa_family == AF_INET)
+                       sctp_v4_map_v6(addr);
+       } else {
+               if (addr->sa.sa_family == AF_INET6 &&
+                   ipv6_addr_v4mapped(&addr->v6.sin6_addr))
+                       sctp_v6_map_v4(addr);
+       }
+
+       if (addr->sa.sa_family == AF_INET)
+               return sizeof(struct sockaddr_in);
+       return sizeof(struct sockaddr_in6);
 }
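
sctp_v6_addr_to_user() makes the SCTP_I_WANT_MAPPED_V4_ADDR socket option the single point deciding whether a v4 peer is reported as AF_INET or as a v4-mapped AF_INET6 address, and returns the matching sockaddr length. Seen from userspace (a hedged sketch; error handling elided):

	#include <netinet/in.h>
	#include <netinet/sctp.h>
	#include <sys/socket.h>

	/* Ask for plain AF_INET sockaddrs instead of ::ffff:a.b.c.d mapped ones. */
	static int want_plain_v4(int sk)
	{
		int off = 0;

		return setsockopt(sk, IPPROTO_SCTP, SCTP_I_WANT_MAPPED_V4_ADDR,
				  &off, sizeof(off));
	}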
 
 /* Where did this skb come from?  */
@@ -706,82 +714,68 @@ static void sctp_v6_ecn_capable(struct sock *sk)
        inet6_sk(sk)->tclass |= INET_ECN_ECT_0;
 }
 
-/* Initialize a PF_INET6 socket msg_name. */
-static void sctp_inet6_msgname(char *msgname, int *addr_len)
-{
-       struct sockaddr_in6 *sin6;
-
-       sin6 = (struct sockaddr_in6 *)msgname;
-       sin6->sin6_family = AF_INET6;
-       sin6->sin6_flowinfo = 0;
-       sin6->sin6_scope_id = 0; /*FIXME */
-       *addr_len = sizeof(struct sockaddr_in6);
-}
-
 /* Initialize a PF_INET msgname from a ulpevent. */
 static void sctp_inet6_event_msgname(struct sctp_ulpevent *event,
                                     char *msgname, int *addrlen)
 {
-       struct sockaddr_in6 *sin6, *sin6from;
-
-       if (msgname) {
-               union sctp_addr *addr;
-               struct sctp_association *asoc;
-
-               asoc = event->asoc;
-               sctp_inet6_msgname(msgname, addrlen);
-               sin6 = (struct sockaddr_in6 *)msgname;
-               sin6->sin6_port = htons(asoc->peer.port);
-               addr = &asoc->peer.primary_addr;
+       union sctp_addr *addr;
+       struct sctp_association *asoc;
+       union sctp_addr *paddr;
 
-               /* Note: If we go to a common v6 format, this code
-                * will change.
-                */
+       if (!msgname)
+               return;
 
-               /* Map ipv4 address into v4-mapped-on-v6 address.  */
-               if (sctp_sk(asoc->base.sk)->v4mapped &&
-                   AF_INET == addr->sa.sa_family) {
-                       sctp_v4_map_v6((union sctp_addr *)sin6);
-                       sin6->sin6_addr.s6_addr32[3] =
-                               addr->v4.sin_addr.s_addr;
-                       return;
-               }
+       addr = (union sctp_addr *)msgname;
+       asoc = event->asoc;
+       paddr = &asoc->peer.primary_addr;
 
-               sin6from = &asoc->peer.primary_addr.v6;
-               sin6->sin6_addr = sin6from->sin6_addr;
-               if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL)
-                       sin6->sin6_scope_id = sin6from->sin6_scope_id;
+       if (paddr->sa.sa_family == AF_INET) {
+               addr->v4.sin_family = AF_INET;
+               addr->v4.sin_port = htons(asoc->peer.port);
+               addr->v4.sin_addr = paddr->v4.sin_addr;
+       } else {
+               addr->v6.sin6_family = AF_INET6;
+               addr->v6.sin6_flowinfo = 0;
+               if (ipv6_addr_type(&paddr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL)
+                       addr->v6.sin6_scope_id = paddr->v6.sin6_scope_id;
+               else
+                       addr->v6.sin6_scope_id = 0;
+               addr->v6.sin6_port = htons(asoc->peer.port);
+               addr->v6.sin6_addr = paddr->v6.sin6_addr;
        }
+
+       *addrlen = sctp_v6_addr_to_user(sctp_sk(asoc->base.sk), addr);
 }
 
 /* Initialize a msg_name from an inbound skb. */
 static void sctp_inet6_skb_msgname(struct sk_buff *skb, char *msgname,
                                   int *addr_len)
 {
+       union sctp_addr *addr;
        struct sctphdr *sh;
-       struct sockaddr_in6 *sin6;
-
-       if (msgname) {
-               sctp_inet6_msgname(msgname, addr_len);
-               sin6 = (struct sockaddr_in6 *)msgname;
-               sh = sctp_hdr(skb);
-               sin6->sin6_port = sh->source;
-
-               /* Map ipv4 address into v4-mapped-on-v6 address. */
-               if (sctp_sk(skb->sk)->v4mapped &&
-                   ip_hdr(skb)->version == 4) {
-                       sctp_v4_map_v6((union sctp_addr *)sin6);
-                       sin6->sin6_addr.s6_addr32[3] = ip_hdr(skb)->saddr;
-                       return;
-               }
 
-               /* Otherwise, just copy the v6 address. */
-               sin6->sin6_addr = ipv6_hdr(skb)->saddr;
-               if (ipv6_addr_type(&sin6->sin6_addr) & IPV6_ADDR_LINKLOCAL) {
+       if (!msgname)
+               return;
+
+       addr = (union sctp_addr *)msgname;
+       sh = sctp_hdr(skb);
+
+       if (ip_hdr(skb)->version == 4) {
+               addr->v4.sin_family = AF_INET;
+               addr->v4.sin_port = sh->source;
+               addr->v4.sin_addr.s_addr = ip_hdr(skb)->saddr;
+       } else {
+               addr->v6.sin6_family = AF_INET6;
+               addr->v6.sin6_flowinfo = 0;
+               addr->v6.sin6_port = sh->source;
+               addr->v6.sin6_addr = ipv6_hdr(skb)->saddr;
+               if (ipv6_addr_type(&addr->v6.sin6_addr) & IPV6_ADDR_LINKLOCAL) {
                        struct sctp_ulpevent *ev = sctp_skb2event(skb);
-                       sin6->sin6_scope_id = ev->iif;
+                       addr->v6.sin6_scope_id = ev->iif;
                }
        }
+
+       *addr_len = sctp_v6_addr_to_user(sctp_sk(skb->sk), addr);
 }
 
 /* Do we support this AF? */
@@ -857,9 +851,6 @@ static int sctp_inet6_bind_verify(struct sctp_sock *opt, union sctp_addr *addr)
                                return 0;
                        }
                        rcu_read_unlock();
-               } else if (type == IPV6_ADDR_MAPPED) {
-                       if (!opt->v4mapped)
-                               return 0;
                }
 
                af = opt->pf->af;
@@ -914,6 +905,23 @@ static int sctp_inet6_supported_addrs(const struct sctp_sock *opt,
        return 1;
 }
 
+/* Handle SCTP_I_WANT_MAPPED_V4_ADDR for getpeername() and getsockname() */
+static int sctp_getname(struct socket *sock, struct sockaddr *uaddr,
+                       int *uaddr_len, int peer)
+{
+       int rc;
+
+       rc = inet6_getname(sock, uaddr, uaddr_len, peer);
+
+       if (rc != 0)
+               return rc;
+
+       *uaddr_len = sctp_v6_addr_to_user(sctp_sk(sock->sk),
+                                         (union sctp_addr *)uaddr);
+
+       return rc;
+}
+
 static const struct proto_ops inet6_seqpacket_ops = {
        .family            = PF_INET6,
        .owner             = THIS_MODULE,
@@ -922,7 +930,7 @@ static const struct proto_ops inet6_seqpacket_ops = {
        .connect           = inet_dgram_connect,
        .socketpair        = sock_no_socketpair,
        .accept            = inet_accept,
-       .getname           = inet6_getname,
+       .getname           = sctp_getname,
        .poll              = sctp_poll,
        .ioctl             = inet6_ioctl,
        .listen            = sctp_inet_listen,
@@ -974,8 +982,6 @@ static struct sctp_af sctp_af_inet6 = {
        .copy_addrlist     = sctp_v6_copy_addrlist,
        .from_skb          = sctp_v6_from_skb,
        .from_sk           = sctp_v6_from_sk,
-       .to_sk_saddr       = sctp_v6_to_sk_saddr,
-       .to_sk_daddr       = sctp_v6_to_sk_daddr,
        .from_addr_param   = sctp_v6_from_addr_param,
        .to_addr_param     = sctp_v6_to_addr_param,
        .cmp_addr          = sctp_v6_cmp_addr,
@@ -1005,7 +1011,9 @@ static struct sctp_pf sctp_pf_inet6 = {
        .send_verify   = sctp_inet6_send_verify,
        .supported_addrs = sctp_inet6_supported_addrs,
        .create_accept_sk = sctp_v6_create_accept_sk,
-       .addr_v4map    = sctp_v6_addr_v4map,
+       .addr_to_user  = sctp_v6_addr_to_user,
+       .to_sk_saddr   = sctp_v6_to_sk_saddr,
+       .to_sk_daddr   = sctp_v6_to_sk_daddr,
        .af            = &sctp_af_inet6,
 };
 
index 01ab8e0..1eedba5 100644 (file)
@@ -178,7 +178,7 @@ sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
 
        case SCTP_XMIT_RWND_FULL:
        case SCTP_XMIT_OK:
-       case SCTP_XMIT_NAGLE_DELAY:
+       case SCTP_XMIT_DELAY:
                break;
        }
 
@@ -633,7 +633,6 @@ nomem:
 static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
                                           struct sctp_chunk *chunk)
 {
-       sctp_xmit_t retval = SCTP_XMIT_OK;
        size_t datasize, rwnd, inflight, flight_size;
        struct sctp_transport *transport = packet->transport;
        struct sctp_association *asoc = transport->asoc;
@@ -658,15 +657,11 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
 
        datasize = sctp_data_size(chunk);
 
-       if (datasize > rwnd) {
-               if (inflight > 0) {
-                       /* We have (at least) one data chunk in flight,
-                        * so we can't fall back to rule 6.1 B).
-                        */
-                       retval = SCTP_XMIT_RWND_FULL;
-                       goto finish;
-               }
-       }
+       if (datasize > rwnd && inflight > 0)
+               /* We have (at least) one data chunk in flight,
+                * so we can't fall back to rule 6.1 B).
+                */
+               return SCTP_XMIT_RWND_FULL;
 
        /* RFC 2960 6.1  Transmission of DATA Chunks
         *
@@ -680,36 +675,44 @@ static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
         *    When a Fast Retransmit is being performed the sender SHOULD
         *    ignore the value of cwnd and SHOULD NOT delay retransmission.
         */
-       if (chunk->fast_retransmit != SCTP_NEED_FRTX)
-               if (flight_size >= transport->cwnd) {
-                       retval = SCTP_XMIT_RWND_FULL;
-                       goto finish;
-               }
+       if (chunk->fast_retransmit != SCTP_NEED_FRTX &&
+           flight_size >= transport->cwnd)
+               return SCTP_XMIT_RWND_FULL;
 
        /* Nagle's algorithm to solve small-packet problem:
         * Inhibit the sending of new chunks when new outgoing data arrives
         * if any previously transmitted data on the connection remains
         * unacknowledged.
         */
-       if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
-           inflight && sctp_state(asoc, ESTABLISHED)) {
-               unsigned int max = transport->pathmtu - packet->overhead;
-               unsigned int len = chunk->skb->len + q->out_qlen;
-
-               /* Check whether this chunk and all the rest of pending
-                * data will fit or delay in hopes of bundling a full
-                * sized packet.
-                * Don't delay large message writes that may have been
-                * fragmeneted into small peices.
-                */
-               if ((len < max) && chunk->msg->can_delay) {
-                       retval = SCTP_XMIT_NAGLE_DELAY;
-                       goto finish;
-               }
-       }
 
-finish:
-       return retval;
+       if (sctp_sk(asoc->base.sk)->nodelay)
+               /* Nagle disabled */
+               return SCTP_XMIT_OK;
+
+       if (!sctp_packet_empty(packet))
+               /* Append to packet */
+               return SCTP_XMIT_OK;
+
+       if (inflight == 0)
+               /* Nothing unacked */
+               return SCTP_XMIT_OK;
+
+       if (!sctp_state(asoc, ESTABLISHED))
+               return SCTP_XMIT_OK;
+
+       /* Check whether this chunk and all the rest of pending data will fit
+        * or delay in hopes of bundling a full sized packet.
+        */
+       if (chunk->skb->len + q->out_qlen >= transport->pathmtu - packet->overhead)
+               /* Enough data queued to fill a packet */
+               return SCTP_XMIT_OK;
+
+       /* Don't delay large message writes that may have been fragmented */
+       if (!chunk->msg->can_delay)
+               return SCTP_XMIT_OK;
+
+       /* Defer until all data acked or packet full */
+       return SCTP_XMIT_DELAY;
 }
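
Rewritten this way, the Nagle test reads as a flat list of reasons to send immediately, with delay as the fall-through. A standalone restatement of the decision order (condensed, hypothetical parameter names: queued stands for chunk->skb->len + q->out_qlen, payload_room for pathmtu - overhead):

	#include <stdbool.h>
	#include <stddef.h>

	enum xmit { XMIT_OK, XMIT_DELAY };

	static enum xmit nagle_decision(bool nodelay, bool packet_empty,
					size_t inflight, bool established,
					size_t queued, size_t payload_room,
					bool can_delay)
	{
		if (nodelay)
			return XMIT_OK;		/* Nagle disabled          */
		if (!packet_empty)
			return XMIT_OK;		/* append to this packet   */
		if (inflight == 0)
			return XMIT_OK;		/* nothing unacked         */
		if (!established)
			return XMIT_OK;
		if (queued >= payload_room)
			return XMIT_OK;		/* enough to fill a packet */
		if (!can_delay)
			return XMIT_OK;		/* fragmented large write  */
		return XMIT_DELAY;		/* wait for acks or more data */
	}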
 
 /* This private function does management things when adding DATA chunk */
index 9c77947..7e8f0a1 100644 (file)
@@ -629,7 +629,7 @@ redo:
                        done = 1;
                        break;
 
-               case SCTP_XMIT_NAGLE_DELAY:
+               case SCTP_XMIT_DELAY:
                        /* Send this packet. */
                        error = sctp_packet_transmit(pkt);
 
@@ -1015,7 +1015,7 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
                        switch (status) {
                        case SCTP_XMIT_PMTU_FULL:
                        case SCTP_XMIT_RWND_FULL:
-                       case SCTP_XMIT_NAGLE_DELAY:
+                       case SCTP_XMIT_DELAY:
                                /* We could not append this chunk, so put
                                 * the chunk back on the output queue.
                                 */
@@ -1025,7 +1025,6 @@ static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
 
                                sctp_outq_head_data(q, chunk);
                                goto sctp_flush_out;
-                               break;
 
                        case SCTP_XMIT_OK:
                                /* The sender is in the SHUTDOWN-PENDING state,
index 6789d78..6240834 100644 (file)
@@ -576,10 +576,10 @@ out:
        return newsk;
 }
 
-/* Map address, empty for v4 family */
-static void sctp_v4_addr_v4map(struct sctp_sock *sp, union sctp_addr *addr)
+static int sctp_v4_addr_to_user(struct sctp_sock *sp, union sctp_addr *addr)
 {
-       /* Empty */
+       /* No address mapping for V4 sockets */
+       return sizeof(struct sockaddr_in);
 }
 
 /* Dump the v4 addr to the seq file. */
@@ -976,7 +976,9 @@ static struct sctp_pf sctp_pf_inet = {
        .send_verify   = sctp_inet_send_verify,
        .supported_addrs = sctp_inet_supported_addrs,
        .create_accept_sk = sctp_v4_create_accept_sk,
-       .addr_v4map     = sctp_v4_addr_v4map,
+       .addr_to_user  = sctp_v4_addr_to_user,
+       .to_sk_saddr   = sctp_v4_to_sk_saddr,
+       .to_sk_daddr   = sctp_v4_to_sk_daddr,
        .af            = &sctp_af_inet
 };
 
@@ -1047,8 +1049,6 @@ static struct sctp_af sctp_af_inet = {
        .copy_addrlist     = sctp_v4_copy_addrlist,
        .from_skb          = sctp_v4_from_skb,
        .from_sk           = sctp_v4_from_sk,
-       .to_sk_saddr       = sctp_v4_to_sk_saddr,
-       .to_sk_daddr       = sctp_v4_to_sk_daddr,
        .from_addr_param   = sctp_v4_from_addr_param,
        .to_addr_param     = sctp_v4_to_addr_param,
        .cmp_addr          = sctp_v4_cmp_addr,
index 5170a1f..d3f1ea4 100644 (file)
@@ -4182,7 +4182,6 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
        case SCTP_CID_ACTION_DISCARD:
                /* Discard the packet.  */
                return sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
-               break;
        case SCTP_CID_ACTION_DISCARD_ERR:
                /* Generate an ERROR chunk as response. */
                hdr = unk_chunk->chunk_hdr;
@@ -4198,11 +4197,9 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
                /* Discard the packet.  */
                sctp_sf_pdiscard(net, ep, asoc, type, arg, commands);
                return SCTP_DISPOSITION_CONSUME;
-               break;
        case SCTP_CID_ACTION_SKIP:
                /* Skip the chunk.  */
                return SCTP_DISPOSITION_DISCARD;
-               break;
        case SCTP_CID_ACTION_SKIP_ERR:
                /* Generate an ERROR chunk as response. */
                hdr = unk_chunk->chunk_hdr;
@@ -4216,7 +4213,6 @@ sctp_disposition_t sctp_sf_unk_chunk(struct net *net,
                }
                /* Skip the chunk.  */
                return SCTP_DISPOSITION_CONSUME;
-               break;
        default:
                break;
        }
index 4298996..eb71d49 100644 (file)
@@ -254,7 +254,7 @@ static struct sctp_transport *sctp_addr_id2transport(struct sock *sk,
        if (id_asoc && (id_asoc != addr_asoc))
                return NULL;
 
-       sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
+       sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
                                                (union sctp_addr *)addr);
 
        return transport;
@@ -396,7 +396,7 @@ static int sctp_do_bind(struct sock *sk, union sctp_addr *addr, int len)
        /* Copy back into socket for getsockname() use. */
        if (!ret) {
                inet_sk(sk)->inet_sport = htons(inet_sk(sk)->inet_num);
-               af->to_sk_saddr(addr, sk);
+               sp->pf->to_sk_saddr(addr, sk);
        }
 
        return ret;
@@ -1053,7 +1053,6 @@ static int __sctp_connect(struct sock *sk,
        struct sctp_association *asoc2;
        struct sctp_transport *transport;
        union sctp_addr to;
-       struct sctp_af *af;
        sctp_scope_t scope;
        long timeo;
        int err = 0;
@@ -1081,6 +1080,8 @@ static int __sctp_connect(struct sock *sk,
        /* Walk through the addrs buffer and count the number of addresses. */
        addr_buf = kaddrs;
        while (walk_size < addrs_size) {
+               struct sctp_af *af;
+
                if (walk_size + sizeof(sa_family_t) > addrs_size) {
                        err = -EINVAL;
                        goto out_free;
@@ -1205,8 +1206,7 @@ static int __sctp_connect(struct sock *sk,
 
        /* Initialize sk's dport and daddr for getpeername() */
        inet_sk(sk)->inet_dport = htons(asoc->peer.port);
-       af = sctp_get_af_specific(sa_addr->sa.sa_family);
-       af->to_sk_daddr(sa_addr, sk);
+       sp->pf->to_sk_daddr(sa_addr, sk);
        sk->sk_err = 0;
 
        /* in-kernel sockets don't generally have a file allocated to them
@@ -1602,12 +1602,13 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        struct sctp_initmsg *sinit;
        sctp_assoc_t associd = 0;
        sctp_cmsgs_t cmsgs = { NULL };
-       int err;
        sctp_scope_t scope;
-       long timeo;
-       __u16 sinfo_flags = 0;
+       bool fill_sinfo_ttl = false;
        struct sctp_datamsg *datamsg;
        int msg_flags = msg->msg_flags;
+       __u16 sinfo_flags = 0;
+       long timeo;
+       int err;
 
        err = 0;
        sp = sctp_sk(sk);
@@ -1648,10 +1649,21 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                msg_name = msg->msg_name;
        }
 
-       sinfo = cmsgs.info;
        sinit = cmsgs.init;
+       if (cmsgs.sinfo != NULL) {
+               memset(&default_sinfo, 0, sizeof(default_sinfo));
+               default_sinfo.sinfo_stream = cmsgs.sinfo->snd_sid;
+               default_sinfo.sinfo_flags = cmsgs.sinfo->snd_flags;
+               default_sinfo.sinfo_ppid = cmsgs.sinfo->snd_ppid;
+               default_sinfo.sinfo_context = cmsgs.sinfo->snd_context;
+               default_sinfo.sinfo_assoc_id = cmsgs.sinfo->snd_assoc_id;
 
-       /* Did the user specify SNDRCVINFO?  */
+               sinfo = &default_sinfo;
+               fill_sinfo_ttl = true;
+       } else {
+               sinfo = cmsgs.srinfo;
+       }
+       /* Did the user specify SNDINFO/SNDRCVINFO? */
        if (sinfo) {
                sinfo_flags = sinfo->sinfo_flags;
                associd = sinfo->sinfo_assoc_id;
@@ -1858,8 +1870,8 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
        pr_debug("%s: we have a valid association\n", __func__);
 
        if (!sinfo) {
-               /* If the user didn't specify SNDRCVINFO, make up one with
-                * some defaults.
+               /* If the user didn't specify SNDINFO/SNDRCVINFO, make up
+                * one with some defaults.
                 */
                memset(&default_sinfo, 0, sizeof(default_sinfo));
                default_sinfo.sinfo_stream = asoc->default_stream;
@@ -1868,7 +1880,13 @@ static int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                default_sinfo.sinfo_context = asoc->default_context;
                default_sinfo.sinfo_timetolive = asoc->default_timetolive;
                default_sinfo.sinfo_assoc_id = sctp_assoc2id(asoc);
+
                sinfo = &default_sinfo;
+       } else if (fill_sinfo_ttl) {
+               /* In case SNDINFO was specified, we still need to fill
+                * it with a default ttl from the assoc here.
+                */
+               sinfo->sinfo_timetolive = asoc->default_timetolive;
        }
 
        /* API 7.1.7, the sndbuf size per association bounds the
@@ -2042,8 +2060,6 @@ static int sctp_skb_pull(struct sk_buff *skb, int len)
  *  flags   - flags sent or received with the user message, see Section
  *            5 for complete description of the flags.
  */
-static struct sk_buff *sctp_skb_recv_datagram(struct sock *, int, int, int *);
-
 static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
                        struct msghdr *msg, size_t len, int noblock,
                        int flags, int *addr_len)
@@ -2094,9 +2110,16 @@ static int sctp_recvmsg(struct kiocb *iocb, struct sock *sk,
                sp->pf->skb_msgname(skb, msg->msg_name, addr_len);
        }
 
+       /* Check if we allow SCTP_NXTINFO. */
+       if (sp->recvnxtinfo)
+               sctp_ulpevent_read_nxtinfo(event, msg, sk);
+       /* Check if we allow SCTP_RCVINFO. */
+       if (sp->recvrcvinfo)
+               sctp_ulpevent_read_rcvinfo(event, msg);
        /* Check if we allow SCTP_SNDRCVINFO. */
        if (sp->subscribe.sctp_data_io_event)
                sctp_ulpevent_read_sndrcvinfo(event, msg);
+
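The checks above only emit ancillary data that the application opted in to.
A hypothetical receive-side sketch (same header assumptions as the send
example; in real code the setsockopt() would be done once at socket setup):

    static ssize_t recv_with_rcvinfo(int fd, void *buf, size_t len,
                                     struct sctp_rcvinfo *rinfo)
    {
            char cbuf[CMSG_SPACE(sizeof(struct sctp_rcvinfo))];
            struct cmsghdr *cmsg;
            struct msghdr msg;
            struct iovec iov;
            ssize_t n;
            int on = 1;

            /* Opt in to SCTP_RCVINFO delivery. */
            setsockopt(fd, IPPROTO_SCTP, SCTP_RECVRCVINFO, &on, sizeof(on));

            memset(&msg, 0, sizeof(msg));
            iov.iov_base = buf;
            iov.iov_len = len;
            msg.msg_iov = &iov;
            msg.msg_iovlen = 1;
            msg.msg_control = cbuf;
            msg.msg_controllen = sizeof(cbuf);

            n = recvmsg(fd, &msg, 0);
            if (n < 0)
                    return n;

            /* Fish the SCTP_RCVINFO cmsg out of the control buffer. */
            for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
                 cmsg = CMSG_NXTHDR(&msg, cmsg))
                    if (cmsg->cmsg_level == IPPROTO_SCTP &&
                        cmsg->cmsg_type == SCTP_RCVINFO)
                            memcpy(rinfo, CMSG_DATA(cmsg), sizeof(*rinfo));

            return n;
    }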
 #if 0
        /* FIXME: we should be calling IP/IPv6 layers.  */
        if (sk->sk_protinfo.af_inet.cmsg_flags)
@@ -2182,8 +2205,13 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
        if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
                return -EFAULT;
 
-       /*
-        * At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
+       if (sctp_sk(sk)->subscribe.sctp_data_io_event)
+               pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
+                                   "Requested SCTP_SNDRCVINFO event.\n"
+                                   "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
+                                   current->comm, task_pid_nr(current));
+
+       /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
         * if there is no data to be sent or retransmit, the stack will
         * immediately send up this notification.
         */
@@ -2747,19 +2775,22 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
                                              char __user *optval,
                                              unsigned int optlen)
 {
-       struct sctp_sndrcvinfo info;
-       struct sctp_association *asoc;
        struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndrcvinfo info;
 
-       if (optlen != sizeof(struct sctp_sndrcvinfo))
+       if (optlen != sizeof(info))
                return -EINVAL;
        if (copy_from_user(&info, optval, optlen))
                return -EFAULT;
+       if (info.sinfo_flags &
+           ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+             SCTP_ABORT | SCTP_EOF))
+               return -EINVAL;
 
        asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
        if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
                return -EINVAL;
-
        if (asoc) {
                asoc->default_stream = info.sinfo_stream;
                asoc->default_flags = info.sinfo_flags;
@@ -2777,6 +2808,44 @@ static int sctp_setsockopt_default_send_param(struct sock *sk,
        return 0;
 }
 
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_setsockopt_default_sndinfo(struct sock *sk,
+                                          char __user *optval,
+                                          unsigned int optlen)
+{
+       struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndinfo info;
+
+       if (optlen != sizeof(info))
+               return -EINVAL;
+       if (copy_from_user(&info, optval, optlen))
+               return -EFAULT;
+       if (info.snd_flags &
+           ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+             SCTP_ABORT | SCTP_EOF))
+               return -EINVAL;
+
+       asoc = sctp_id2assoc(sk, info.snd_assoc_id);
+       if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+               return -EINVAL;
+       if (asoc) {
+               asoc->default_stream = info.snd_sid;
+               asoc->default_flags = info.snd_flags;
+               asoc->default_ppid = info.snd_ppid;
+               asoc->default_context = info.snd_context;
+       } else {
+               sp->default_stream = info.snd_sid;
+               sp->default_flags = info.snd_flags;
+               sp->default_ppid = info.snd_ppid;
+               sp->default_context = info.snd_context;
+       }
+
+       return 0;
+}
+
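Setting the new defaults from userspace is a single setsockopt() call; the
matching getter is added further down. A hypothetical sketch (fd is assumed
to be an SCTP socket; headers as in the earlier examples):

    static int set_send_defaults(int fd, uint16_t stream, uint32_t ppid)
    {
            struct sctp_sndinfo info;

            memset(&info, 0, sizeof(info));
            info.snd_sid = stream;  /* default stream for future sends */
            info.snd_ppid = ppid;   /* passed through opaquely */

            /* snd_assoc_id left 0: on a one-to-many socket this sets the
             * socket-wide defaults rather than per-association ones, per
             * the else branch in the handler above.
             */
            return setsockopt(fd, IPPROTO_SCTP, SCTP_DEFAULT_SNDINFO,
                              &info, sizeof(info));
    }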
 /* 7.1.10 Set Primary Address (SCTP_PRIMARY_ADDR)
  *
  * Requests that the local SCTP stack use the enclosed peer address as
@@ -3523,7 +3592,6 @@ static int sctp_setsockopt_auto_asconf(struct sock *sk, char __user *optval,
        return 0;
 }
 
-
 /*
  * SCTP_PEER_ADDR_THLDS
  *
@@ -3574,6 +3642,38 @@ static int sctp_setsockopt_paddr_thresholds(struct sock *sk,
        return 0;
 }
 
+static int sctp_setsockopt_recvrcvinfo(struct sock *sk,
+                                      char __user *optval,
+                                      unsigned int optlen)
+{
+       int val;
+
+       if (optlen < sizeof(int))
+               return -EINVAL;
+       if (get_user(val, (int __user *) optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->recvrcvinfo = (val == 0) ? 0 : 1;
+
+       return 0;
+}
+
+static int sctp_setsockopt_recvnxtinfo(struct sock *sk,
+                                      char __user *optval,
+                                      unsigned int optlen)
+{
+       int val;
+
+       if (optlen < sizeof(int))
+               return -EINVAL;
+       if (get_user(val, (int __user *) optval))
+               return -EFAULT;
+
+       sctp_sk(sk)->recvnxtinfo = (val == 0) ? 0 : 1;
+
+       return 0;
+}
+
 /* API 6.2 setsockopt(), getsockopt()
  *
  * Applications use setsockopt() and getsockopt() to set or retrieve
@@ -3671,6 +3771,9 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
                retval = sctp_setsockopt_default_send_param(sk, optval,
                                                            optlen);
                break;
+       case SCTP_DEFAULT_SNDINFO:
+               retval = sctp_setsockopt_default_sndinfo(sk, optval, optlen);
+               break;
        case SCTP_PRIMARY_ADDR:
                retval = sctp_setsockopt_primary_addr(sk, optval, optlen);
                break;
@@ -3725,6 +3828,12 @@ static int sctp_setsockopt(struct sock *sk, int level, int optname,
        case SCTP_PEER_ADDR_THLDS:
                retval = sctp_setsockopt_paddr_thresholds(sk, optval, optlen);
                break;
+       case SCTP_RECVRCVINFO:
+               retval = sctp_setsockopt_recvrcvinfo(sk, optval, optlen);
+               break;
+       case SCTP_RECVNXTINFO:
+               retval = sctp_setsockopt_recvnxtinfo(sk, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -3971,6 +4080,9 @@ static int sctp_init_sock(struct sock *sk)
        /* Enable Nagle algorithm by default.  */
        sp->nodelay           = 0;
 
+       sp->recvrcvinfo = 0;
+       sp->recvnxtinfo = 0;
+
        /* Enable by default. */
        sp->v4mapped          = 1;
 
@@ -4143,7 +4255,7 @@ static int sctp_getsockopt_sctp_status(struct sock *sk, int len,
        memcpy(&status.sstat_primary.spinfo_address, &transport->ipaddr,
                        transport->af_specific->sockaddr_len);
        /* Map ipv4 address into v4-mapped-on-v6 address.  */
-       sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
+       sctp_get_pf_specific(sk->sk_family)->addr_to_user(sctp_sk(sk),
                (union sctp_addr *)&status.sstat_primary.spinfo_address);
        status.sstat_primary.spinfo_state = transport->state;
        status.sstat_primary.spinfo_cwnd = transport->cwnd;
@@ -4301,8 +4413,8 @@ static int sctp_getsockopt_autoclose(struct sock *sk, int len, char __user *optv
 int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
 {
        struct sctp_association *asoc = sctp_id2assoc(sk, id);
+       struct sctp_sock *sp = sctp_sk(sk);
        struct socket *sock;
-       struct sctp_af *af;
        int err = 0;
 
        if (!asoc)
@@ -4324,8 +4436,7 @@ int sctp_do_peeloff(struct sock *sk, sctp_assoc_t id, struct socket **sockp)
        /* Make peeled-off sockets more like 1-1 accepted sockets.
         * Set the daddr and initialize id to something more random
         */
-       af = sctp_get_af_specific(asoc->peer.primary_addr.sa.sa_family);
-       af->to_sk_daddr(&asoc->peer.primary_addr, sk);
+       sp->pf->to_sk_daddr(&asoc->peer.primary_addr, sk);
 
        /* Populate the fields of the newsk from the oldsk and migrate the
         * asoc to the newsk.
@@ -4709,8 +4820,8 @@ static int sctp_getsockopt_peer_addrs(struct sock *sk, int len,
        list_for_each_entry(from, &asoc->peer.transport_addr_list,
                                transports) {
                memcpy(&temp, &from->ipaddr, sizeof(temp));
-               sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
-               addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+               addrlen = sctp_get_pf_specific(sk->sk_family)
+                             ->addr_to_user(sp, &temp);
                if (space_left < addrlen)
                        return -ENOMEM;
                if (copy_to_user(to, &temp, addrlen))
@@ -4754,9 +4865,9 @@ static int sctp_copy_laddrs(struct sock *sk, __u16 port, void *to,
                if (!temp.v4.sin_port)
                        temp.v4.sin_port = htons(port);
 
-               sctp_get_pf_specific(sk->sk_family)->addr_v4map(sctp_sk(sk),
-                                                               &temp);
-               addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+               addrlen = sctp_get_pf_specific(sk->sk_family)
+                             ->addr_to_user(sctp_sk(sk), &temp);
+
                if (space_left < addrlen) {
                        cnt =  -ENOMEM;
                        break;
@@ -4844,8 +4955,8 @@ static int sctp_getsockopt_local_addrs(struct sock *sk, int len,
         */
        list_for_each_entry(addr, &bp->address_list, list) {
                memcpy(&temp, &addr->a, sizeof(temp));
-               sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp, &temp);
-               addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len;
+               addrlen = sctp_get_pf_specific(sk->sk_family)
+                             ->addr_to_user(sp, &temp);
                if (space_left < addrlen) {
                        err =  -ENOMEM; /*fixme: right error?*/
                        goto out;
@@ -4904,7 +5015,7 @@ static int sctp_getsockopt_primary_addr(struct sock *sk, int len,
        memcpy(&prim.ssp_addr, &asoc->peer.primary_path->ipaddr,
                asoc->peer.primary_path->af_specific->sockaddr_len);
 
-       sctp_get_pf_specific(sk->sk_family)->addr_v4map(sp,
+       sctp_get_pf_specific(sk->sk_family)->addr_to_user(sp,
                        (union sctp_addr *)&prim.ssp_addr);
 
        if (put_user(len, optlen))
@@ -4964,14 +5075,14 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
                                        int len, char __user *optval,
                                        int __user *optlen)
 {
-       struct sctp_sndrcvinfo info;
-       struct sctp_association *asoc;
        struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndrcvinfo info;
 
-       if (len < sizeof(struct sctp_sndrcvinfo))
+       if (len < sizeof(info))
                return -EINVAL;
 
-       len = sizeof(struct sctp_sndrcvinfo);
+       len = sizeof(info);
 
        if (copy_from_user(&info, optval, len))
                return -EFAULT;
@@ -4979,7 +5090,6 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
        asoc = sctp_id2assoc(sk, info.sinfo_assoc_id);
        if (!asoc && info.sinfo_assoc_id && sctp_style(sk, UDP))
                return -EINVAL;
-
        if (asoc) {
                info.sinfo_stream = asoc->default_stream;
                info.sinfo_flags = asoc->default_flags;
@@ -5002,6 +5112,48 @@ static int sctp_getsockopt_default_send_param(struct sock *sk,
        return 0;
 }
 
+/* RFC6458, Section 8.1.31. Set/get Default Send Parameters
+ * (SCTP_DEFAULT_SNDINFO)
+ */
+static int sctp_getsockopt_default_sndinfo(struct sock *sk, int len,
+                                          char __user *optval,
+                                          int __user *optlen)
+{
+       struct sctp_sock *sp = sctp_sk(sk);
+       struct sctp_association *asoc;
+       struct sctp_sndinfo info;
+
+       if (len < sizeof(info))
+               return -EINVAL;
+
+       len = sizeof(info);
+
+       if (copy_from_user(&info, optval, len))
+               return -EFAULT;
+
+       asoc = sctp_id2assoc(sk, info.snd_assoc_id);
+       if (!asoc && info.snd_assoc_id && sctp_style(sk, UDP))
+               return -EINVAL;
+       if (asoc) {
+               info.snd_sid = asoc->default_stream;
+               info.snd_flags = asoc->default_flags;
+               info.snd_ppid = asoc->default_ppid;
+               info.snd_context = asoc->default_context;
+       } else {
+               info.snd_sid = sp->default_stream;
+               info.snd_flags = sp->default_flags;
+               info.snd_ppid = sp->default_ppid;
+               info.snd_context = sp->default_context;
+       }
+
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &info, len))
+               return -EFAULT;
+
+       return 0;
+}
+
 /*
  *
  * 7.1.5 SCTP_NODELAY
@@ -5752,6 +5904,46 @@ static int sctp_getsockopt_assoc_stats(struct sock *sk, int len,
        return 0;
 }
 
+static int sctp_getsockopt_recvrcvinfo(struct sock *sk, int len,
+                                      char __user *optval,
+                                      int __user *optlen)
+{
+       int val = 0;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+       if (sctp_sk(sk)->recvrcvinfo)
+               val = 1;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
+static int sctp_getsockopt_recvnxtinfo(struct sock *sk, int len,
+                                      char __user *optval,
+                                      int __user *optlen)
+{
+       int val = 0;
+
+       if (len < sizeof(int))
+               return -EINVAL;
+
+       len = sizeof(int);
+       if (sctp_sk(sk)->recvnxtinfo)
+               val = 1;
+       if (put_user(len, optlen))
+               return -EFAULT;
+       if (copy_to_user(optval, &val, len))
+               return -EFAULT;
+
+       return 0;
+}
+
 static int sctp_getsockopt(struct sock *sk, int level, int optname,
                           char __user *optval, int __user *optlen)
 {
@@ -5821,6 +6013,10 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
                retval = sctp_getsockopt_default_send_param(sk, len,
                                                            optval, optlen);
                break;
+       case SCTP_DEFAULT_SNDINFO:
+               retval = sctp_getsockopt_default_sndinfo(sk, len,
+                                                        optval, optlen);
+               break;
        case SCTP_PRIMARY_ADDR:
                retval = sctp_getsockopt_primary_addr(sk, len, optval, optlen);
                break;
@@ -5895,6 +6091,12 @@ static int sctp_getsockopt(struct sock *sk, int level, int optname,
        case SCTP_GET_ASSOC_STATS:
                retval = sctp_getsockopt_assoc_stats(sk, len, optval, optlen);
                break;
+       case SCTP_RECVRCVINFO:
+               retval = sctp_getsockopt_recvrcvinfo(sk, len, optval, optlen);
+               break;
+       case SCTP_RECVNXTINFO:
+               retval = sctp_getsockopt_recvnxtinfo(sk, len, optval, optlen);
+               break;
        default:
                retval = -ENOPROTOOPT;
                break;
@@ -6390,8 +6592,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
        struct cmsghdr *cmsg;
        struct msghdr *my_msg = (struct msghdr *)msg;
 
-       for (cmsg = CMSG_FIRSTHDR(msg);
-            cmsg != NULL;
+       for (cmsg = CMSG_FIRSTHDR(msg); cmsg != NULL;
             cmsg = CMSG_NXTHDR(my_msg, cmsg)) {
                if (!CMSG_OK(my_msg, cmsg))
                        return -EINVAL;
@@ -6404,7 +6605,7 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
                switch (cmsg->cmsg_type) {
                case SCTP_INIT:
                        /* SCTP Socket API Extension
-                        * 5.2.1 SCTP Initiation Structure (SCTP_INIT)
+                        * 5.3.1 SCTP Initiation Structure (SCTP_INIT)
                         *
                         * This cmsghdr structure provides information for
                         * initializing new SCTP associations with sendmsg().
@@ -6416,15 +6617,15 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
                         * ------------  ------------   ----------------------
                         * IPPROTO_SCTP  SCTP_INIT      struct sctp_initmsg
                         */
-                       if (cmsg->cmsg_len !=
-                           CMSG_LEN(sizeof(struct sctp_initmsg)))
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_initmsg)))
                                return -EINVAL;
-                       cmsgs->init = (struct sctp_initmsg *)CMSG_DATA(cmsg);
+
+                       cmsgs->init = CMSG_DATA(cmsg);
                        break;
 
                case SCTP_SNDRCV:
                        /* SCTP Socket API Extension
-                        * 5.2.2 SCTP Header Information Structure(SCTP_SNDRCV)
+                        * 5.3.2 SCTP Header Information Structure(SCTP_SNDRCV)
                         *
                         * This cmsghdr structure specifies SCTP options for
                         * sendmsg() and describes SCTP header information
@@ -6434,24 +6635,44 @@ static int sctp_msghdr_parse(const struct msghdr *msg, sctp_cmsgs_t *cmsgs)
                         * ------------  ------------   ----------------------
                         * IPPROTO_SCTP  SCTP_SNDRCV    struct sctp_sndrcvinfo
                         */
-                       if (cmsg->cmsg_len !=
-                           CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndrcvinfo)))
                                return -EINVAL;
 
-                       cmsgs->info =
-                               (struct sctp_sndrcvinfo *)CMSG_DATA(cmsg);
+                       cmsgs->srinfo = CMSG_DATA(cmsg);
 
-                       /* Minimally, validate the sinfo_flags. */
-                       if (cmsgs->info->sinfo_flags &
+                       if (cmsgs->srinfo->sinfo_flags &
                            ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
                              SCTP_ABORT | SCTP_EOF))
                                return -EINVAL;
                        break;
 
+               case SCTP_SNDINFO:
+                       /* SCTP Socket API Extension
+                        * 5.3.4 SCTP Send Information Structure (SCTP_SNDINFO)
+                        *
+                        * This cmsghdr structure specifies SCTP options for
+                        * sendmsg(). This structure, together with SCTP_RCVINFO,
+                        * replaces SCTP_SNDRCV, which has been deprecated.
+                        *
+                        * cmsg_level    cmsg_type      cmsg_data[]
+                        * ------------  ------------   ---------------------
+                        * IPPROTO_SCTP  SCTP_SNDINFO    struct sctp_sndinfo
+                        */
+                       if (cmsg->cmsg_len != CMSG_LEN(sizeof(struct sctp_sndinfo)))
+                               return -EINVAL;
+
+                       cmsgs->sinfo = CMSG_DATA(cmsg);
+
+                       if (cmsgs->sinfo->snd_flags &
+                           ~(SCTP_UNORDERED | SCTP_ADDR_OVER |
+                             SCTP_ABORT | SCTP_EOF))
+                               return -EINVAL;
+                       break;
                default:
                        return -EINVAL;
                }
        }
+
        return 0;
 }
 
@@ -6518,8 +6739,8 @@ out:
  * Note: This is pretty much the same routine as in core/datagram.c
  * with a few changes to make lksctp work.
  */
-static struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
-                                             int noblock, int *err)
+struct sk_buff *sctp_skb_recv_datagram(struct sock *sk, int flags,
+                                      int noblock, int *err)
 {
        int error;
        struct sk_buff *skb;
index dcb1959..2e9ada1 100644 (file)
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       char tmp[8];
        struct ctl_table tbl;
-       int ret;
-       int changed = 0;
+       bool changed = false;
        char *none = "none";
+       char tmp[8];
+       int ret;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
 
        if (write) {
                tbl.data = tmp;
-               tbl.maxlen = 8;
+               tbl.maxlen = sizeof(tmp);
        } else {
                tbl.data = net->sctp.sctp_hmac_alg ? : none;
                tbl.maxlen = strlen(tbl.data);
        }
-               ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
 
-       if (write) {
+       ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
+       if (write && ret == 0) {
 #ifdef CONFIG_CRYPTO_MD5
                if (!strncmp(tmp, "md5", 3)) {
                        net->sctp.sctp_hmac_alg = "md5";
-                       changed = 1;
+                       changed = true;
                }
 #endif
 #ifdef CONFIG_CRYPTO_SHA1
                if (!strncmp(tmp, "sha1", 4)) {
                        net->sctp.sctp_hmac_alg = "sha1";
-                       changed = 1;
+                       changed = true;
                }
 #endif
                if (!strncmp(tmp, "none", 4)) {
                        net->sctp.sctp_hmac_alg = NULL;
-                       changed = 1;
+                       changed = true;
                }
-
                if (!changed)
                        ret = -EINVAL;
        }
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       int new_value;
-       struct ctl_table tbl;
        unsigned int min = *(unsigned int *) ctl->extra1;
        unsigned int max = *(unsigned int *) ctl->extra2;
-       int ret;
+       struct ctl_table tbl;
+       int ret, new_value;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
        tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
                tbl.data = &new_value;
        else
                tbl.data = &net->sctp.rto_min;
+
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-       if (write) {
-               if (ret || new_value > max || new_value < min)
+       if (write && ret == 0) {
+               if (new_value > max || new_value < min)
                        return -EINVAL;
+
                net->sctp.rto_min = new_value;
        }
+
        return ret;
 }
 
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
                                loff_t *ppos)
 {
        struct net *net = current->nsproxy->net_ns;
-       int new_value;
-       struct ctl_table tbl;
        unsigned int min = *(unsigned int *) ctl->extra1;
        unsigned int max = *(unsigned int *) ctl->extra2;
-       int ret;
+       struct ctl_table tbl;
+       int ret, new_value;
 
        memset(&tbl, 0, sizeof(struct ctl_table));
        tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
                tbl.data = &new_value;
        else
                tbl.data = &net->sctp.rto_max;
+
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-       if (write) {
-               if (ret || new_value > max || new_value < min)
+       if (write && ret == 0) {
+               if (new_value > max || new_value < min)
                        return -EINVAL;
+
                net->sctp.rto_max = new_value;
        }
+
        return ret;
 }
 
@@ -421,8 +424,9 @@ static int proc_sctp_do_alpha_beta(struct ctl_table *ctl, int write,
                                   void __user *buffer, size_t *lenp,
                                   loff_t *ppos)
 {
-       pr_warn_once("Changing rto_alpha or rto_beta may lead to "
-                    "suboptimal rtt/srtt estimations!\n");
+       if (write)
+               pr_warn_once("Changing rto_alpha or rto_beta may lead to "
+                            "suboptimal rtt/srtt estimations!\n");
 
        return proc_dointvec_minmax(ctl, write, buffer, lenp, ppos);
 }
@@ -444,8 +448,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
                tbl.data = &net->sctp.auth_enable;
 
        ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
-
-       if (write) {
+       if (write && ret == 0) {
                struct sock *sk = net->sctp.ctl_sock;
 
                net->sctp.auth_enable = new_value;
index 7dd672f..a0a4318 100644 (file)
@@ -289,8 +289,8 @@ void sctp_transport_route(struct sctp_transport *transport,
                 */
                if (asoc && (!asoc->peer.primary_path ||
                                (transport == asoc->peer.active_path)))
-                       opt->pf->af->to_sk_saddr(&transport->saddr,
-                                                asoc->base.sk);
+                       opt->pf->to_sk_saddr(&transport->saddr,
+                                            asoc->base.sk);
        } else
                transport->pathmtu = SCTP_DEFAULT_MAXSEGMENT;
 }
@@ -594,15 +594,16 @@ void sctp_transport_burst_reset(struct sctp_transport *t)
 }
 
 /* What is the next timeout value for this transport? */
-unsigned long sctp_transport_timeout(struct sctp_transport *t)
+unsigned long sctp_transport_timeout(struct sctp_transport *trans)
 {
-       unsigned long timeout;
-       timeout = t->rto + sctp_jitter(t->rto);
-       if ((t->state != SCTP_UNCONFIRMED) &&
-           (t->state != SCTP_PF))
-               timeout += t->hbinterval;
-       timeout += jiffies;
-       return timeout;
+       /* RTO + timer slack +/- 50% of RTO */
+       unsigned long timeout = (trans->rto >> 1) + prandom_u32_max(trans->rto);
+
+       if (trans->state != SCTP_UNCONFIRMED &&
+           trans->state != SCTP_PF)
+               timeout += trans->hbinterval;
+
+       return timeout + jiffies;
 }
 
 /* Reset transport variables to their initial values */
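Arithmetic check on the new jitter: prandom_u32_max(rto) is uniform over
[0, RTO), so (rto >> 1) + prandom_u32_max(rto) is uniform over
[RTO/2, 3*RTO/2), i.e. RTO +/- 50% as the comment says. For example, an RTO
of 200 ms yields a base timeout anywhere in [100 ms, 300 ms), to which the
heartbeat interval is added for confirmed, non-PF transports before the
jiffies offset.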
index 85c6465..d1e3830 100644 (file)
@@ -341,7 +341,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
        memcpy(&spc->spc_aaddr, aaddr, sizeof(struct sockaddr_storage));
 
        /* Map ipv4 address into v4-mapped-on-v6 address.  */
-       sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_v4map(
+       sctp_get_pf_specific(asoc->base.sk->sk_family)->addr_to_user(
                                        sctp_sk(asoc->base.sk),
                                        (union sctp_addr *)&spc->spc_aaddr);
 
@@ -366,9 +366,10 @@ fail:
  * specification [SCTP] and any extensions for a list of possible
  * error formats.
  */
-struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
-       const struct sctp_association *asoc, struct sctp_chunk *chunk,
-       __u16 flags, gfp_t gfp)
+struct sctp_ulpevent *
+sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
+                               struct sctp_chunk *chunk, __u16 flags,
+                               gfp_t gfp)
 {
        struct sctp_ulpevent *event;
        struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        /* Copy the skb to a new skb with room for us to prepend
         * notification with.
         */
-       skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error),
-                             0, gfp);
+       skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
 
        /* Pull off the rest of the cause TLV from the chunk.  */
        skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
        event = sctp_skb2event(skb);
        sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
 
-       sre = (struct sctp_remote_error *)
-               skb_push(skb, sizeof(struct sctp_remote_error));
+       sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
 
        /* Trim the buffer to the right length.  */
-       skb_trim(skb, sizeof(struct sctp_remote_error) + elen);
+       skb_trim(skb, sizeof(*sre) + elen);
 
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_type:
-        *   It should be SCTP_REMOTE_ERROR.
-        */
+       /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
+       memset(sre, 0, sizeof(*sre));
        sre->sre_type = SCTP_REMOTE_ERROR;
-
-       /*
-        * Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_flags: 16 bits (unsigned integer)
-        *   Currently unused.
-        */
        sre->sre_flags = 0;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_length: sizeof (__u32)
-        *
-        * This field is the total length of the notification data,
-        * including the notification header.
-        */
        sre->sre_length = skb->len;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_error: 16 bits (unsigned integer)
-        * This value represents one of the Operational Error causes defined in
-        * the SCTP specification, in network byte order.
-        */
        sre->sre_error = cause;
-
-       /* Socket Extensions for SCTP
-        * 5.3.1.3 SCTP_REMOTE_ERROR
-        *
-        * sre_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association id field, holds the identifier for the association.
-        * All notifications for a given association have the same association
-        * identifier.  For TCP style socket, this field is ignored.
-        */
        sctp_ulpevent_set_owner(event, asoc);
        sre->sre_assoc_id = sctp_assoc2id(asoc);
 
        return event;
-
 fail:
        return NULL;
 }
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
        return notification->sn_header.sn_type;
 }
 
-/* Copy out the sndrcvinfo into a msghdr.  */
+/* RFC6458, Section 5.3.2. SCTP Header Information Structure
+ * (SCTP_SNDRCV, DEPRECATED)
+ */
 void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
                                   struct msghdr *msghdr)
 {
@@ -908,74 +869,84 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
        if (sctp_ulpevent_is_notification(event))
                return;
 
-       /* Sockets API Extensions for SCTP
-        * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
-        *
-        * sinfo_stream: 16 bits (unsigned integer)
-        *
-        * For recvmsg() the SCTP stack places the message's stream number in
-        * this value.
-       */
+       memset(&sinfo, 0, sizeof(sinfo));
        sinfo.sinfo_stream = event->stream;
-       /* sinfo_ssn: 16 bits (unsigned integer)
-        *
-        * For recvmsg() this value contains the stream sequence number that
-        * the remote endpoint placed in the DATA chunk.  For fragmented
-        * messages this is the same number for all deliveries of the message
-        * (if more than one recvmsg() is needed to read the message).
-        */
        sinfo.sinfo_ssn = event->ssn;
-       /* sinfo_ppid: 32 bits (unsigned integer)
-        *
-        * In recvmsg() this value is
-        * the same information that was passed by the upper layer in the peer
-        * application.  Please note that byte order issues are NOT accounted
-        * for and this information is passed opaquely by the SCTP stack from
-        * one end to the other.
-        */
        sinfo.sinfo_ppid = event->ppid;
-       /* sinfo_flags: 16 bits (unsigned integer)
-        *
-        * This field may contain any of the following flags and is composed of
-        * a bitwise OR of these values.
-        *
-        * recvmsg() flags:
-        *
-        * SCTP_UNORDERED - This flag is present when the message was sent
-        *                 non-ordered.
-        */
        sinfo.sinfo_flags = event->flags;
-       /* sinfo_tsn: 32 bit (unsigned integer)
-        *
-        * For the receiving side, this field holds a TSN that was
-        * assigned to one of the SCTP Data Chunks.
-        */
        sinfo.sinfo_tsn = event->tsn;
-       /* sinfo_cumtsn: 32 bit (unsigned integer)
-        *
-        * This field will hold the current cumulative TSN as
-        * known by the underlying SCTP layer.  Note this field is
-        * ignored when sending and only valid for a receive
-        * operation when sinfo_flags are set to SCTP_UNORDERED.
-        */
        sinfo.sinfo_cumtsn = event->cumtsn;
-       /* sinfo_assoc_id: sizeof (sctp_assoc_t)
-        *
-        * The association handle field, sinfo_assoc_id, holds the identifier
-        * for the association announced in the COMMUNICATION_UP notification.
-        * All notifications for a given association have the same identifier.
-        * Ignored for one-to-one style sockets.
-        */
        sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
-
-       /* context value that is set via SCTP_CONTEXT socket option. */
+       /* Context value that is set via SCTP_CONTEXT socket option. */
        sinfo.sinfo_context = event->asoc->default_rcv_context;
-
        /* These fields are not used while receiving. */
        sinfo.sinfo_timetolive = 0;
 
        put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
-                sizeof(struct sctp_sndrcvinfo), (void *)&sinfo);
+                sizeof(sinfo), &sinfo);
+}
+
+/* RFC6458, Section 5.3.5. SCTP Receive Information Structure
+ * (SCTP_RCVINFO)
+ */
+void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *msghdr)
+{
+       struct sctp_rcvinfo rinfo;
+
+       if (sctp_ulpevent_is_notification(event))
+               return;
+
+       memset(&rinfo, 0, sizeof(struct sctp_rcvinfo));
+       rinfo.rcv_sid = event->stream;
+       rinfo.rcv_ssn = event->ssn;
+       rinfo.rcv_ppid = event->ppid;
+       rinfo.rcv_flags = event->flags;
+       rinfo.rcv_tsn = event->tsn;
+       rinfo.rcv_cumtsn = event->cumtsn;
+       rinfo.rcv_assoc_id = sctp_assoc2id(event->asoc);
+       rinfo.rcv_context = event->asoc->default_rcv_context;
+
+       put_cmsg(msghdr, IPPROTO_SCTP, SCTP_RCVINFO,
+                sizeof(rinfo), &rinfo);
+}
+
+/* RFC6458, Section 5.3.6. SCTP Next Receive Information Structure
+ * (SCTP_NXTINFO)
+ */
+static void __sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+                                        struct msghdr *msghdr,
+                                        const struct sk_buff *skb)
+{
+       struct sctp_nxtinfo nxtinfo;
+
+       memset(&nxtinfo, 0, sizeof(nxtinfo));
+       nxtinfo.nxt_sid = event->stream;
+       nxtinfo.nxt_ppid = event->ppid;
+       nxtinfo.nxt_flags = event->flags;
+       if (sctp_ulpevent_is_notification(event))
+               nxtinfo.nxt_flags |= SCTP_NOTIFICATION;
+       nxtinfo.nxt_length = skb->len;
+       nxtinfo.nxt_assoc_id = sctp_assoc2id(event->asoc);
+
+       put_cmsg(msghdr, IPPROTO_SCTP, SCTP_NXTINFO,
+                sizeof(nxtinfo), &nxtinfo);
+}
+
+void sctp_ulpevent_read_nxtinfo(const struct sctp_ulpevent *event,
+                               struct msghdr *msghdr,
+                               struct sock *sk)
+{
+       struct sk_buff *skb;
+       int err;
+
+       skb = sctp_skb_recv_datagram(sk, MSG_PEEK, 1, &err);
+       if (skb != NULL) {
+               __sctp_ulpevent_read_nxtinfo(sctp_skb2event(skb),
+                                            msghdr, skb);
+               /* Just release refcount here. */
+               kfree_skb(skb);
+       }
 }
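sctp_ulpevent_read_nxtinfo() above peeks the next queued skb with MSG_PEEK,
so the SCTP_NXTINFO cmsg can describe the following message (or
notification, via the SCTP_NOTIFICATION flag) without consuming it.
Enabling the option from userspace mirrors the SCTP_RECVRCVINFO example; a
hypothetical sketch:

    static int enable_nxtinfo(int fd)
    {
            int on = 1;

            /* Each subsequent recvmsg() may now also carry an SCTP_NXTINFO
             * cmsg (struct sctp_nxtinfo) describing the next message
             * waiting in the receive queue.
             */
            return setsockopt(fd, IPPROTO_SCTP, SCTP_RECVNXTINFO,
                              &on, sizeof(on));
    }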
 
 /* Do accounting for bytes received and hold a reference to the association
index abf56b2..d8222c0 100644 (file)
@@ -725,14 +725,10 @@ void __sock_recv_timestamp(struct msghdr *msg, struct sock *sk,
        if (sock_flag(sk, SOCK_TIMESTAMPING_SOFTWARE) &&
            ktime_to_timespec_cond(skb->tstamp, ts + 0))
                empty = 0;
-       if (shhwtstamps) {
-               if (sock_flag(sk, SOCK_TIMESTAMPING_SYS_HARDWARE) &&
-                   ktime_to_timespec_cond(shhwtstamps->syststamp, ts + 1))
-                       empty = 0;
-               if (sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
-                   ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2))
-                       empty = 0;
-       }
+       if (shhwtstamps &&
+           sock_flag(sk, SOCK_TIMESTAMPING_RAW_HARDWARE) &&
+           ktime_to_timespec_cond(shhwtstamps->hwtstamp, ts + 2))
+               empty = 0;
        if (!empty)
                put_cmsg(msg, SOL_SOCKET,
                         SCM_TIMESTAMPING, sizeof(ts), &ts);
index 247e973..f773667 100644 (file)
@@ -592,6 +592,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
        put_group_info(acred.group_info);
        return ret;
 }
+EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
 
 void
 rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
index 2663167..dd13bfa 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.c: TIPC broadcast code
  *
- * Copyright (c) 2004-2006, Ericsson AB
+ * Copyright (c) 2004-2006, 2014, Ericsson AB
  * Copyright (c) 2004, Intel Corporation.
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
@@ -38,6 +38,8 @@
 #include "core.h"
 #include "link.h"
 #include "port.h"
+#include "socket.h"
+#include "msg.h"
 #include "bcast.h"
 #include "name_distr.h"
 
@@ -138,6 +140,11 @@ static void tipc_bclink_unlock(void)
                tipc_link_reset_all(node);
 }
 
+uint  tipc_bclink_get_mtu(void)
+{
+       return MAX_PKT_DEFAULT_MCAST;
+}
+
 void tipc_bclink_set_flags(unsigned int flags)
 {
        bclink->flags |= flags;
@@ -382,30 +389,50 @@ static void bclink_peek_nack(struct tipc_msg *msg)
        tipc_node_unlock(n_ptr);
 }
 
-/*
- * tipc_bclink_xmit - broadcast a packet to all nodes in cluster
+/* tipc_bclink_xmit - broadcast buffer chain to all nodes in cluster
+ *                    and to the identified node-local sockets
+ * @buf: chain of buffers containing message
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH or -EMSGSIZE
  */
 int tipc_bclink_xmit(struct sk_buff *buf)
 {
-       int res;
+       int rc = 0;
+       int bc = 0;
+       struct sk_buff *clbuf;
 
-       tipc_bclink_lock();
-
-       if (!bclink->bcast_nodes.count) {
-               res = msg_data_sz(buf_msg(buf));
-               kfree_skb(buf);
-               goto exit;
+       /* Prepare clone of message for local node */
+       clbuf = tipc_msg_reassemble(buf);
+       if (unlikely(!clbuf)) {
+               kfree_skb_list(buf);
+               return -EHOSTUNREACH;
        }
 
-       res = __tipc_link_xmit(bcl, buf);
-       if (likely(res >= 0)) {
-               bclink_set_last_sent();
-               bcl->stats.queue_sz_counts++;
-               bcl->stats.accu_queue_sz += bcl->out_queue_size;
+       /* Broadcast to all other nodes */
+       if (likely(bclink)) {
+               tipc_bclink_lock();
+               if (likely(bclink->bcast_nodes.count)) {
+                       rc = __tipc_link_xmit(bcl, buf);
+                       if (likely(!rc)) {
+                               bclink_set_last_sent();
+                               bcl->stats.queue_sz_counts++;
+                               bcl->stats.accu_queue_sz += bcl->out_queue_size;
+                       }
+                       bc = 1;
+               }
+               tipc_bclink_unlock();
        }
-exit:
-       tipc_bclink_unlock();
-       return res;
+
+       if (unlikely(!bc))
+               kfree_skb_list(buf);
+
+       /* Deliver message clone */
+       if (likely(!rc))
+               tipc_sk_mcast_rcv(clbuf);
+       else
+               kfree_skb(clbuf);
+
+       return rc;
 }
 
 /**
@@ -443,7 +470,7 @@ void tipc_bclink_rcv(struct sk_buff *buf)
        struct tipc_node *node;
        u32 next_in;
        u32 seqno;
-       int deferred;
+       int deferred = 0;
 
        /* Screen out unwanted broadcast messages */
 
@@ -494,7 +521,7 @@ receive:
                        tipc_bclink_unlock();
                        tipc_node_unlock(node);
                        if (likely(msg_mcast(msg)))
-                               tipc_port_mcast_rcv(buf, NULL);
+                               tipc_sk_mcast_rcv(buf);
                        else
                                kfree_skb(buf);
                } else if (msg_user(msg) == MSG_BUNDLER) {
@@ -559,6 +586,7 @@ receive:
 
                buf = node->bclink.deferred_head;
                node->bclink.deferred_head = buf->next;
+               buf->next = NULL;
                node->bclink.deferred_size--;
                goto receive;
        }
@@ -572,8 +600,7 @@ receive:
                node->bclink.deferred_size += deferred;
                bclink_update_last_sent(node, seqno);
                buf = NULL;
-       } else
-               deferred = 0;
+       }
 
        tipc_bclink_lock();
 
@@ -610,6 +637,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
                              struct tipc_media_addr *unused2)
 {
        int bp_index;
+       struct tipc_msg *msg = buf_msg(buf);
 
        /* Prepare broadcast link message for reliable transmission,
         * if first time trying to send it;
@@ -617,10 +645,7 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
         * since they are sent in an unreliable manner and don't need it
         */
        if (likely(!msg_non_seq(buf_msg(buf)))) {
-               struct tipc_msg *msg;
-
                bcbuf_set_acks(buf, bclink->bcast_nodes.count);
-               msg = buf_msg(buf);
                msg_set_non_seq(msg, 1);
                msg_set_mc_netid(msg, tipc_net_id);
                bcl->stats.sent_info++;
@@ -637,12 +662,14 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
        for (bp_index = 0; bp_index < MAX_BEARERS; bp_index++) {
                struct tipc_bearer *p = bcbearer->bpairs[bp_index].primary;
                struct tipc_bearer *s = bcbearer->bpairs[bp_index].secondary;
-               struct tipc_bearer *b = p;
+               struct tipc_bearer *bp[2] = {p, s};
+               struct tipc_bearer *b = bp[msg_link_selector(msg)];
                struct sk_buff *tbuf;
 
                if (!p)
                        break; /* No more bearers to try */
-
+               if (!b)
+                       b = p;
                tipc_nmap_diff(&bcbearer->remains, &b->nodes,
                               &bcbearer->remains_new);
                if (bcbearer->remains_new.count == bcbearer->remains.count)
@@ -659,13 +686,6 @@ static int tipc_bcbearer_send(struct sk_buff *buf, struct tipc_bearer *unused1,
                        tipc_bearer_send(b->identity, tbuf, &b->bcast_addr);
                        kfree_skb(tbuf); /* Bearer keeps a clone */
                }
-
-               /* Swap bearers for next packet */
-               if (s) {
-                       bcbearer->bpairs[bp_index].primary = s;
-                       bcbearer->bpairs[bp_index].secondary = p;
-               }
-
                if (bcbearer->remains_new.count == 0)
                        break; /* All targets reached */
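Design note: instead of swapping the primary and secondary bearers after
every broadcast packet, the sender now indexes the bearer pair with the
message's own link selector bit, so a given message and its retransmissions
deterministically map to one bearer, falling back to the primary when no
secondary exists.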
 
index 00330c4..4875d95 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/bcast.h: Include file for TIPC broadcast code
  *
- * Copyright (c) 2003-2006, Ericsson AB
+ * Copyright (c) 2003-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
@@ -89,7 +89,6 @@ void tipc_bclink_add_node(u32 addr);
 void tipc_bclink_remove_node(u32 addr);
 struct tipc_node *tipc_bclink_retransmit_to(void);
 void tipc_bclink_acknowledge(struct tipc_node *n_ptr, u32 acked);
-int  tipc_bclink_xmit(struct sk_buff *buf);
 void tipc_bclink_rcv(struct sk_buff *buf);
 u32  tipc_bclink_get_last_sent(void);
 u32  tipc_bclink_acks_missing(struct tipc_node *n_ptr);
@@ -98,5 +97,7 @@ int  tipc_bclink_stats(char *stats_buf, const u32 buf_size);
 int  tipc_bclink_reset_stats(void);
 int  tipc_bclink_set_queue_limits(u32 limit);
 void tipc_bcbearer_sort(struct tipc_node_map *nm_ptr, u32 node, bool action);
+uint  tipc_bclink_get_mtu(void);
+int tipc_bclink_xmit(struct sk_buff *buf);
 
 #endif
index ad2c57f..fb1485d 100644 (file)
@@ -82,15 +82,13 @@ static void tipc_link_proto_rcv(struct tipc_link *l_ptr, struct sk_buff *buf);
 static int  tipc_link_tunnel_rcv(struct tipc_node *n_ptr,
                                 struct sk_buff **buf);
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance);
-static int  tipc_link_iovec_long_xmit(struct tipc_port *sender,
-                                     struct iovec const *msg_sect,
-                                     unsigned int len, u32 destnode);
 static void link_state_event(struct tipc_link *l_ptr, u32 event);
 static void link_reset_statistics(struct tipc_link *l_ptr);
 static void link_print(struct tipc_link *l_ptr, const char *str);
-static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
 static void tipc_link_sync_xmit(struct tipc_link *l);
 static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf);
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf);
+static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf);
 
 /*
  *  Simple link routines
@@ -335,13 +333,15 @@ void tipc_link_delete_list(unsigned int bearer_id, bool shutting_down)
 static int link_schedule_port(struct tipc_link *l_ptr, u32 origport, u32 sz)
 {
        struct tipc_port *p_ptr;
+       struct tipc_sock *tsk;
 
        spin_lock_bh(&tipc_port_list_lock);
        p_ptr = tipc_port_lock(origport);
        if (p_ptr) {
                if (!list_empty(&p_ptr->wait_list))
                        goto exit;
-               p_ptr->congested = 1;
+               tsk = tipc_port_to_sock(p_ptr);
+               tsk->link_cong = 1;
                p_ptr->waiting_pkts = 1 + ((sz - 1) / l_ptr->max_pkt);
                list_add_tail(&p_ptr->wait_list, &l_ptr->waiting_ports);
                l_ptr->stats.link_congs++;
@@ -355,6 +355,7 @@ exit:
 void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
 {
        struct tipc_port *p_ptr;
+       struct tipc_sock *tsk;
        struct tipc_port *temp_p_ptr;
        int win = l_ptr->queue_limit[0] - l_ptr->out_queue_size;
 
@@ -370,10 +371,11 @@ void tipc_link_wakeup_ports(struct tipc_link *l_ptr, int all)
                                 wait_list) {
                if (win <= 0)
                        break;
+               tsk = tipc_port_to_sock(p_ptr);
                list_del_init(&p_ptr->wait_list);
                spin_lock_bh(p_ptr->lock);
-               p_ptr->congested = 0;
-               tipc_port_wakeup(p_ptr);
+               tsk->link_cong = 0;
+               tipc_sock_wakeup(tsk);
                win -= p_ptr->waiting_pkts;
                spin_unlock_bh(p_ptr->lock);
        }
@@ -676,178 +678,142 @@ static void link_state_event(struct tipc_link *l_ptr, unsigned int event)
        }
 }
 
-/*
- * link_bundle_buf(): Append contents of a buffer to
- * the tail of an existing one.
+/* tipc_link_cong: determine return value and how to treat the
+ * sent buffer during link congestion.
+ * - For plain, errorless user data messages we keep the buffer and
+ *   return -ELINKCONG.
+ * - For all other messages we discard the buffer and return -EHOSTUNREACH
+ * - For TIPC internal messages we also reset the link
  */
-static int link_bundle_buf(struct tipc_link *l_ptr, struct sk_buff *bundler,
-                          struct sk_buff *buf)
+static int tipc_link_cong(struct tipc_link *link, struct sk_buff *buf)
 {
-       struct tipc_msg *bundler_msg = buf_msg(bundler);
        struct tipc_msg *msg = buf_msg(buf);
-       u32 size = msg_size(msg);
-       u32 bundle_size = msg_size(bundler_msg);
-       u32 to_pos = align(bundle_size);
-       u32 pad = to_pos - bundle_size;
-
-       if (msg_user(bundler_msg) != MSG_BUNDLER)
-               return 0;
-       if (msg_type(bundler_msg) != OPEN_MSG)
-               return 0;
-       if (skb_tailroom(bundler) < (pad + size))
-               return 0;
-       if (l_ptr->max_pkt < (to_pos + size))
-               return 0;
-
-       skb_put(bundler, pad + size);
-       skb_copy_to_linear_data_offset(bundler, to_pos, buf->data, size);
-       msg_set_size(bundler_msg, to_pos + size);
-       msg_set_msgcnt(bundler_msg, msg_msgcnt(bundler_msg) + 1);
-       kfree_skb(buf);
-       l_ptr->stats.sent_bundled++;
-       return 1;
-}
-
-static void link_add_to_outqueue(struct tipc_link *l_ptr,
-                                struct sk_buff *buf,
-                                struct tipc_msg *msg)
-{
-       u32 ack = mod(l_ptr->next_in_no - 1);
-       u32 seqno = mod(l_ptr->next_out_no++);
+       uint psz = msg_size(msg);
+       uint imp = tipc_msg_tot_importance(msg);
+       u32 oport = msg_tot_origport(msg);
 
-       msg_set_word(msg, 2, ((ack << 16) | seqno));
-       msg_set_bcast_ack(msg, l_ptr->owner->bclink.last_in);
-       buf->next = NULL;
-       if (l_ptr->first_out) {
-               l_ptr->last_out->next = buf;
-               l_ptr->last_out = buf;
-       } else
-               l_ptr->first_out = l_ptr->last_out = buf;
-
-       l_ptr->out_queue_size++;
-       if (l_ptr->out_queue_size > l_ptr->stats.max_queue_sz)
-               l_ptr->stats.max_queue_sz = l_ptr->out_queue_size;
-}
-
-static void link_add_chain_to_outqueue(struct tipc_link *l_ptr,
-                                      struct sk_buff *buf_chain,
-                                      u32 long_msgno)
-{
-       struct sk_buff *buf;
-       struct tipc_msg *msg;
-
-       if (!l_ptr->next_out)
-               l_ptr->next_out = buf_chain;
-       while (buf_chain) {
-               buf = buf_chain;
-               buf_chain = buf_chain->next;
-
-               msg = buf_msg(buf);
-               msg_set_long_msgno(msg, long_msgno);
-               link_add_to_outqueue(l_ptr, buf, msg);
+       if (likely(imp <= TIPC_CRITICAL_IMPORTANCE)) {
+               if (!msg_errcode(msg) && !msg_reroute_cnt(msg)) {
+                       link_schedule_port(link, oport, psz);
+                       return -ELINKCONG;
+               }
+       } else {
+               pr_warn("%s<%s>, send queue full", link_rst_msg, link->name);
+               tipc_link_reset(link);
        }
+       kfree_skb_list(buf);
+       return -EHOSTUNREACH;
 }
 
-/*
- * tipc_link_xmit() is the 'full path' for messages, called from
- * inside TIPC when the 'fast path' in tipc_send_xmit
- * has failed, and from link_send()
+/**
+ * __tipc_link_xmit(): same as tipc_link_xmit, but destlink is known & locked
+ * @link: link to use
+ * @buf: chain of buffers containing message
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EMSGSIZE (plain socket
+ * user data messages) or -EHOSTUNREACH (all other messages/senders)
+ * Only the socket functions tipc_send_stream() and tipc_send_packet() need
+ * to act on the return value, since they may need to do more send attempts.
  */
-int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf)
 {
        struct tipc_msg *msg = buf_msg(buf);
-       u32 size = msg_size(msg);
-       u32 dsz = msg_data_sz(msg);
-       u32 queue_size = l_ptr->out_queue_size;
-       u32 imp = tipc_msg_tot_importance(msg);
-       u32 queue_limit = l_ptr->queue_limit[imp];
-       u32 max_packet = l_ptr->max_pkt;
-
-       /* Match msg importance against queue limits: */
-       if (unlikely(queue_size >= queue_limit)) {
-               if (imp <= TIPC_CRITICAL_IMPORTANCE) {
-                       link_schedule_port(l_ptr, msg_origport(msg), size);
-                       kfree_skb(buf);
-                       return -ELINKCONG;
-               }
-               kfree_skb(buf);
-               if (imp > CONN_MANAGER) {
-                       pr_warn("%s<%s>, send queue full", link_rst_msg,
-                               l_ptr->name);
-                       tipc_link_reset(l_ptr);
-               }
-               return dsz;
+       uint psz = msg_size(msg);
+       uint qsz = link->out_queue_size;
+       uint sndlim = link->queue_limit[0];
+       uint imp = tipc_msg_tot_importance(msg);
+       uint mtu = link->max_pkt;
+       uint ack = mod(link->next_in_no - 1);
+       uint seqno = link->next_out_no;
+       uint bc_last_in = link->owner->bclink.last_in;
+       struct tipc_media_addr *addr = &link->media_addr;
+       struct sk_buff *next = buf->next;
+
+       /* Match queue limits against msg importance: */
+       if (unlikely(qsz >= link->queue_limit[imp]))
+               return tipc_link_cong(link, buf);
+
+       /* Has a valid packet limit been used? */
+       if (unlikely(psz > mtu)) {
+               kfree_skb_list(buf);
+               return -EMSGSIZE;
        }
 
-       /* Fragmentation needed ? */
-       if (size > max_packet)
-               return tipc_link_frag_xmit(l_ptr, buf);
-
-       /* Packet can be queued or sent. */
-       if (likely(!link_congested(l_ptr))) {
-               link_add_to_outqueue(l_ptr, buf, msg);
+       /* Prepare each packet for sending, and add to outqueue: */
+       while (buf) {
+               next = buf->next;
+               msg = buf_msg(buf);
+               msg_set_word(msg, 2, ((ack << 16) | mod(seqno)));
+               msg_set_bcast_ack(msg, bc_last_in);
+
+               if (!link->first_out) {
+                       link->first_out = buf;
+               } else if (qsz < sndlim) {
+                       link->last_out->next = buf;
+               } else if (tipc_msg_bundle(link->last_out, buf, mtu)) {
+                       link->stats.sent_bundled++;
+                       buf = next;
+                       next = buf->next;
+                       continue;
+               } else if (tipc_msg_make_bundle(&buf, mtu, link->addr)) {
+                       link->stats.sent_bundled++;
+                       link->stats.sent_bundles++;
+                       link->last_out->next = buf;
+                       if (!link->next_out)
+                               link->next_out = buf;
+               } else {
+                       link->last_out->next = buf;
+                       if (!link->next_out)
+                               link->next_out = buf;
+               }
 
-               tipc_bearer_send(l_ptr->bearer_id, buf, &l_ptr->media_addr);
-               l_ptr->unacked_window = 0;
-               return dsz;
-       }
-       /* Congestion: can message be bundled ? */
-       if ((msg_user(msg) != CHANGEOVER_PROTOCOL) &&
-           (msg_user(msg) != MSG_FRAGMENTER)) {
-
-               /* Try adding message to an existing bundle */
-               if (l_ptr->next_out &&
-                   link_bundle_buf(l_ptr, l_ptr->last_out, buf))
-                       return dsz;
-
-               /* Try creating a new bundle */
-               if (size <= max_packet * 2 / 3) {
-                       struct sk_buff *bundler = tipc_buf_acquire(max_packet);
-                       struct tipc_msg bundler_hdr;
-
-                       if (bundler) {
-                               tipc_msg_init(&bundler_hdr, MSG_BUNDLER, OPEN_MSG,
-                                        INT_H_SIZE, l_ptr->addr);
-                               skb_copy_to_linear_data(bundler, &bundler_hdr,
-                                                       INT_H_SIZE);
-                               skb_trim(bundler, INT_H_SIZE);
-                               link_bundle_buf(l_ptr, bundler, buf);
-                               buf = bundler;
-                               msg = buf_msg(buf);
-                               l_ptr->stats.sent_bundles++;
-                       }
+               /* Send packet if possible: */
+               if (likely(++qsz <= sndlim)) {
+                       tipc_bearer_send(link->bearer_id, buf, addr);
+                       link->next_out = next;
+                       link->unacked_window = 0;
                }
+               seqno++;
+               link->last_out = buf;
+               buf = next;
        }
-       if (!l_ptr->next_out)
-               l_ptr->next_out = buf;
-       link_add_to_outqueue(l_ptr, buf, msg);
-       return dsz;
+       link->next_out_no = seqno;
+       link->out_queue_size = qsz;
+       return 0;
 }
 
-/*
- * tipc_link_xmit(): same as __tipc_link_xmit(), but the link to use
- * has not been selected yet, and the the owner node is not locked
- * Called by TIPC internal users, e.g. the name distributor
+/**
+ * tipc_link_xmit() is the general link-level function for message sending
+ * @buf: chain of buffers containing message
+ * @dsz: amount of user data to be sent
+ * @dnode: address of destination node
+ * @selector: a number used for deterministic link selection
+ * Consumes the buffer chain, except when returning -ELINKCONG
+ * Returns 0 on success, otherwise errno: -ELINKCONG, -EHOSTUNREACH, -EMSGSIZE
  */
-int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
+int tipc_link_xmit(struct sk_buff *buf, u32 dnode, u32 selector)
 {
-       struct tipc_link *l_ptr;
-       struct tipc_node *n_ptr;
-       int res = -ELINKCONG;
+       struct tipc_link *link = NULL;
+       struct tipc_node *node;
+       int rc = -EHOSTUNREACH;
 
-       n_ptr = tipc_node_find(dest);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[selector & 1];
-               if (l_ptr)
-                       res = __tipc_link_xmit(l_ptr, buf);
-               else
-                       kfree_skb(buf);
-               tipc_node_unlock(n_ptr);
-       } else {
-               kfree_skb(buf);
+       node = tipc_node_find(dnode);
+       if (node) {
+               tipc_node_lock(node);
+               link = node->active_links[selector & 1];
+               if (link)
+                       rc = __tipc_link_xmit(link, buf);
+               tipc_node_unlock(node);
        }
-       return res;
+
+       if (link)
+               return rc;
+
+       if (likely(in_own_node(dnode)))
+               return tipc_sk_rcv(buf);
+
+       kfree_skb_list(buf);
+       return rc;
 }
 
 /*
@@ -858,7 +824,7 @@ int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector)
  *
  * Called with node locked
  */
-static void tipc_link_sync_xmit(struct tipc_link *l)
+static void tipc_link_sync_xmit(struct tipc_link *link)
 {
        struct sk_buff *buf;
        struct tipc_msg *msg;
@@ -868,10 +834,9 @@ static void tipc_link_sync_xmit(struct tipc_link *l)
                return;
 
        msg = buf_msg(buf);
-       tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, l->addr);
-       msg_set_last_bcast(msg, l->owner->bclink.acked);
-       link_add_chain_to_outqueue(l, buf, 0);
-       tipc_link_push_queue(l);
+       tipc_msg_init(msg, BCAST_PROTOCOL, STATE_MSG, INT_H_SIZE, link->addr);
+       msg_set_last_bcast(msg, link->owner->bclink.acked);
+       __tipc_link_xmit(link, buf);
 }
 
 /*
@@ -891,293 +856,6 @@ static void tipc_link_sync_rcv(struct tipc_node *n, struct sk_buff *buf)
        kfree_skb(buf);
 }
 
-/*
- * tipc_link_names_xmit - send name table entries to new neighbor
- *
- * Send routine for bulk delivery of name table messages when contact
- * with a new neighbor occurs. No link congestion checking is performed
- * because name table messages *must* be delivered. The messages must be
- * small enough not to require fragmentation.
- * Called without any locks held.
- */
-void tipc_link_names_xmit(struct list_head *message_list, u32 dest)
-{
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
-       struct sk_buff *buf;
-       struct sk_buff *temp_buf;
-
-       if (list_empty(message_list))
-               return;
-
-       n_ptr = tipc_node_find(dest);
-       if (n_ptr) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[0];
-               if (l_ptr) {
-                       /* convert circular list to linear list */
-                       ((struct sk_buff *)message_list->prev)->next = NULL;
-                       link_add_chain_to_outqueue(l_ptr,
-                               (struct sk_buff *)message_list->next, 0);
-                       tipc_link_push_queue(l_ptr);
-                       INIT_LIST_HEAD(message_list);
-               }
-               tipc_node_unlock(n_ptr);
-       }
-
-       /* discard the messages if they couldn't be sent */
-       list_for_each_safe(buf, temp_buf, ((struct sk_buff *)message_list)) {
-               list_del((struct list_head *)buf);
-               kfree_skb(buf);
-       }
-}
-
-/*
- * tipc_link_xmit_fast: Entry for data messages where the
- * destination link is known and the header is complete,
- * inclusive total message length. Very time critical.
- * Link is locked. Returns user data length.
- */
-static int tipc_link_xmit_fast(struct tipc_link *l_ptr, struct sk_buff *buf,
-                              u32 *used_max_pkt)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       int res = msg_data_sz(msg);
-
-       if (likely(!link_congested(l_ptr))) {
-               if (likely(msg_size(msg) <= l_ptr->max_pkt)) {
-                       link_add_to_outqueue(l_ptr, buf, msg);
-                       tipc_bearer_send(l_ptr->bearer_id, buf,
-                                        &l_ptr->media_addr);
-                       l_ptr->unacked_window = 0;
-                       return res;
-               }
-               else
-                       *used_max_pkt = l_ptr->max_pkt;
-       }
-       return __tipc_link_xmit(l_ptr, buf);  /* All other cases */
-}
-
-/*
- * tipc_link_iovec_xmit_fast: Entry for messages where the
- * destination processor is known and the header is complete,
- * except for total message length.
- * Returns user data length or errno.
- */
-int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
-                             struct iovec const *msg_sect,
-                             unsigned int len, u32 destaddr)
-{
-       struct tipc_msg *hdr = &sender->phdr;
-       struct tipc_link *l_ptr;
-       struct sk_buff *buf;
-       struct tipc_node *node;
-       int res;
-       u32 selector = msg_origport(hdr) & 1;
-
-again:
-       /*
-        * Try building message using port's max_pkt hint.
-        * (Must not hold any locks while building message.)
-        */
-       res = tipc_msg_build(hdr, msg_sect, len, sender->max_pkt, &buf);
-       /* Exit if build request was invalid */
-       if (unlikely(res < 0))
-               return res;
-
-       node = tipc_node_find(destaddr);
-       if (likely(node)) {
-               tipc_node_lock(node);
-               l_ptr = node->active_links[selector];
-               if (likely(l_ptr)) {
-                       if (likely(buf)) {
-                               res = tipc_link_xmit_fast(l_ptr, buf,
-                                                         &sender->max_pkt);
-exit:
-                               tipc_node_unlock(node);
-                               return res;
-                       }
-
-                       /* Exit if link (or bearer) is congested */
-                       if (link_congested(l_ptr)) {
-                               res = link_schedule_port(l_ptr,
-                                                        sender->ref, res);
-                               goto exit;
-                       }
-
-                       /*
-                        * Message size exceeds max_pkt hint; update hint,
-                        * then re-try fast path or fragment the message
-                        */
-                       sender->max_pkt = l_ptr->max_pkt;
-                       tipc_node_unlock(node);
-
-
-                       if ((msg_hdr_sz(hdr) + res) <= sender->max_pkt)
-                               goto again;
-
-                       return tipc_link_iovec_long_xmit(sender, msg_sect,
-                                                        len, destaddr);
-               }
-               tipc_node_unlock(node);
-       }
-
-       /* Couldn't find a link to the destination node */
-       kfree_skb(buf);
-       tipc_port_iovec_reject(sender, hdr, msg_sect, len, TIPC_ERR_NO_NODE);
-       return -ENETUNREACH;
-}
-
-/*
- * tipc_link_iovec_long_xmit(): Entry for long messages where the
- * destination node is known and the header is complete,
- * inclusive total message length.
- * Link and bearer congestion status have been checked to be ok,
- * and are ignored if they change.
- *
- * Note that fragments do not use the full link MTU so that they won't have
- * to undergo refragmentation if link changeover causes them to be sent
- * over another link with an additional tunnel header added as prefix.
- * (Refragmentation will still occur if the other link has a smaller MTU.)
- *
- * Returns user data length or errno.
- */
-static int tipc_link_iovec_long_xmit(struct tipc_port *sender,
-                                    struct iovec const *msg_sect,
-                                    unsigned int len, u32 destaddr)
-{
-       struct tipc_link *l_ptr;
-       struct tipc_node *node;
-       struct tipc_msg *hdr = &sender->phdr;
-       u32 dsz = len;
-       u32 max_pkt, fragm_sz, rest;
-       struct tipc_msg fragm_hdr;
-       struct sk_buff *buf, *buf_chain, *prev;
-       u32 fragm_crs, fragm_rest, hsz, sect_rest;
-       const unchar __user *sect_crs;
-       int curr_sect;
-       u32 fragm_no;
-       int res = 0;
-
-again:
-       fragm_no = 1;
-       max_pkt = sender->max_pkt - INT_H_SIZE;
-               /* leave room for tunnel header in case of link changeover */
-       fragm_sz = max_pkt - INT_H_SIZE;
-               /* leave room for fragmentation header in each fragment */
-       rest = dsz;
-       fragm_crs = 0;
-       fragm_rest = 0;
-       sect_rest = 0;
-       sect_crs = NULL;
-       curr_sect = -1;
-
-       /* Prepare reusable fragment header */
-       tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-                INT_H_SIZE, msg_destnode(hdr));
-       msg_set_size(&fragm_hdr, max_pkt);
-       msg_set_fragm_no(&fragm_hdr, 1);
-
-       /* Prepare header of first fragment */
-       buf_chain = buf = tipc_buf_acquire(max_pkt);
-       if (!buf)
-               return -ENOMEM;
-       buf->next = NULL;
-       skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
-       hsz = msg_hdr_sz(hdr);
-       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, hdr, hsz);
-
-       /* Chop up message */
-       fragm_crs = INT_H_SIZE + hsz;
-       fragm_rest = fragm_sz - hsz;
-
-       do {            /* For all sections */
-               u32 sz;
-
-               if (!sect_rest) {
-                       sect_rest = msg_sect[++curr_sect].iov_len;
-                       sect_crs = msg_sect[curr_sect].iov_base;
-               }
-
-               if (sect_rest < fragm_rest)
-                       sz = sect_rest;
-               else
-                       sz = fragm_rest;
-
-               if (copy_from_user(buf->data + fragm_crs, sect_crs, sz)) {
-                       res = -EFAULT;
-error:
-                       kfree_skb_list(buf_chain);
-                       return res;
-               }
-               sect_crs += sz;
-               sect_rest -= sz;
-               fragm_crs += sz;
-               fragm_rest -= sz;
-               rest -= sz;
-
-               if (!fragm_rest && rest) {
-
-                       /* Initiate new fragment: */
-                       if (rest <= fragm_sz) {
-                               fragm_sz = rest;
-                               msg_set_type(&fragm_hdr, LAST_FRAGMENT);
-                       } else {
-                               msg_set_type(&fragm_hdr, FRAGMENT);
-                       }
-                       msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
-                       msg_set_fragm_no(&fragm_hdr, ++fragm_no);
-                       prev = buf;
-                       buf = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
-                       if (!buf) {
-                               res = -ENOMEM;
-                               goto error;
-                       }
-
-                       buf->next = NULL;
-                       prev->next = buf;
-                       skb_copy_to_linear_data(buf, &fragm_hdr, INT_H_SIZE);
-                       fragm_crs = INT_H_SIZE;
-                       fragm_rest = fragm_sz;
-               }
-       } while (rest > 0);
-
-       /*
-        * Now we have a buffer chain. Select a link and check
-        * that packet size is still OK
-        */
-       node = tipc_node_find(destaddr);
-       if (likely(node)) {
-               tipc_node_lock(node);
-               l_ptr = node->active_links[sender->ref & 1];
-               if (!l_ptr) {
-                       tipc_node_unlock(node);
-                       goto reject;
-               }
-               if (l_ptr->max_pkt < max_pkt) {
-                       sender->max_pkt = l_ptr->max_pkt;
-                       tipc_node_unlock(node);
-                       kfree_skb_list(buf_chain);
-                       goto again;
-               }
-       } else {
-reject:
-               kfree_skb_list(buf_chain);
-               tipc_port_iovec_reject(sender, hdr, msg_sect, len,
-                                      TIPC_ERR_NO_NODE);
-               return -ENETUNREACH;
-       }
-
-       /* Append chain of fragments to send queue & send them */
-       l_ptr->long_msg_seq_no++;
-       link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
-       l_ptr->stats.sent_fragments += fragm_no;
-       l_ptr->stats.sent_fragmented++;
-       tipc_link_push_queue(l_ptr);
-       tipc_node_unlock(node);
-       return dsz;
-}
-
 /*
  * tipc_link_push_packet: Push one unsent packet to the media
  */
@@ -1238,7 +916,7 @@ static u32 tipc_link_push_packet(struct tipc_link *l_ptr)
                        tipc_bearer_send(l_ptr->bearer_id, buf,
                                         &l_ptr->media_addr);
                        if (msg_user(msg) == MSG_BUNDLER)
-                               msg_set_type(msg, CLOSED_MSG);
+                               msg_set_type(msg, BUNDLE_CLOSED);
                        l_ptr->next_out = buf->next;
                        return 0;
                }
@@ -1527,11 +1205,6 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (unlikely(!list_empty(&l_ptr->waiting_ports)))
                        tipc_link_wakeup_ports(l_ptr, 0);
 
-               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
-                       l_ptr->stats.sent_acks++;
-                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
-               }
-
                /* Process the incoming packet */
                if (unlikely(!link_working_working(l_ptr))) {
                        if (msg_user(msg) == LINK_PROTOCOL) {
@@ -1565,57 +1238,19 @@ void tipc_rcv(struct sk_buff *head, struct tipc_bearer *b_ptr)
                if (unlikely(l_ptr->oldest_deferred_in))
                        head = link_insert_deferred_queue(l_ptr, head);
 
-               /* Deliver packet/message to correct user: */
-               if (unlikely(msg_user(msg) ==  CHANGEOVER_PROTOCOL)) {
-                       if (!tipc_link_tunnel_rcv(n_ptr, &buf)) {
-                               tipc_node_unlock(n_ptr);
-                               continue;
-                       }
-                       msg = buf_msg(buf);
-               } else if (msg_user(msg) == MSG_FRAGMENTER) {
-                       l_ptr->stats.recv_fragments++;
-                       if (tipc_buf_append(&l_ptr->reasm_buf, &buf)) {
-                               l_ptr->stats.recv_fragmented++;
-                               msg = buf_msg(buf);
-                       } else {
-                               if (!l_ptr->reasm_buf)
-                                       tipc_link_reset(l_ptr);
-                               tipc_node_unlock(n_ptr);
-                               continue;
-                       }
+               if (unlikely(++l_ptr->unacked_window >= TIPC_MIN_LINK_WIN)) {
+                       l_ptr->stats.sent_acks++;
+                       tipc_link_proto_xmit(l_ptr, STATE_MSG, 0, 0, 0, 0, 0);
                }
 
-               switch (msg_user(msg)) {
-               case TIPC_LOW_IMPORTANCE:
-               case TIPC_MEDIUM_IMPORTANCE:
-               case TIPC_HIGH_IMPORTANCE:
-               case TIPC_CRITICAL_IMPORTANCE:
+               if (tipc_link_prepare_input(l_ptr, &buf)) {
                        tipc_node_unlock(n_ptr);
-                       tipc_sk_rcv(buf);
                        continue;
-               case MSG_BUNDLER:
-                       l_ptr->stats.recv_bundles++;
-                       l_ptr->stats.recv_bundled += msg_msgcnt(msg);
-                       tipc_node_unlock(n_ptr);
-                       tipc_link_bundle_rcv(buf);
-                       continue;
-               case NAME_DISTRIBUTOR:
-                       n_ptr->bclink.recv_permitted = true;
-                       tipc_node_unlock(n_ptr);
-                       tipc_named_rcv(buf);
-                       continue;
-               case CONN_MANAGER:
-                       tipc_node_unlock(n_ptr);
-                       tipc_port_proto_rcv(buf);
-                       continue;
-               case BCAST_PROTOCOL:
-                       tipc_link_sync_rcv(n_ptr, buf);
-                       break;
-               default:
-                       kfree_skb(buf);
-                       break;
                }
                tipc_node_unlock(n_ptr);
+               msg = buf_msg(buf);
+               if (tipc_link_input(l_ptr, buf) != 0)
+                       goto discard;
                continue;
 unlock_discard:
                tipc_node_unlock(n_ptr);
@@ -1624,6 +1259,80 @@ discard:
        }
 }
 
+/**
+ * tipc_link_prepare_input - process TIPC link messages
+ *
+ * returns nonzero if the message was consumed
+ *
+ * Node lock must be held
+ */
+static int tipc_link_prepare_input(struct tipc_link *l, struct sk_buff **buf)
+{
+       struct tipc_node *n;
+       struct tipc_msg *msg;
+       int res = -EINVAL;
+
+       n = l->owner;
+       msg = buf_msg(*buf);
+       switch (msg_user(msg)) {
+       case CHANGEOVER_PROTOCOL:
+               if (tipc_link_tunnel_rcv(n, buf))
+                       res = 0;
+               break;
+       case MSG_FRAGMENTER:
+               l->stats.recv_fragments++;
+               if (tipc_buf_append(&l->reasm_buf, buf)) {
+                       l->stats.recv_fragmented++;
+                       res = 0;
+               } else if (!l->reasm_buf) {
+                       tipc_link_reset(l);
+               }
+               break;
+       case MSG_BUNDLER:
+               l->stats.recv_bundles++;
+               l->stats.recv_bundled += msg_msgcnt(msg);
+               res = 0;
+               break;
+       case NAME_DISTRIBUTOR:
+               n->bclink.recv_permitted = true;
+               res = 0;
+               break;
+       case BCAST_PROTOCOL:
+               tipc_link_sync_rcv(n, *buf);
+               break;
+       default:
+               res = 0;
+       }
+       return res;
+}
+
+/**
+ * tipc_link_input - Deliver message to higher layers
+ */
+static int tipc_link_input(struct tipc_link *l, struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       int res = 0;
+
+       switch (msg_user(msg)) {
+       case TIPC_LOW_IMPORTANCE:
+       case TIPC_MEDIUM_IMPORTANCE:
+       case TIPC_HIGH_IMPORTANCE:
+       case TIPC_CRITICAL_IMPORTANCE:
+       case CONN_MANAGER:
+               tipc_sk_rcv(buf);
+               break;
+       case NAME_DISTRIBUTOR:
+               tipc_named_rcv(buf);
+               break;
+       case MSG_BUNDLER:
+               tipc_link_bundle_rcv(buf);
+               break;
+       default:
+               res = -EINVAL;
+       }
+       return res;
+}
+
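
/* The split above is deliberate: tipc_link_prepare_input() runs under
 * the node lock and only mutates link state, while tipc_link_input()
 * delivers upwards after tipc_rcv() has dropped the lock (see the
 * rewritten receive loop earlier in this hunk).
 */
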
 /**
  * tipc_link_defer_pkt - Add out-of-sequence message to deferred reception queue
  *
@@ -2217,6 +1926,7 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
        u32 msgcount = msg_msgcnt(buf_msg(buf));
        u32 pos = INT_H_SIZE;
        struct sk_buff *obuf;
+       struct tipc_msg *omsg;
 
        while (msgcount--) {
                obuf = buf_extract(buf, pos);
@@ -2224,82 +1934,18 @@ void tipc_link_bundle_rcv(struct sk_buff *buf)
                        pr_warn("Link unable to unbundle message(s)\n");
                        break;
                }
-               pos += align(msg_size(buf_msg(obuf)));
-               tipc_net_route_msg(obuf);
-       }
-       kfree_skb(buf);
-}
-
-/*
- *  Fragmentation/defragmentation:
- */
-
-/*
- * tipc_link_frag_xmit: Entry for buffers needing fragmentation.
- * The buffer is complete, inclusive total message length.
- * Returns user data length.
- */
-static int tipc_link_frag_xmit(struct tipc_link *l_ptr, struct sk_buff *buf)
-{
-       struct sk_buff *buf_chain = NULL;
-       struct sk_buff *buf_chain_tail = (struct sk_buff *)&buf_chain;
-       struct tipc_msg *inmsg = buf_msg(buf);
-       struct tipc_msg fragm_hdr;
-       u32 insize = msg_size(inmsg);
-       u32 dsz = msg_data_sz(inmsg);
-       unchar *crs = buf->data;
-       u32 rest = insize;
-       u32 pack_sz = l_ptr->max_pkt;
-       u32 fragm_sz = pack_sz - INT_H_SIZE;
-       u32 fragm_no = 0;
-       u32 destaddr;
-
-       if (msg_short(inmsg))
-               destaddr = l_ptr->addr;
-       else
-               destaddr = msg_destnode(inmsg);
-
-       /* Prepare reusable fragment header: */
-       tipc_msg_init(&fragm_hdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
-                INT_H_SIZE, destaddr);
-
-       /* Chop up message: */
-       while (rest > 0) {
-               struct sk_buff *fragm;
-
-               if (rest <= fragm_sz) {
-                       fragm_sz = rest;
-                       msg_set_type(&fragm_hdr, LAST_FRAGMENT);
-               }
-               fragm = tipc_buf_acquire(fragm_sz + INT_H_SIZE);
-               if (fragm == NULL) {
-                       kfree_skb(buf);
-                       kfree_skb_list(buf_chain);
-                       return -ENOMEM;
+               omsg = buf_msg(obuf);
+               pos += align(msg_size(omsg));
+               if (msg_isdata(omsg) || (msg_user(omsg) == CONN_MANAGER)) {
+                       tipc_sk_rcv(obuf);
+               } else if (msg_user(omsg) == NAME_DISTRIBUTOR) {
+                       tipc_named_rcv(obuf);
+               } else {
+                       pr_warn("Illegal bundled msg: %u\n", msg_user(omsg));
+                       kfree_skb(obuf);
                }
-               msg_set_size(&fragm_hdr, fragm_sz + INT_H_SIZE);
-               fragm_no++;
-               msg_set_fragm_no(&fragm_hdr, fragm_no);
-               skb_copy_to_linear_data(fragm, &fragm_hdr, INT_H_SIZE);
-               skb_copy_to_linear_data_offset(fragm, INT_H_SIZE, crs,
-                                              fragm_sz);
-               buf_chain_tail->next = fragm;
-               buf_chain_tail = fragm;
-
-               rest -= fragm_sz;
-               crs += fragm_sz;
-               msg_set_type(&fragm_hdr, FRAGMENT);
        }
        kfree_skb(buf);
-
-       /* Append chain of fragments to send queue & send them */
-       l_ptr->long_msg_seq_no++;
-       link_add_chain_to_outqueue(l_ptr, buf_chain, l_ptr->long_msg_seq_no);
-       l_ptr->stats.sent_fragments += fragm_no;
-       l_ptr->stats.sent_fragmented++;
-       tipc_link_push_queue(l_ptr);
-
-       return dsz;
 }
 
 static void link_set_supervision_props(struct tipc_link *l_ptr, u32 tolerance)
index 200d518..782983c 100644 (file)
@@ -227,13 +227,8 @@ void tipc_link_reset_all(struct tipc_node *node);
 void tipc_link_reset(struct tipc_link *l_ptr);
 void tipc_link_reset_list(unsigned int bearer_id);
 int tipc_link_xmit(struct sk_buff *buf, u32 dest, u32 selector);
-void tipc_link_names_xmit(struct list_head *message_list, u32 dest);
-int __tipc_link_xmit(struct tipc_link *l_ptr, struct sk_buff *buf);
-int tipc_link_send_buf(struct tipc_link *l_ptr, struct sk_buff *buf);
+int __tipc_link_xmit(struct tipc_link *link, struct sk_buff *buf);
 u32 tipc_link_get_max_pkt(u32 dest, u32 selector);
-int tipc_link_iovec_xmit_fast(struct tipc_port *sender,
-                             struct iovec const *msg_sect,
-                             unsigned int len, u32 destnode);
 void tipc_link_bundle_rcv(struct sk_buff *buf);
 void tipc_link_proto_xmit(struct tipc_link *l_ptr, u32 msg_typ, int prob,
                          u32 gap, u32 tolerance, u32 priority, u32 acked_mtu);
index 8be6e94..9680be6 100644 (file)
 
 #include "core.h"
 #include "msg.h"
+#include "addr.h"
+#include "name_table.h"
 
-u32 tipc_msg_tot_importance(struct tipc_msg *m)
+#define MAX_FORWARD_SIZE 1024
+
+static unsigned int align(unsigned int i)
 {
-       if (likely(msg_isdata(m))) {
-               if (likely(msg_orignode(m) == tipc_own_addr))
-                       return msg_importance(m);
-               return msg_importance(m) + 4;
-       }
-       if ((msg_user(m) == MSG_FRAGMENTER)  &&
-           (msg_type(m) == FIRST_FRAGMENT))
-               return msg_importance(msg_get_wrapped(m));
-       return msg_importance(m);
+       return (i + 3) & ~3u;
 }
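
/* align() rounds a length up to the next 4-byte boundary, e.g.
 * align(1) == 4, align(4) == 4, align(5) == 8, align(0) == 0.
 * This is the same padding rule tipc_msg_bundle() relies on below.
 */
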
 
-
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode)
 {
@@ -65,71 +60,50 @@ void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
        msg_set_destnode(m, destnode);
 }
 
-/**
- * tipc_msg_build - create message using specified header and data
- *
- * Note: Caller must not hold any locks in case copy_from_user() is interrupted!
- *
- * Returns message data size or errno
- */
-int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  unsigned int len, int max_size, struct sk_buff **buf)
-{
-       int dsz, sz, hsz;
-       unsigned char *to;
-
-       dsz = len;
-       hsz = msg_hdr_sz(hdr);
-       sz = hsz + dsz;
-       msg_set_size(hdr, sz);
-       if (unlikely(sz > max_size)) {
-               *buf = NULL;
-               return dsz;
-       }
-
-       *buf = tipc_buf_acquire(sz);
-       if (!(*buf))
-               return -ENOMEM;
-       skb_copy_to_linear_data(*buf, hdr, hsz);
-       to = (*buf)->data + hsz;
-       if (len && memcpy_fromiovecend(to, msg_sect, 0, dsz)) {
-               kfree_skb(*buf);
-               *buf = NULL;
-               return -EFAULT;
-       }
-       return dsz;
-}
-
 /* tipc_buf_append(): Append a buffer to the fragment list of another buffer
- * Let first buffer become head buffer
- * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0
- * Leaves headbuf pointer at NULL if failure
+ * @*headbuf: in:  NULL for first frag, otherwise value returned from prev call
+ *            out: set on successful but still incomplete reassembly,
+ *                 otherwise NULL
+ * @*buf:     in:  the buffer to append. Always defined
+ *            out: head buf after successful complete reassembly, otherwise NULL
+ * Returns 1 when reassembly complete, otherwise 0
  */
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
 {
        struct sk_buff *head = *headbuf;
        struct sk_buff *frag = *buf;
        struct sk_buff *tail;
-       struct tipc_msg *msg = buf_msg(frag);
-       u32 fragid = msg_type(msg);
-       bool headstolen;
+       struct tipc_msg *msg;
+       u32 fragid;
        int delta;
+       bool headstolen;
+
+       if (!frag)
+               goto err;
 
+       msg = buf_msg(frag);
+       fragid = msg_type(msg);
+       frag->next = NULL;
        skb_pull(frag, msg_hdr_sz(msg));
 
        if (fragid == FIRST_FRAGMENT) {
-               if (head || skb_unclone(frag, GFP_ATOMIC))
-                       goto out_free;
+               if (unlikely(head))
+                       goto err;
+               if (unlikely(skb_unclone(frag, GFP_ATOMIC)))
+                       goto err;
                head = *headbuf = frag;
                skb_frag_list_init(head);
+               TIPC_SKB_CB(head)->tail = NULL;
+               *buf = NULL;
                return 0;
        }
+
        if (!head)
-               goto out_free;
-       tail = TIPC_SKB_CB(head)->tail;
+               goto err;
+
        if (skb_try_coalesce(head, frag, &headstolen, &delta)) {
                kfree_skb_partial(frag, headstolen);
        } else {
+               tail = TIPC_SKB_CB(head)->tail;
                if (!skb_has_frag_list(head))
                        skb_shinfo(head)->frag_list = frag;
                else
@@ -139,6 +113,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
                head->len += frag->len;
                TIPC_SKB_CB(head)->tail = frag;
        }
+
        if (fragid == LAST_FRAGMENT) {
                *buf = head;
                TIPC_SKB_CB(head)->tail = NULL;
@@ -147,8 +122,311 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
        }
        *buf = NULL;
        return 0;
-out_free:
+
+err:
        pr_warn_ratelimited("Unable to build fragment list\n");
        kfree_skb(*buf);
+       kfree_skb(*headbuf);
+       *buf = *headbuf = NULL;
        return 0;
 }
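
A sketch of the reassembly contract documented above, assuming a caller
that walks a NULL-terminated fragment list (the helper name is
illustrative): a return value of 1 signals completion, an error leaves
both pointers NULL, and anything else means "keep feeding fragments":

	static struct sk_buff *example_reassemble(struct sk_buff *frags)
	{
		struct sk_buff *head = NULL;
		struct sk_buff *frag;

		while (frags) {
			frag = frags;
			frags = frags->next;
			if (tipc_buf_append(&head, &frag))
				return frag;	/* complete; frag is now head */
			if (!head && !frag)
				return NULL;	/* error; both were reset */
		}
		return NULL;	/* chain exhausted, message incomplete */
	}
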
+
+/**
+ * tipc_msg_build - create buffer chain containing specified header and data
+ * @mhdr: Message header, to be prepended to data
+ * @iov: User data
+ * @offset: Position in iov to start copying from
+ * @dsz: Total length of user data
+ * @pktmax: Max packet size that can be used
+ * @chain: Buffer or chain of buffers to be returned to caller
+ * Returns message data size or errno: -ENOMEM, -EFAULT
+ */
+int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
+                  int offset, int dsz, int pktmax, struct sk_buff **chain)
+{
+       int mhsz = msg_hdr_sz(mhdr);
+       int msz = mhsz + dsz;
+       int pktno = 1;
+       int pktsz;
+       int pktrem = pktmax;
+       int drem = dsz;
+       struct tipc_msg pkthdr;
+       struct sk_buff *buf, *prev;
+       char *pktpos;
+       int rc;
+
+       msg_set_size(mhdr, msz);
+
+       /* No fragmentation needed? */
+       if (likely(msz <= pktmax)) {
+               buf = tipc_buf_acquire(msz);
+               *chain = buf;
+               if (unlikely(!buf))
+                       return -ENOMEM;
+               skb_copy_to_linear_data(buf, mhdr, mhsz);
+               pktpos = buf->data + mhsz;
+               if (!dsz || !memcpy_fromiovecend(pktpos, iov, offset, dsz))
+                       return dsz;
+               rc = -EFAULT;
+               goto error;
+       }
+
+       /* Prepare reusable fragment header */
+       tipc_msg_init(&pkthdr, MSG_FRAGMENTER, FIRST_FRAGMENT,
+                     INT_H_SIZE, msg_destnode(mhdr));
+       msg_set_size(&pkthdr, pktmax);
+       msg_set_fragm_no(&pkthdr, pktno);
+
+       /* Prepare first fragment */
+       *chain = buf = tipc_buf_acquire(pktmax);
+       if (!buf)
+               return -ENOMEM;
+       pktpos = buf->data;
+       skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+       pktpos += INT_H_SIZE;
+       pktrem -= INT_H_SIZE;
+       skb_copy_to_linear_data_offset(buf, INT_H_SIZE, mhdr, mhsz);
+       pktpos += mhsz;
+       pktrem -= mhsz;
+
+       do {
+               if (drem < pktrem)
+                       pktrem = drem;
+
+               if (memcpy_fromiovecend(pktpos, iov, offset, pktrem)) {
+                       rc = -EFAULT;
+                       goto error;
+               }
+               drem -= pktrem;
+               offset += pktrem;
+
+               if (!drem)
+                       break;
+
+               /* Prepare new fragment: */
+               if (drem < (pktmax - INT_H_SIZE))
+                       pktsz = drem + INT_H_SIZE;
+               else
+                       pktsz = pktmax;
+               prev = buf;
+               buf = tipc_buf_acquire(pktsz);
+               if (!buf) {
+                       rc = -ENOMEM;
+                       goto error;
+               }
+               prev->next = buf;
+               msg_set_type(&pkthdr, FRAGMENT);
+               msg_set_size(&pkthdr, pktsz);
+               msg_set_fragm_no(&pkthdr, ++pktno);
+               skb_copy_to_linear_data(buf, &pkthdr, INT_H_SIZE);
+               pktpos = buf->data + INT_H_SIZE;
+               pktrem = pktsz - INT_H_SIZE;
+
+       } while (1);
+
+       msg_set_type(buf_msg(buf), LAST_FRAGMENT);
+       return dsz;
+error:
+       kfree_skb_list(*chain);
+       *chain = NULL;
+       return rc;
+}
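
How the helpers above are meant to compose on the send side (a sketch;
the MTU lookup mirrors what named_distribute() does later in this
patch, and the wrapper name is illustrative): build a chain in which no
packet exceeds the destination MTU, then hand the whole chain to the
link layer:

	static int example_build_and_send(struct tipc_msg *mhdr,
					  struct iovec const *iov,
					  int dsz, u32 dnode)
	{
		struct sk_buff *chain;
		uint mtu = tipc_node_get_mtu(dnode, 0);
		int rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &chain);

		if (rc < 0)
			return rc;	/* -ENOMEM/-EFAULT; chain already freed */
		return tipc_link_xmit(chain, dnode, 0);
	}
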
+
+/**
+ * tipc_msg_bundle(): Append contents of a buffer to tail of an existing one
+ * @bbuf: the existing buffer ("bundle")
+ * @buf:  buffer to be appended
+ * @mtu:  max allowable size for the bundle buffer
+ * Consumes buffer if successful
+ * Returns true if bundling could be performed, otherwise false
+ */
+bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu)
+{
+       struct tipc_msg *bmsg = buf_msg(bbuf);
+       struct tipc_msg *msg = buf_msg(buf);
+       unsigned int bsz = msg_size(bmsg);
+       unsigned int msz = msg_size(msg);
+       u32 start = align(bsz);
+       u32 max = mtu - INT_H_SIZE;
+       u32 pad = start - bsz;
+
+       if (likely(msg_user(msg) == MSG_FRAGMENTER))
+               return false;
+       if (unlikely(msg_user(msg) == CHANGEOVER_PROTOCOL))
+               return false;
+       if (unlikely(msg_user(msg) == BCAST_PROTOCOL))
+               return false;
+       if (likely(msg_user(bmsg) != MSG_BUNDLER))
+               return false;
+       if (likely(msg_type(bmsg) != BUNDLE_OPEN))
+               return false;
+       if (unlikely(skb_tailroom(bbuf) < (pad + msz)))
+               return false;
+       if (unlikely(max < (start + msz)))
+               return false;
+
+       skb_put(bbuf, pad + msz);
+       skb_copy_to_linear_data_offset(bbuf, start, buf->data, msz);
+       msg_set_size(bmsg, start + msz);
+       msg_set_msgcnt(bmsg, msg_msgcnt(bmsg) + 1);
+       bbuf->next = buf->next;
+       kfree_skb(buf);
+       return true;
+}
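
/* Worked example of the padding arithmetic above: for a bundle of
 * bsz == 70 bytes, start == align(70) == 72 and pad == 2, so the
 * appended message begins on a 4-byte boundary and the bundle grows
 * to start + msz bytes, still bounded by mtu - INT_H_SIZE.
 */
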
+
+/**
+ * tipc_msg_make_bundle(): Create bundle buf and append message to its tail
+ * @buf:  buffer to be appended and replaced
+ * @mtu:  max allowable size for the bundle buffer, inclusive header
+ * @dnode: destination node for message. (Not always present in header)
+ * Replaces buffer if successful
+ * Returns true if successful, otherwise false
+ */
+bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode)
+{
+       struct sk_buff *bbuf;
+       struct tipc_msg *bmsg;
+       struct tipc_msg *msg = buf_msg(*buf);
+       u32 msz = msg_size(msg);
+       u32 max = mtu - INT_H_SIZE;
+
+       if (msg_user(msg) == MSG_FRAGMENTER)
+               return false;
+       if (msg_user(msg) == CHANGEOVER_PROTOCOL)
+               return false;
+       if (msg_user(msg) == BCAST_PROTOCOL)
+               return false;
+       if (msz > (max / 2))
+               return false;
+
+       bbuf = tipc_buf_acquire(max);
+       if (!bbuf)
+               return false;
+
+       skb_trim(bbuf, INT_H_SIZE);
+       bmsg = buf_msg(bbuf);
+       tipc_msg_init(bmsg, MSG_BUNDLER, BUNDLE_OPEN, INT_H_SIZE, dnode);
+       msg_set_seqno(bmsg, msg_seqno(msg));
+       msg_set_ack(bmsg, msg_ack(msg));
+       msg_set_bcast_ack(bmsg, msg_bcast_ack(msg));
+       bbuf->next = (*buf)->next;
+       tipc_msg_bundle(bbuf, *buf, mtu);
+       *buf = bbuf;
+       return true;
+}
+
+/**
+ * tipc_msg_reverse(): swap source and destination addresses and add error code
+ * @buf:  buffer containing message to be reversed
+ * @dnode: return value: node to which the message is to be sent after reversal
+ * @err:  error code to be set in message
+ * Consumes buffer on failure
+ * Returns true on success, otherwise false
+ */
+bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       uint imp = msg_importance(msg);
+       struct tipc_msg ohdr;
+       uint rdsz = min_t(uint, msg_data_sz(msg), MAX_FORWARD_SIZE);
+
+       if (skb_linearize(buf))
+               goto exit;
+       if (msg_dest_droppable(msg))
+               goto exit;
+       if (msg_errcode(msg))
+               goto exit;
+
+       memcpy(&ohdr, msg, msg_hdr_sz(msg));
+       imp = min_t(uint, imp + 1, TIPC_CRITICAL_IMPORTANCE);
+       if (msg_isdata(msg))
+               msg_set_importance(msg, imp);
+       msg_set_errcode(msg, err);
+       msg_set_origport(msg, msg_destport(&ohdr));
+       msg_set_destport(msg, msg_origport(&ohdr));
+       msg_set_prevnode(msg, tipc_own_addr);
+       if (!msg_short(msg)) {
+               msg_set_orignode(msg, msg_destnode(&ohdr));
+               msg_set_destnode(msg, msg_orignode(&ohdr));
+       }
+       msg_set_size(msg, msg_hdr_sz(msg) + rdsz);
+       skb_trim(buf, msg_size(msg));
+       skb_orphan(buf);
+       *dnode = msg_orignode(&ohdr);
+       return true;
+exit:
+       kfree_skb(buf);
+       return false;
+}
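
A sketch of the reject pattern these semantics enable (the wrapper is
illustrative; congestion handling is elided): reverse the header in
place and bounce the buffer to its origin, remembering that a false
return means the buffer has already been freed:

	static void example_reject(struct sk_buff *buf, int err)
	{
		u32 dnode;

		if (tipc_msg_reverse(buf, &dnode, err))
			tipc_link_xmit(buf, dnode,
				       msg_link_selector(buf_msg(buf)));
	}
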
+
+/**
+ * tipc_msg_eval: determine fate of message that found no destination
+ * @buf: the buffer containing the message.
+ * @dnode: return value: next-hop node, if message to be forwarded
+ * @err: error code to use, if message to be rejected
+ *
+ * Does not consume buffer
+ * Returns 0 (TIPC_OK) if the message is ok and delivery can be retried,
+ * otherwise a -TIPC error code if the message is to be rejected
+ */
+int tipc_msg_eval(struct sk_buff *buf, u32 *dnode)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       u32 dport;
+
+       if (msg_type(msg) != TIPC_NAMED_MSG)
+               return -TIPC_ERR_NO_PORT;
+       if (skb_linearize(buf))
+               return -TIPC_ERR_NO_NAME;
+       if (msg_data_sz(msg) > MAX_FORWARD_SIZE)
+               return -TIPC_ERR_NO_NAME;
+       if (msg_reroute_cnt(msg) > 0)
+               return -TIPC_ERR_NO_NAME;
+
+       *dnode = addr_domain(msg_lookup_scope(msg));
+       dport = tipc_nametbl_translate(msg_nametype(msg),
+                                      msg_nameinst(msg),
+                                      dnode);
+       if (!dport)
+               return -TIPC_ERR_NO_NAME;
+       msg_incr_reroute_cnt(msg);
+       msg_set_destnode(msg, *dnode);
+       msg_set_destport(msg, dport);
+       return TIPC_OK;
+}
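
tipc_msg_eval() is designed to compose with tipc_msg_reverse(): a named
message that found no port is first looked up again, and only rejected
when that fails. A sketch of the flow (wrapper name illustrative; note
the sign flip, since tipc_msg_eval() returns negated TIPC error codes):

	static void example_no_port(struct sk_buff *buf)
	{
		u32 dnode;
		int err = tipc_msg_eval(buf, &dnode);

		if (err == TIPC_OK) {	/* re-routed: forward it */
			tipc_link_xmit(buf, dnode,
				       msg_link_selector(buf_msg(buf)));
			return;
		}
		if (tipc_msg_reverse(buf, &dnode, -err))
			tipc_link_xmit(buf, dnode,
				       msg_link_selector(buf_msg(buf)));
	}
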
+
+/* tipc_msg_reassemble() - clone a buffer chain of fragments and
+ *                         reassemble the clones into one message
+ */
+struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain)
+{
+       struct sk_buff *buf = chain;
+       struct sk_buff *frag = buf;
+       struct sk_buff *head = NULL;
+       int hdr_sz;
+
+       /* Copy header if single buffer */
+       if (!buf->next) {
+               hdr_sz = skb_headroom(buf) + msg_hdr_sz(buf_msg(buf));
+               return __pskb_copy(buf, hdr_sz, GFP_ATOMIC);
+       }
+
+       /* Clone all fragments and reassemble */
+       while (buf) {
+               frag = skb_clone(buf, GFP_ATOMIC);
+               if (!frag)
+                       goto error;
+               frag->next = NULL;
+               if (tipc_buf_append(&head, &frag))
+                       break;
+               if (!head)
+                       goto error;
+               buf = buf->next;
+       }
+       return frag;
+error:
+       pr_warn("Failed do clone local mcast rcv buffer\n");
+       kfree_skb(head);
+       return NULL;
+}
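
A sketch of the intended use (the wrapper name is illustrative; the
delivery call is taken from elsewhere in this patch): clone-and-
reassemble the chain so the original can still be sent on the wire
while a complete local copy is delivered upwards:

	static void example_mcast_loopback(struct sk_buff *chain)
	{
		struct sk_buff *copy = tipc_msg_reassemble(chain);

		if (copy)
			tipc_sk_rcv(copy);	/* deliver the local copy */
		/* 'chain' is untouched and still owned by the caller */
	}
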
index 5035119..462fa19 100644 (file)
@@ -463,6 +463,11 @@ static inline struct tipc_msg *msg_get_wrapped(struct tipc_msg *m)
 #define FRAGMENT               1
 #define LAST_FRAGMENT          2
 
+/* Bundling protocol message types
+ */
+#define BUNDLE_OPEN             0
+#define BUNDLE_CLOSED           1
+
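
/* Lifecycle of the two message types above: a bundle is created as
 * BUNDLE_OPEN and keeps accepting messages while it waits at the tail
 * of the send queue (tipc_msg_bundle() requires BUNDLE_OPEN); the push
 * path flips it to BUNDLE_CLOSED once it has been passed to the bearer.
 */
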
 /*
  * Link management protocol message types
  */
@@ -706,12 +711,36 @@ static inline void msg_set_link_tolerance(struct tipc_msg *m, u32 n)
        msg_set_bits(m, 9, 0, 0xffff, n);
 }
 
-u32 tipc_msg_tot_importance(struct tipc_msg *m);
+static inline u32 tipc_msg_tot_importance(struct tipc_msg *m)
+{
+       if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
+               return msg_importance(msg_get_wrapped(m));
+       return msg_importance(m);
+}
+
+static inline u32 msg_tot_origport(struct tipc_msg *m)
+{
+       if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT))
+               return msg_origport(msg_get_wrapped(m));
+       return msg_origport(m);
+}
+
+bool tipc_msg_reverse(struct sk_buff *buf, u32 *dnode, int err);
+
+int tipc_msg_eval(struct sk_buff *buf, u32 *dnode);
+
 void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize,
                   u32 destnode);
-int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
-                  unsigned int len, int max_size, struct sk_buff **buf);
 
 int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf);
 
+bool tipc_msg_bundle(struct sk_buff *bbuf, struct sk_buff *buf, u32 mtu);
+
+bool tipc_msg_make_bundle(struct sk_buff **buf, u32 mtu, u32 dnode);
+
+int tipc_msg_build(struct tipc_msg *mhdr, struct iovec const *iov,
+                  int offset, int dsz, int pktmax, struct sk_buff **chain);
+
+struct sk_buff *tipc_msg_reassemble(struct sk_buff *chain);
+
 #endif
index 8ce7309..dcc15bc 100644 (file)
@@ -101,24 +101,22 @@ static struct sk_buff *named_prepare_buf(u32 type, u32 size, u32 dest)
 
 void named_cluster_distribute(struct sk_buff *buf)
 {
-       struct sk_buff *buf_copy;
-       struct tipc_node *n_ptr;
-       struct tipc_link *l_ptr;
+       struct sk_buff *obuf;
+       struct tipc_node *node;
+       u32 dnode;
 
        rcu_read_lock();
-       list_for_each_entry_rcu(n_ptr, &tipc_node_list, list) {
-               tipc_node_lock(n_ptr);
-               l_ptr = n_ptr->active_links[n_ptr->addr & 1];
-               if (l_ptr) {
-                       buf_copy = skb_copy(buf, GFP_ATOMIC);
-                       if (!buf_copy) {
-                               tipc_node_unlock(n_ptr);
-                               break;
-                       }
-                       msg_set_destnode(buf_msg(buf_copy), n_ptr->addr);
-                       __tipc_link_xmit(l_ptr, buf_copy);
-               }
-               tipc_node_unlock(n_ptr);
+       list_for_each_entry_rcu(node, &tipc_node_list, list) {
+               dnode = node->addr;
+               if (in_own_node(dnode))
+                       continue;
+               if (!tipc_node_active_links(node))
+                       continue;
+               obuf = skb_copy(buf, GFP_ATOMIC);
+               if (!obuf)
+                       break;
+               msg_set_destnode(buf_msg(obuf), dnode);
+               tipc_link_xmit(obuf, dnode, dnode);
        }
        rcu_read_unlock();
 
@@ -175,34 +173,44 @@ struct sk_buff *tipc_named_withdraw(struct publication *publ)
        return buf;
 }
 
-/*
+/**
  * named_distribute - prepare name info for bulk distribution to another node
+ * @msg_list: list of messages (buffers) to be returned from this function
+ * @dnode: node to be updated
+ * @pls: linked list of publication items to be packed into buffer chain
  */
-static void named_distribute(struct list_head *message_list, u32 node,
-                            struct publ_list *pls, u32 max_item_buf)
+static void named_distribute(struct list_head *msg_list, u32 dnode,
+                            struct publ_list *pls)
 {
        struct publication *publ;
        struct sk_buff *buf = NULL;
        struct distr_item *item = NULL;
-       u32 left = 0;
-       u32 rest = pls->size * ITEM_SIZE;
+       uint dsz = pls->size * ITEM_SIZE;
+       uint msg_dsz = (tipc_node_get_mtu(dnode, 0) / ITEM_SIZE) * ITEM_SIZE;
+       uint rem = dsz;
+       uint msg_rem = 0;
 
        list_for_each_entry(publ, &pls->list, local_list) {
+               /* Prepare next buffer: */
                if (!buf) {
-                       left = (rest <= max_item_buf) ? rest : max_item_buf;
-                       rest -= left;
-                       buf = named_prepare_buf(PUBLICATION, left, node);
+                       msg_rem = min_t(uint, rem, msg_dsz);
+                       rem -= msg_rem;
+                       buf = named_prepare_buf(PUBLICATION, msg_rem, dnode);
                        if (!buf) {
                                pr_warn("Bulk publication failure\n");
                                return;
                        }
                        item = (struct distr_item *)msg_data(buf_msg(buf));
                }
+
+               /* Pack publication into message: */
                publ_to_item(item, publ);
                item++;
-               left -= ITEM_SIZE;
-               if (!left) {
-                       list_add_tail((struct list_head *)buf, message_list);
+               msg_rem -= ITEM_SIZE;
+
+               /* Append full buffer to list: */
+               if (!msg_rem) {
+                       list_add_tail((struct list_head *)buf, msg_list);
                        buf = NULL;
                }
        }
@@ -211,16 +219,20 @@ static void named_distribute(struct list_head *message_list, u32 node,
 /**
  * tipc_named_node_up - tell specified node about all publications by this node
  */
-void tipc_named_node_up(u32 max_item_buf, u32 node)
+void tipc_named_node_up(u32 dnode)
 {
-       LIST_HEAD(message_list);
+       LIST_HEAD(msg_list);
+       struct sk_buff *buf_chain;
 
        read_lock_bh(&tipc_nametbl_lock);
-       named_distribute(&message_list, node, &publ_cluster, max_item_buf);
-       named_distribute(&message_list, node, &publ_zone, max_item_buf);
+       named_distribute(&msg_list, dnode, &publ_cluster);
+       named_distribute(&msg_list, dnode, &publ_zone);
        read_unlock_bh(&tipc_nametbl_lock);
 
-       tipc_link_names_xmit(&message_list, node);
+       /* Convert circular list to linear list and send: */
+       buf_chain = (struct sk_buff *)msg_list.next;
+       ((struct sk_buff *)msg_list.prev)->next = NULL;
+       tipc_link_xmit(buf_chain, dnode, dnode);
 }
 
 /**
index b2eed4e..8afe32b 100644 (file)
@@ -70,7 +70,7 @@ struct distr_item {
 struct sk_buff *tipc_named_publish(struct publication *publ);
 struct sk_buff *tipc_named_withdraw(struct publication *publ);
 void named_cluster_distribute(struct sk_buff *buf);
-void tipc_named_node_up(u32 max_item_buf, u32 node);
+void tipc_named_node_up(u32 dnode);
 void tipc_named_rcv(struct sk_buff *buf);
 void tipc_named_reinit(void);
 
index f64375e..7fcc949 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/net.c: TIPC network routing code
  *
- * Copyright (c) 1995-2006, Ericsson AB
+ * Copyright (c) 1995-2006, 2014, Ericsson AB
  * Copyright (c) 2005, 2010-2011, Wind River Systems
  * All rights reserved.
  *
  *     - A local spin_lock protecting the queue of subscriber events.
 */
 
-static void net_route_named_msg(struct sk_buff *buf)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       u32 dnode;
-       u32 dport;
-
-       if (!msg_named(msg)) {
-               kfree_skb(buf);
-               return;
-       }
-
-       dnode = addr_domain(msg_lookup_scope(msg));
-       dport = tipc_nametbl_translate(msg_nametype(msg), msg_nameinst(msg), &dnode);
-       if (dport) {
-               msg_set_destnode(msg, dnode);
-               msg_set_destport(msg, dport);
-               tipc_net_route_msg(buf);
-               return;
-       }
-       tipc_reject_msg(buf, TIPC_ERR_NO_NAME);
-}
-
-void tipc_net_route_msg(struct sk_buff *buf)
-{
-       struct tipc_msg *msg;
-       u32 dnode;
-
-       if (!buf)
-               return;
-       msg = buf_msg(buf);
-
-       /* Handle message for this node */
-       dnode = msg_short(msg) ? tipc_own_addr : msg_destnode(msg);
-       if (tipc_in_scope(dnode, tipc_own_addr)) {
-               if (msg_isdata(msg)) {
-                       if (msg_mcast(msg))
-                               tipc_port_mcast_rcv(buf, NULL);
-                       else if (msg_destport(msg))
-                               tipc_sk_rcv(buf);
-                       else
-                               net_route_named_msg(buf);
-                       return;
-               }
-               switch (msg_user(msg)) {
-               case NAME_DISTRIBUTOR:
-                       tipc_named_rcv(buf);
-                       break;
-               case CONN_MANAGER:
-                       tipc_port_proto_rcv(buf);
-                       break;
-               default:
-                       kfree_skb(buf);
-               }
-               return;
-       }
-
-       /* Handle message for another node */
-       skb_trim(buf, msg_size(msg));
-       tipc_link_xmit(buf, dnode, msg_link_selector(msg));
-}
-
 int tipc_net_start(u32 addr)
 {
        char addr_string[16];
index c6c2b46..59ef338 100644 (file)
@@ -37,8 +37,6 @@
 #ifndef _TIPC_NET_H
 #define _TIPC_NET_H
 
-void tipc_net_route_msg(struct sk_buff *buf);
-
 int tipc_net_start(u32 addr);
 void tipc_net_stop(void);
 
index 5b44c30..f706929 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * net/tipc/node.c: TIPC node management routines
  *
- * Copyright (c) 2000-2006, 2012 Ericsson AB
+ * Copyright (c) 2000-2006, 2012-2014, Ericsson AB
  * Copyright (c) 2005-2006, 2010-2014, Wind River Systems
  * All rights reserved.
  *
@@ -155,21 +155,25 @@ void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
        if (!active[0]) {
                active[0] = active[1] = l_ptr;
                node_established_contact(n_ptr);
-               return;
+               goto exit;
        }
        if (l_ptr->priority < active[0]->priority) {
                pr_info("New link <%s> becomes standby\n", l_ptr->name);
-               return;
+               goto exit;
        }
        tipc_link_dup_queue_xmit(active[0], l_ptr);
        if (l_ptr->priority == active[0]->priority) {
                active[0] = l_ptr;
-               return;
+               goto exit;
        }
        pr_info("Old link <%s> becomes standby\n", active[0]->name);
        if (active[1] != active[0])
                pr_info("Old link <%s> becomes standby\n", active[1]->name);
        active[0] = active[1] = l_ptr;
+exit:
+       /* Leave room for changeover header when returning 'mtu' to users: */
+       n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
+       n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
 }
 
 /**
@@ -229,6 +233,19 @@ void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
                tipc_link_failover_send_queue(l_ptr);
        else
                node_lost_contact(n_ptr);
+
+       /* Leave room for changeover header when returning 'mtu' to users: */
+       if (active[0]) {
+               n_ptr->act_mtus[0] = active[0]->max_pkt - INT_H_SIZE;
+               n_ptr->act_mtus[1] = active[1]->max_pkt - INT_H_SIZE;
+               return;
+       }
+
+       /* Loopback link went down? No fragmentation needed from now on. */
+       if (n_ptr->addr == tipc_own_addr) {
+               n_ptr->act_mtus[0] = MAX_MSG_SIZE;
+               n_ptr->act_mtus[1] = MAX_MSG_SIZE;
+       }
 }
 
 int tipc_node_active_links(struct tipc_node *n_ptr)
@@ -457,8 +474,6 @@ int tipc_node_get_linkname(u32 bearer_id, u32 addr, char *linkname, size_t len)
 void tipc_node_unlock(struct tipc_node *node)
 {
        LIST_HEAD(nsub_list);
-       struct tipc_link *link;
-       int pkt_sz = 0;
        u32 addr = 0;
 
        if (likely(!node->action_flags)) {
@@ -471,18 +486,13 @@ void tipc_node_unlock(struct tipc_node *node)
                node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
        }
        if (node->action_flags & TIPC_NOTIFY_NODE_UP) {
-               link = node->active_links[0];
                node->action_flags &= ~TIPC_NOTIFY_NODE_UP;
-               if (link) {
-                       pkt_sz = ((link->max_pkt - INT_H_SIZE) / ITEM_SIZE) *
-                                 ITEM_SIZE;
-                       addr = node->addr;
-               }
+               addr = node->addr;
        }
        spin_unlock_bh(&node->lock);
 
        if (!list_empty(&nsub_list))
                tipc_nodesub_notify(&nsub_list);
-       if (pkt_sz)
-               tipc_named_node_up(pkt_sz, addr);
+       if (addr)
+               tipc_named_node_up(addr);
 }
index 9087063..b61716a 100644 (file)
@@ -41,6 +41,7 @@
 #include "addr.h"
 #include "net.h"
 #include "bearer.h"
+#include "msg.h"
 
 /*
  * Out-of-range value for node signature
@@ -105,6 +106,7 @@ struct tipc_node {
        spinlock_t lock;
        struct hlist_node hash;
        struct tipc_link *active_links[2];
+       u32 act_mtus[2];
        struct tipc_link *links[MAX_BEARERS];
        unsigned int action_flags;
        struct tipc_node_bclink bclink;
@@ -143,4 +145,19 @@ static inline bool tipc_node_blocked(struct tipc_node *node)
                TIPC_NOTIFY_NODE_DOWN | TIPC_WAIT_OWN_LINKS_DOWN));
 }
 
+static inline uint tipc_node_get_mtu(u32 addr, u32 selector)
+{
+       struct tipc_node *node;
+       u32 mtu;
+
+       node = tipc_node_find(addr);
+
+       if (likely(node))
+               mtu = node->act_mtus[selector & 1];
+       else
+               mtu = MAX_MSG_SIZE;
+
+       return mtu;
+}
+
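
/* Example: named_distribute() in name_distr.c sizes its bulk buffers
 * with tipc_node_get_mtu(dnode, 0); an unknown destination falls back
 * to MAX_MSG_SIZE, i.e. no extra fragmentation is imposed.
 */
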
 #endif
index 7c59ab1..2d13eea 100644 (file)
@@ -84,11 +84,13 @@ void tipc_nodesub_unsubscribe(struct tipc_node_subscr *node_sub)
 void tipc_nodesub_notify(struct list_head *nsub_list)
 {
        struct tipc_node_subscr *ns, *safe;
+       net_ev_handler handle_node_down;
 
        list_for_each_entry_safe(ns, safe, nsub_list, nodesub_list) {
-               if (ns->handle_node_down) {
-                       ns->handle_node_down(ns->usr_handle);
+               handle_node_down = ns->handle_node_down;
+               if (handle_node_down) {
                        ns->handle_node_down = NULL;
+                       handle_node_down(ns->usr_handle);
                }
        }
 }
index 5fd7acc..7e096a5 100644 (file)
@@ -42,8 +42,6 @@
 
 /* Connection management: */
 #define PROBING_INTERVAL 3600000       /* [ms] => 1 h */
-#define CONFIRMED 0
-#define PROBING 1
 
 #define MAX_REJECT_SIZE 1024
 
@@ -76,124 +74,6 @@ int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg)
                (!peernode && (orignode == tipc_own_addr));
 }
 
-/**
- * tipc_port_mcast_xmit - send a multicast message to local and remote
- * destinations
- */
-int tipc_port_mcast_xmit(struct tipc_port *oport,
-                        struct tipc_name_seq const *seq,
-                        struct iovec const *msg_sect,
-                        unsigned int len)
-{
-       struct tipc_msg *hdr;
-       struct sk_buff *buf;
-       struct sk_buff *ibuf = NULL;
-       struct tipc_port_list dports = {0, NULL, };
-       int ext_targets;
-       int res;
-
-       /* Create multicast message */
-       hdr = &oport->phdr;
-       msg_set_type(hdr, TIPC_MCAST_MSG);
-       msg_set_lookup_scope(hdr, TIPC_CLUSTER_SCOPE);
-       msg_set_destport(hdr, 0);
-       msg_set_destnode(hdr, 0);
-       msg_set_nametype(hdr, seq->type);
-       msg_set_namelower(hdr, seq->lower);
-       msg_set_nameupper(hdr, seq->upper);
-       msg_set_hdr_sz(hdr, MCAST_H_SIZE);
-       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
-       if (unlikely(!buf))
-               return res;
-
-       /* Figure out where to send multicast message */
-       ext_targets = tipc_nametbl_mc_translate(seq->type, seq->lower, seq->upper,
-                                               TIPC_NODE_SCOPE, &dports);
-
-       /* Send message to destinations (duplicate it only if necessary) */
-       if (ext_targets) {
-               if (dports.count != 0) {
-                       ibuf = skb_copy(buf, GFP_ATOMIC);
-                       if (ibuf == NULL) {
-                               tipc_port_list_free(&dports);
-                               kfree_skb(buf);
-                               return -ENOMEM;
-                       }
-               }
-               res = tipc_bclink_xmit(buf);
-               if ((res < 0) && (dports.count != 0))
-                       kfree_skb(ibuf);
-       } else {
-               ibuf = buf;
-       }
-
-       if (res >= 0) {
-               if (ibuf)
-                       tipc_port_mcast_rcv(ibuf, &dports);
-       } else {
-               tipc_port_list_free(&dports);
-       }
-       return res;
-}
-
-/**
- * tipc_port_mcast_rcv - deliver multicast message to all destination ports
- *
- * If there is no port list, perform a lookup to create one
- */
-void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp)
-{
-       struct tipc_msg *msg;
-       struct tipc_port_list dports = {0, NULL, };
-       struct tipc_port_list *item = dp;
-       int cnt = 0;
-
-       msg = buf_msg(buf);
-
-       /* Create destination port list, if one wasn't supplied */
-       if (dp == NULL) {
-               tipc_nametbl_mc_translate(msg_nametype(msg),
-                                    msg_namelower(msg),
-                                    msg_nameupper(msg),
-                                    TIPC_CLUSTER_SCOPE,
-                                    &dports);
-               item = dp = &dports;
-       }
-
-       /* Deliver a copy of message to each destination port */
-       if (dp->count != 0) {
-               msg_set_destnode(msg, tipc_own_addr);
-               if (dp->count == 1) {
-                       msg_set_destport(msg, dp->ports[0]);
-                       tipc_sk_rcv(buf);
-                       tipc_port_list_free(dp);
-                       return;
-               }
-               for (; cnt < dp->count; cnt++) {
-                       int index = cnt % PLSIZE;
-                       struct sk_buff *b = skb_clone(buf, GFP_ATOMIC);
-
-                       if (b == NULL) {
-                               pr_warn("Unable to deliver multicast message(s)\n");
-                               goto exit;
-                       }
-                       if ((index == 0) && (cnt != 0))
-                               item = item->next;
-                       msg_set_destport(buf_msg(b), item->ports[index]);
-                       tipc_sk_rcv(b);
-               }
-       }
-exit:
-       kfree_skb(buf);
-       tipc_port_list_free(dp);
-}
-
-
-void tipc_port_wakeup(struct tipc_port *port)
-{
-       tipc_sock_wakeup(tipc_port_to_sock(port));
-}
-
 /* tipc_port_init - initiate TIPC port and lock it
  *
  * Returns the obtained reference if initialization is successful, zero otherwise
@@ -235,6 +115,8 @@ u32 tipc_port_init(struct tipc_port *p_ptr,
 void tipc_port_destroy(struct tipc_port *p_ptr)
 {
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = NULL;
+       u32 peer;
 
        tipc_withdraw(p_ptr, 0, NULL);
 
@@ -246,14 +128,15 @@ void tipc_port_destroy(struct tipc_port *p_ptr)
        if (p_ptr->connected) {
                buf = port_build_peer_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
                tipc_nodesub_unsubscribe(&p_ptr->subscription);
+               msg = buf_msg(buf);
+               peer = msg_destnode(msg);
+               tipc_link_xmit(buf, peer, msg_link_selector(msg));
        }
-
        spin_lock_bh(&tipc_port_list_lock);
        list_del(&p_ptr->port_list);
        list_del(&p_ptr->wait_list);
        spin_unlock_bh(&tipc_port_list_lock);
        k_term_timer(&p_ptr->timer);
-       tipc_net_route_msg(buf);
 }
 
 /*
@@ -275,100 +158,16 @@ static struct sk_buff *port_build_proto_msg(struct tipc_port *p_ptr,
                msg_set_destport(msg, tipc_port_peerport(p_ptr));
                msg_set_origport(msg, p_ptr->ref);
                msg_set_msgcnt(msg, ack);
+               buf->next = NULL;
        }
        return buf;
 }
 
-int tipc_reject_msg(struct sk_buff *buf, u32 err)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       struct sk_buff *rbuf;
-       struct tipc_msg *rmsg;
-       int hdr_sz;
-       u32 imp;
-       u32 data_sz = msg_data_sz(msg);
-       u32 src_node;
-       u32 rmsg_sz;
-
-       /* discard rejected message if it shouldn't be returned to sender */
-       if (WARN(!msg_isdata(msg),
-                "attempt to reject message with user=%u", msg_user(msg))) {
-               dump_stack();
-               goto exit;
-       }
-       if (msg_errcode(msg) || msg_dest_droppable(msg))
-               goto exit;
-
-       /*
-        * construct returned message by copying rejected message header and
-        * data (or subset), then updating header fields that need adjusting
-        */
-       hdr_sz = msg_hdr_sz(msg);
-       rmsg_sz = hdr_sz + min_t(u32, data_sz, MAX_REJECT_SIZE);
-
-       rbuf = tipc_buf_acquire(rmsg_sz);
-       if (rbuf == NULL)
-               goto exit;
-
-       rmsg = buf_msg(rbuf);
-       skb_copy_to_linear_data(rbuf, msg, rmsg_sz);
-
-       if (msg_connected(rmsg)) {
-               imp = msg_importance(rmsg);
-               if (imp < TIPC_CRITICAL_IMPORTANCE)
-                       msg_set_importance(rmsg, ++imp);
-       }
-       msg_set_non_seq(rmsg, 0);
-       msg_set_size(rmsg, rmsg_sz);
-       msg_set_errcode(rmsg, err);
-       msg_set_prevnode(rmsg, tipc_own_addr);
-       msg_swap_words(rmsg, 4, 5);
-       if (!msg_short(rmsg))
-               msg_swap_words(rmsg, 6, 7);
-
-       /* send self-abort message when rejecting on a connected port */
-       if (msg_connected(msg)) {
-               struct tipc_port *p_ptr = tipc_port_lock(msg_destport(msg));
-
-               if (p_ptr) {
-                       struct sk_buff *abuf = NULL;
-
-                       if (p_ptr->connected)
-                               abuf = port_build_self_abort_msg(p_ptr, err);
-                       tipc_port_unlock(p_ptr);
-                       tipc_net_route_msg(abuf);
-               }
-       }
-
-       /* send returned message & dispose of rejected message */
-       src_node = msg_prevnode(msg);
-       if (in_own_node(src_node))
-               tipc_sk_rcv(rbuf);
-       else
-               tipc_link_xmit(rbuf, src_node, msg_link_selector(rmsg));
-exit:
-       kfree_skb(buf);
-       return data_sz;
-}
-
-int tipc_port_iovec_reject(struct tipc_port *p_ptr, struct tipc_msg *hdr,
-                          struct iovec const *msg_sect, unsigned int len,
-                          int err)
-{
-       struct sk_buff *buf;
-       int res;
-
-       res = tipc_msg_build(hdr, msg_sect, len, MAX_MSG_SIZE, &buf);
-       if (!buf)
-               return res;
-
-       return tipc_reject_msg(buf, err);
-}
-
 static void port_timeout(unsigned long ref)
 {
        struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = NULL;
 
        if (!p_ptr)
                return;
@@ -379,15 +178,16 @@ static void port_timeout(unsigned long ref)
        }
 
        /* Last probe answered ? */
-       if (p_ptr->probing_state == PROBING) {
+       if (p_ptr->probing_state == TIPC_CONN_PROBING) {
                buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_PORT);
        } else {
                buf = port_build_proto_msg(p_ptr, CONN_PROBE, 0);
-               p_ptr->probing_state = PROBING;
+               p_ptr->probing_state = TIPC_CONN_PROBING;
                k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
        }
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
 }
 
 
@@ -395,12 +195,14 @@ static void port_handle_node_down(unsigned long ref)
 {
        struct tipc_port *p_ptr = tipc_port_lock(ref);
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg = NULL;
 
        if (!p_ptr)
                return;
        buf = port_build_self_abort_msg(p_ptr, TIPC_ERR_NO_NODE);
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
 }
 
 
@@ -412,6 +214,7 @@ static struct sk_buff *port_build_self_abort_msg(struct tipc_port *p_ptr, u32 er
                struct tipc_msg *msg = buf_msg(buf);
                msg_swap_words(msg, 4, 5);
                msg_swap_words(msg, 6, 7);
+               buf->next = NULL;
        }
        return buf;
 }
@@ -436,60 +239,11 @@ static struct sk_buff *port_build_peer_abort_msg(struct tipc_port *p_ptr, u32 er
                if (imp < TIPC_CRITICAL_IMPORTANCE)
                        msg_set_importance(msg, ++imp);
                msg_set_errcode(msg, err);
+               buf->next = NULL;
        }
        return buf;
 }
 
-void tipc_port_proto_rcv(struct sk_buff *buf)
-{
-       struct tipc_msg *msg = buf_msg(buf);
-       struct tipc_port *p_ptr;
-       struct sk_buff *r_buf = NULL;
-       u32 destport = msg_destport(msg);
-       int wakeable;
-
-       /* Validate connection */
-       p_ptr = tipc_port_lock(destport);
-       if (!p_ptr || !p_ptr->connected || !tipc_port_peer_msg(p_ptr, msg)) {
-               r_buf = tipc_buf_acquire(BASIC_H_SIZE);
-               if (r_buf) {
-                       msg = buf_msg(r_buf);
-                       tipc_msg_init(msg, TIPC_HIGH_IMPORTANCE, TIPC_CONN_MSG,
-                                     BASIC_H_SIZE, msg_orignode(msg));
-                       msg_set_errcode(msg, TIPC_ERR_NO_PORT);
-                       msg_set_origport(msg, destport);
-                       msg_set_destport(msg, msg_origport(msg));
-               }
-               if (p_ptr)
-                       tipc_port_unlock(p_ptr);
-               goto exit;
-       }
-
-       /* Process protocol message sent by peer */
-       switch (msg_type(msg)) {
-       case CONN_ACK:
-               wakeable = tipc_port_congested(p_ptr) && p_ptr->congested;
-               p_ptr->acked += msg_msgcnt(msg);
-               if (!tipc_port_congested(p_ptr)) {
-                       p_ptr->congested = 0;
-                       if (wakeable)
-                               tipc_port_wakeup(p_ptr);
-               }
-               break;
-       case CONN_PROBE:
-               r_buf = port_build_proto_msg(p_ptr, CONN_PROBE_REPLY, 0);
-               break;
-       default:
-               /* CONN_PROBE_REPLY or unrecognized - no action required */
-               break;
-       }
-       p_ptr->probing_state = CONFIRMED;
-       tipc_port_unlock(p_ptr);
-exit:
-       tipc_net_route_msg(r_buf);
-       kfree_skb(buf);
-}
-
 static int port_print(struct tipc_port *p_ptr, char *buf, int len, int full_id)
 {
        struct publication *publ;
@@ -581,16 +335,19 @@ void tipc_acknowledge(u32 ref, u32 ack)
 {
        struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
+       struct tipc_msg *msg;
 
        p_ptr = tipc_port_lock(ref);
        if (!p_ptr)
                return;
-       if (p_ptr->connected) {
-               p_ptr->conn_unacked -= ack;
+       if (p_ptr->connected)
                buf = port_build_proto_msg(p_ptr, CONN_ACK, ack);
-       }
+
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       if (!buf)
+               return;
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
 }
 
 int tipc_publish(struct tipc_port *p_ptr, unsigned int scope,
@@ -689,7 +446,7 @@ int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
        msg_set_hdr_sz(msg, SHORT_H_SIZE);
 
        p_ptr->probing_interval = PROBING_INTERVAL;
-       p_ptr->probing_state = CONFIRMED;
+       p_ptr->probing_state = TIPC_CONN_OK;
        p_ptr->connected = 1;
        k_start_timer(&p_ptr->timer, p_ptr->probing_interval);
 
@@ -698,7 +455,7 @@ int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
                          (net_ev_handler)port_handle_node_down);
        res = 0;
 exit:
-       p_ptr->max_pkt = tipc_link_get_max_pkt(peer->node, ref);
+       p_ptr->max_pkt = tipc_node_get_mtu(peer->node, ref);
        return res;
 }
 
@@ -741,6 +498,7 @@ int tipc_port_disconnect(u32 ref)
  */
 int tipc_port_shutdown(u32 ref)
 {
+       struct tipc_msg *msg;
        struct tipc_port *p_ptr;
        struct sk_buff *buf = NULL;
 
@@ -750,149 +508,7 @@ int tipc_port_shutdown(u32 ref)
 
        buf = port_build_peer_abort_msg(p_ptr, TIPC_CONN_SHUTDOWN);
        tipc_port_unlock(p_ptr);
-       tipc_net_route_msg(buf);
+       msg = buf_msg(buf);
+       tipc_link_xmit(buf, msg_destnode(msg), msg_link_selector(msg));
        return tipc_port_disconnect(ref);
 }
-
-/*
- *  tipc_port_iovec_rcv: Concatenate and deliver sectioned
- *                       message for this node.
- */
-static int tipc_port_iovec_rcv(struct tipc_port *sender,
-                              struct iovec const *msg_sect,
-                              unsigned int len)
-{
-       struct sk_buff *buf;
-       int res;
-
-       res = tipc_msg_build(&sender->phdr, msg_sect, len, MAX_MSG_SIZE, &buf);
-       if (likely(buf))
-               tipc_sk_rcv(buf);
-       return res;
-}
-
-/**
- * tipc_send - send message sections on connection
- */
-int tipc_send(struct tipc_port *p_ptr,
-             struct iovec const *msg_sect,
-             unsigned int len)
-{
-       u32 destnode;
-       int res;
-
-       if (!p_ptr->connected)
-               return -EINVAL;
-
-       p_ptr->congested = 1;
-       if (!tipc_port_congested(p_ptr)) {
-               destnode = tipc_port_peernode(p_ptr);
-               if (likely(!in_own_node(destnode)))
-                       res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
-                                                       destnode);
-               else
-                       res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
-
-               if (likely(res != -ELINKCONG)) {
-                       p_ptr->congested = 0;
-                       if (res > 0)
-                               p_ptr->sent++;
-                       return res;
-               }
-       }
-       if (tipc_port_unreliable(p_ptr)) {
-               p_ptr->congested = 0;
-               return len;
-       }
-       return -ELINKCONG;
-}
-
-/**
- * tipc_send2name - send message sections to port name
- */
-int tipc_send2name(struct tipc_port *p_ptr,
-                  struct tipc_name const *name,
-                  unsigned int domain,
-                  struct iovec const *msg_sect,
-                  unsigned int len)
-{
-       struct tipc_msg *msg;
-       u32 destnode = domain;
-       u32 destport;
-       int res;
-
-       if (p_ptr->connected)
-               return -EINVAL;
-
-       msg = &p_ptr->phdr;
-       msg_set_type(msg, TIPC_NAMED_MSG);
-       msg_set_hdr_sz(msg, NAMED_H_SIZE);
-       msg_set_nametype(msg, name->type);
-       msg_set_nameinst(msg, name->instance);
-       msg_set_lookup_scope(msg, tipc_addr_scope(domain));
-       destport = tipc_nametbl_translate(name->type, name->instance, &destnode);
-       msg_set_destnode(msg, destnode);
-       msg_set_destport(msg, destport);
-
-       if (likely(destport || destnode)) {
-               if (likely(in_own_node(destnode)))
-                       res = tipc_port_iovec_rcv(p_ptr, msg_sect, len);
-               else if (tipc_own_addr)
-                       res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
-                                                       destnode);
-               else
-                       res = tipc_port_iovec_reject(p_ptr, msg, msg_sect,
-                                                    len, TIPC_ERR_NO_NODE);
-               if (likely(res != -ELINKCONG)) {
-                       if (res > 0)
-                               p_ptr->sent++;
-                       return res;
-               }
-               if (tipc_port_unreliable(p_ptr))
-                       return len;
-
-               return -ELINKCONG;
-       }
-       return tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
-                                     TIPC_ERR_NO_NAME);
-}
-
-/**
- * tipc_send2port - send message sections to port identity
- */
-int tipc_send2port(struct tipc_port *p_ptr,
-                  struct tipc_portid const *dest,
-                  struct iovec const *msg_sect,
-                  unsigned int len)
-{
-       struct tipc_msg *msg;
-       int res;
-
-       if (p_ptr->connected)
-               return -EINVAL;
-
-       msg = &p_ptr->phdr;
-       msg_set_type(msg, TIPC_DIRECT_MSG);
-       msg_set_lookup_scope(msg, 0);
-       msg_set_destnode(msg, dest->node);
-       msg_set_destport(msg, dest->ref);
-       msg_set_hdr_sz(msg, BASIC_H_SIZE);
-
-       if (in_own_node(dest->node))
-               res =  tipc_port_iovec_rcv(p_ptr, msg_sect, len);
-       else if (tipc_own_addr)
-               res = tipc_link_iovec_xmit_fast(p_ptr, msg_sect, len,
-                                               dest->node);
-       else
-               res = tipc_port_iovec_reject(p_ptr, msg, msg_sect, len,
-                                               TIPC_ERR_NO_NODE);
-       if (likely(res != -ELINKCONG)) {
-               if (res > 0)
-                       p_ptr->sent++;
-               return res;
-       }
-       if (tipc_port_unreliable(p_ptr))
-               return len;
-
-       return -ELINKCONG;
-}
index cf4ca5b..3f93454 100644 (file)
  * @connected: non-zero if port is currently connected to a peer port
  * @conn_type: TIPC type used when connection was established
  * @conn_instance: TIPC instance used when connection was established
- * @conn_unacked: number of unacknowledged messages received from peer port
  * @published: non-zero if port has one or more associated names
- * @congested: non-zero if cannot send because of link or port congestion
  * @max_pkt: maximum packet size "hint" used when building messages sent by port
  * @ref: unique reference to port in TIPC object registry
  * @phdr: preformatted message header used when sending messages
  * @port_list: adjacent ports in TIPC's global list of ports
  * @wait_list: adjacent ports in list of ports waiting on link congestion
  * @waiting_pkts:
- * @sent: # of non-empty messages sent by port
- * @acked: # of non-empty message acknowledgements from connected port's peer
  * @publications: list of publications for port
  * @pub_count: total # of publications port has made during its lifetime
  * @probing_state:
@@ -76,17 +72,13 @@ struct tipc_port {
        int connected;
        u32 conn_type;
        u32 conn_instance;
-       u32 conn_unacked;
        int published;
-       u32 congested;
        u32 max_pkt;
        u32 ref;
        struct tipc_msg phdr;
        struct list_head port_list;
        struct list_head wait_list;
        u32 waiting_pkts;
-       u32 sent;
-       u32 acked;
        struct list_head publications;
        u32 pub_count;
        u32 probing_state;
@@ -104,8 +96,6 @@ struct tipc_port_list;
 u32 tipc_port_init(struct tipc_port *p_ptr,
                   const unsigned int importance);
 
-int tipc_reject_msg(struct sk_buff *buf, u32 err);
-
 void tipc_acknowledge(u32 port_ref, u32 ack);
 
 void tipc_port_destroy(struct tipc_port *p_ptr);
@@ -122,8 +112,6 @@ int tipc_port_disconnect(u32 portref);
 
 int tipc_port_shutdown(u32 ref);
 
-void tipc_port_wakeup(struct tipc_port *port);
-
 /*
  * The following routines require that the port be locked on entry
  */
@@ -132,39 +120,7 @@ int __tipc_port_connect(u32 ref, struct tipc_port *p_ptr,
                   struct tipc_portid const *peer);
 int tipc_port_peer_msg(struct tipc_port *p_ptr, struct tipc_msg *msg);
 
-/*
- * TIPC messaging routines
- */
-
-int tipc_send(struct tipc_port *port,
-             struct iovec const *msg_sect,
-             unsigned int len);
-
-int tipc_send2name(struct tipc_port *port,
-                  struct tipc_name const *name,
-                  u32 domain,
-                  struct iovec const *msg_sect,
-                  unsigned int len);
-
-int tipc_send2port(struct tipc_port *port,
-                  struct tipc_portid const *dest,
-                  struct iovec const *msg_sect,
-                  unsigned int len);
-
-int tipc_port_mcast_xmit(struct tipc_port *port,
-                        struct tipc_name_seq const *seq,
-                        struct iovec const *msg,
-                        unsigned int len);
-
-int tipc_port_iovec_reject(struct tipc_port *p_ptr,
-                          struct tipc_msg *hdr,
-                          struct iovec const *msg_sect,
-                          unsigned int len,
-                          int err);
-
 struct sk_buff *tipc_port_get_ports(void);
-void tipc_port_proto_rcv(struct sk_buff *buf);
-void tipc_port_mcast_rcv(struct sk_buff *buf, struct tipc_port_list *dp);
 void tipc_port_reinit(void);
 
 /**
@@ -185,12 +141,6 @@ static inline void tipc_port_unlock(struct tipc_port *p_ptr)
        spin_unlock_bh(p_ptr->lock);
 }
 
-static inline int tipc_port_congested(struct tipc_port *p_ptr)
-{
-       return ((p_ptr->sent - p_ptr->acked) >= TIPC_FLOWCTRL_WIN);
-}
-
-
 static inline u32 tipc_port_peernode(struct tipc_port *p_ptr)
 {
        return msg_destnode(&p_ptr->phdr);
index ef04755..7d423ee 100644 (file)
 
 #include "core.h"
 #include "port.h"
+#include "name_table.h"
 #include "node.h"
-
+#include "link.h"
 #include <linux/export.h>
 
 #define SS_LISTENING   -1      /* socket is listening */
 #define SS_READY       -2      /* socket is connectionless */
 
 #define CONN_TIMEOUT_DEFAULT   8000    /* default connect timeout = 8s */
+#define TIPC_FWD_MSG           1
 
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *skb);
 static void tipc_data_ready(struct sock *sk);
 static void tipc_write_space(struct sock *sk);
 static int tipc_release(struct socket *sock);
 static int tipc_accept(struct socket *sock, struct socket *new_sock, int flags);
+static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p);
 
 static const struct proto_ops packet_ops;
 static const struct proto_ops stream_ops;
@@ -123,9 +126,12 @@ static void advance_rx_queue(struct sock *sk)
 static void reject_rx_queue(struct sock *sk)
 {
        struct sk_buff *buf;
+       u32 dnode;
 
-       while ((buf = __skb_dequeue(&sk->sk_receive_queue)))
-               tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+       while ((buf = __skb_dequeue(&sk->sk_receive_queue))) {
+               if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
+                       tipc_link_xmit(buf, dnode, 0);
+       }
 }
 
 /**
@@ -201,6 +207,7 @@ static int tipc_sk_create(struct net *net, struct socket *sock,
        sk->sk_data_ready = tipc_data_ready;
        sk->sk_write_space = tipc_write_space;
        tsk->conn_timeout = CONN_TIMEOUT_DEFAULT;
+       tsk->sent_unacked = 0;
        atomic_set(&tsk->dupl_rcvcnt, 0);
        tipc_port_unlock(port);
 
@@ -303,6 +310,7 @@ static int tipc_release(struct socket *sock)
        struct tipc_sock *tsk;
        struct tipc_port *port;
        struct sk_buff *buf;
+       u32 dnode;
 
        /*
         * Exit if socket isn't fully initialized (occurs when a failed accept()
@@ -331,7 +339,8 @@ static int tipc_release(struct socket *sock)
                                sock->state = SS_DISCONNECTING;
                                tipc_port_disconnect(port->ref);
                        }
-                       tipc_reject_msg(buf, TIPC_ERR_NO_PORT);
+                       if (tipc_msg_reverse(buf, &dnode, TIPC_ERR_NO_PORT))
+                               tipc_link_xmit(buf, dnode, 0);
                }
        }
 
@@ -504,12 +513,12 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
 
        switch ((int)sock->state) {
        case SS_UNCONNECTED:
-               if (!tsk->port.congested)
+               if (!tsk->link_cong)
                        mask |= POLLOUT;
                break;
        case SS_READY:
        case SS_CONNECTED:
-               if (!tsk->port.congested)
+               if (!tsk->link_cong && !tipc_sk_conn_cong(tsk))
                        mask |= POLLOUT;
                /* fall thru' */
        case SS_CONNECTING:
@@ -525,6 +534,136 @@ static unsigned int tipc_poll(struct file *file, struct socket *sock,
        return mask;
 }
 
+/**
+ * tipc_sendmcast - send multicast message
+ * @sock: socket structure
+ * @seq: destination address
+ * @iov: message data to send
+ * @dsz: total length of message data
+ * @timeo: timeout to wait for wakeup
+ *
+ * Called from function tipc_sendmsg(), which has done all sanity checks
+ * Returns the number of bytes sent on success, or errno
+ */
+static int tipc_sendmcast(struct  socket *sock, struct tipc_name_seq *seq,
+                         struct iovec *iov, size_t dsz, long timeo)
+{
+       struct sock *sk = sock->sk;
+       struct tipc_msg *mhdr = &tipc_sk(sk)->port.phdr;
+       struct sk_buff *buf;
+       uint mtu;
+       int rc;
+
+       msg_set_type(mhdr, TIPC_MCAST_MSG);
+       msg_set_lookup_scope(mhdr, TIPC_CLUSTER_SCOPE);
+       msg_set_destport(mhdr, 0);
+       msg_set_destnode(mhdr, 0);
+       msg_set_nametype(mhdr, seq->type);
+       msg_set_namelower(mhdr, seq->lower);
+       msg_set_nameupper(mhdr, seq->upper);
+       msg_set_hdr_sz(mhdr, MCAST_H_SIZE);
+
+new_mtu:
+       mtu = tipc_bclink_get_mtu();
+       rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+       if (unlikely(rc < 0))
+               return rc;
+
+       do {
+               rc = tipc_bclink_xmit(buf);
+               if (likely(rc >= 0)) {
+                       rc = dsz;
+                       break;
+               }
+               if (rc == -EMSGSIZE)
+                       goto new_mtu;
+               if (rc != -ELINKCONG)
+                       break;
+               rc = tipc_wait_for_sndmsg(sock, &timeo);
+               if (rc)
+                       kfree_skb_list(buf);
+       } while (!rc);
+       return rc;
+}
+
+/* tipc_sk_mcast_rcv - Deliver multicast message to all destination sockets
+ */
+void tipc_sk_mcast_rcv(struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_port_list dports = {0, NULL, };
+       struct tipc_port_list *item;
+       struct sk_buff *b;
+       uint i, last, dst = 0;
+       u32 scope = TIPC_CLUSTER_SCOPE;
+
+       if (in_own_node(msg_orignode(msg)))
+               scope = TIPC_NODE_SCOPE;
+
+       /* Create destination port list: */
+       tipc_nametbl_mc_translate(msg_nametype(msg),
+                                 msg_namelower(msg),
+                                 msg_nameupper(msg),
+                                 scope,
+                                 &dports);
+       last = dports.count;
+       if (!last) {
+               kfree_skb(buf);
+               return;
+       }
+
+       for (item = &dports; item; item = item->next) {
+               for (i = 0; i < PLSIZE && ++dst <= last; i++) {
+                       b = (dst != last) ? skb_clone(buf, GFP_ATOMIC) : buf;
+                       if (!b) {
+                               pr_warn("Failed to clone mcast rcv buffer\n");
+                               continue;
+                       }
+                       msg_set_destport(msg, item->ports[i]);
+                       tipc_sk_rcv(b);
+               }
+       }
+       tipc_port_list_free(&dports);
+}
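
The chunked walk above only makes sense against the shape of the destination list. A hedged sketch of the layout it implies, with the field order inferred from the {0, NULL, } initializer and the item->ports[i] / item->next accesses (the actual definition and PLSIZE value live elsewhere in TIPC):

    #define PLSIZE 32                         /* assumed chunk capacity */

    struct tipc_port_list {
            int count;                        /* total entries; head chunk only */
            struct tipc_port_list *next;      /* next chunk in the chain */
            u32 ports[PLSIZE];                /* port references in this chunk */
    };

Entry n therefore lives in chunk n / PLSIZE at slot n % PLSIZE, which is exactly what the dst/last/i bookkeeping in tipc_sk_mcast_rcv() walks.
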
+
+/**
+ * tipc_sk_proto_rcv - receive a connection mng protocol message
+ * @tsk: receiving socket
+ * @dnode: node to send response message to, if any
+ * @buf: buffer containing protocol message
+ * Returns 0 (TIPC_OK) if the message was consumed, or 1 (TIPC_FWD_MSG) if a
+ * CONN_PROBE_REPLY message should be forwarded by the caller.
+ */
+static int tipc_sk_proto_rcv(struct tipc_sock *tsk, u32 *dnode,
+                            struct sk_buff *buf)
+{
+       struct tipc_msg *msg = buf_msg(buf);
+       struct tipc_port *port = &tsk->port;
+       int conn_cong;
+
+       /* Ignore if connection cannot be validated: */
+       if (!port->connected || !tipc_port_peer_msg(port, msg))
+               goto exit;
+
+       port->probing_state = TIPC_CONN_OK;
+
+       if (msg_type(msg) == CONN_ACK) {
+               conn_cong = tipc_sk_conn_cong(tsk);
+               tsk->sent_unacked -= msg_msgcnt(msg);
+               if (conn_cong)
+                       tipc_sock_wakeup(tsk);
+       } else if (msg_type(msg) == CONN_PROBE) {
+               if (!tipc_msg_reverse(buf, dnode, TIPC_OK))
+                       return TIPC_OK;
+               msg_set_type(msg, CONN_PROBE_REPLY);
+               return TIPC_FWD_MSG;
+       }
+       /* Do nothing if msg_type() == CONN_PROBE_REPLY */
+exit:
+       kfree_skb(buf);
+       return TIPC_OK;
+}
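
Since tipc_sk_proto_rcv() hands responsibility for the reply back to its caller, a minimal caller-side sketch may help; the wrapper name is hypothetical, but the contract mirrors what filter_rcv() and tipc_backlog_rcv() do later in this patch:

    /* Hypothetical helper: dispatch a CONN_MANAGER message and honour
     * the consumed-vs-forward contract of tipc_sk_proto_rcv().
     */
    static void example_dispatch_proto(struct tipc_sock *tsk, struct sk_buff *buf)
    {
            u32 dnode;

            if (tipc_sk_proto_rcv(tsk, &dnode, buf) == TIPC_FWD_MSG)
                    tipc_link_xmit(buf, dnode, 0);  /* bounce CONN_PROBE_REPLY */
            /* on TIPC_OK the callee has already consumed (freed) buf */
    }
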
+
 /**
  * dest_name_check - verify user is permitted to send to specified port name
  * @dest: destination address
@@ -539,6 +678,8 @@ static int dest_name_check(struct sockaddr_tipc *dest, struct msghdr *m)
 {
        struct tipc_cfg_msg_hdr hdr;
 
+       if (unlikely(dest->addrtype == TIPC_ADDR_ID))
+               return 0;
        if (likely(dest->addr.name.name.type >= TIPC_RESERVED_TYPES))
                return 0;
        if (likely(dest->addr.name.name.type == TIPC_TOP_SRV))
@@ -575,19 +716,18 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
                        return sock_intr_errno(*timeo_p);
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-               done = sk_wait_event(sk, timeo_p, !tsk->port.congested);
+               done = sk_wait_event(sk, timeo_p, !tsk->link_cong);
                finish_wait(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
 }
 
-
 /**
  * tipc_sendmsg - send message in connectionless manner
  * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
  * @m: message to send
- * @total_len: length of message
+ * @dsz: amount of user data to be sent
  *
  * Message must have a destination specified explicitly.
  * Used for SOCK_RDM and SOCK_DGRAM messages,
@@ -597,100 +737,123 @@ static int tipc_wait_for_sndmsg(struct socket *sock, long *timeo_p)
  * Returns the number of bytes sent on success, or errno otherwise
  */
 static int tipc_sendmsg(struct kiocb *iocb, struct socket *sock,
-                       struct msghdr *m, size_t total_len)
+                       struct msghdr *m, size_t dsz)
 {
+       DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_port *port = &tsk->port;
-       DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       int needs_conn;
+       struct tipc_msg *mhdr = &port->phdr;
+       struct iovec *iov = m->msg_iov;
+       u32 dnode, dport;
+       struct sk_buff *buf;
+       struct tipc_name_seq *seq = &dest->addr.nameseq;
+       u32 mtu;
        long timeo;
-       int res = -EINVAL;
+       int rc = -EINVAL;
 
        if (unlikely(!dest))
                return -EDESTADDRREQ;
+
        if (unlikely((m->msg_namelen < sizeof(*dest)) ||
                     (dest->family != AF_TIPC)))
                return -EINVAL;
-       if (total_len > TIPC_MAX_USER_MSG_SIZE)
+
+       if (dsz > TIPC_MAX_USER_MSG_SIZE)
                return -EMSGSIZE;
 
        if (iocb)
                lock_sock(sk);
 
-       needs_conn = (sock->state != SS_READY);
-       if (unlikely(needs_conn)) {
+       if (unlikely(sock->state != SS_READY)) {
                if (sock->state == SS_LISTENING) {
-                       res = -EPIPE;
+                       rc = -EPIPE;
                        goto exit;
                }
                if (sock->state != SS_UNCONNECTED) {
-                       res = -EISCONN;
+                       rc = -EISCONN;
                        goto exit;
                }
                if (tsk->port.published) {
-                       res = -EOPNOTSUPP;
+                       rc = -EOPNOTSUPP;
                        goto exit;
                }
                if (dest->addrtype == TIPC_ADDR_NAME) {
                        tsk->port.conn_type = dest->addr.name.name.type;
                        tsk->port.conn_instance = dest->addr.name.name.instance;
                }
-
-               /* Abort any pending connection attempts (very unlikely) */
-               reject_rx_queue(sk);
        }
+       rc = dest_name_check(dest, m);
+       if (rc)
+               goto exit;
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
-       do {
-               if (dest->addrtype == TIPC_ADDR_NAME) {
-                       res = dest_name_check(dest, m);
-                       if (res)
-                               break;
-                       res = tipc_send2name(port,
-                                            &dest->addr.name.name,
-                                            dest->addr.name.domain,
-                                            m->msg_iov,
-                                            total_len);
-               } else if (dest->addrtype == TIPC_ADDR_ID) {
-                       res = tipc_send2port(port,
-                                            &dest->addr.id,
-                                            m->msg_iov,
-                                            total_len);
-               } else if (dest->addrtype == TIPC_ADDR_MCAST) {
-                       if (needs_conn) {
-                               res = -EOPNOTSUPP;
-                               break;
-                       }
-                       res = dest_name_check(dest, m);
-                       if (res)
-                               break;
-                       res = tipc_port_mcast_xmit(port,
-                                                  &dest->addr.nameseq,
-                                                  m->msg_iov,
-                                                  total_len);
+
+       if (dest->addrtype == TIPC_ADDR_MCAST) {
+               rc = tipc_sendmcast(sock, seq, iov, dsz, timeo);
+               goto exit;
+       } else if (dest->addrtype == TIPC_ADDR_NAME) {
+               u32 type = dest->addr.name.name.type;
+               u32 inst = dest->addr.name.name.instance;
+               u32 domain = dest->addr.name.domain;
+
+               dnode = domain;
+               msg_set_type(mhdr, TIPC_NAMED_MSG);
+               msg_set_hdr_sz(mhdr, NAMED_H_SIZE);
+               msg_set_nametype(mhdr, type);
+               msg_set_nameinst(mhdr, inst);
+               msg_set_lookup_scope(mhdr, tipc_addr_scope(domain));
+               dport = tipc_nametbl_translate(type, inst, &dnode);
+               msg_set_destnode(mhdr, dnode);
+               msg_set_destport(mhdr, dport);
+               if (unlikely(!dport && !dnode)) {
+                       rc = -EHOSTUNREACH;
+                       goto exit;
                }
-               if (likely(res != -ELINKCONG)) {
-                       if (needs_conn && (res >= 0))
+       } else if (dest->addrtype == TIPC_ADDR_ID) {
+               dnode = dest->addr.id.node;
+               msg_set_type(mhdr, TIPC_DIRECT_MSG);
+               msg_set_lookup_scope(mhdr, 0);
+               msg_set_destnode(mhdr, dnode);
+               msg_set_destport(mhdr, dest->addr.id.ref);
+               msg_set_hdr_sz(mhdr, BASIC_H_SIZE);
+       }
+
+new_mtu:
+       mtu = tipc_node_get_mtu(dnode, tsk->port.ref);
+       rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
+       if (rc < 0)
+               goto exit;
+
+       do {
+               rc = tipc_link_xmit(buf, dnode, tsk->port.ref);
+               if (likely(rc >= 0)) {
+                       if (sock->state != SS_READY)
                                sock->state = SS_CONNECTING;
+                       rc = dsz;
                        break;
                }
-               res = tipc_wait_for_sndmsg(sock, &timeo);
-               if (res)
+               if (rc == -EMSGSIZE)
+                       goto new_mtu;
+
+               if (rc != -ELINKCONG)
                        break;
-       } while (1);
 
+               rc = tipc_wait_for_sndmsg(sock, &timeo);
+               if (rc)
+                       kfree_skb_list(buf);
+       } while (!rc);
 exit:
        if (iocb)
                release_sock(sk);
-       return res;
+
+       return rc;
 }
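
Stripped of the address-type and socket-state handling, the send path above reduces to one retry skeleton. A hedged restatement, using the same names as tipc_sendmsg() but rewritten with labels instead of the do/while, everything else elided:

    new_mtu:
            mtu = tipc_node_get_mtu(dnode, tsk->port.ref);
            rc = tipc_msg_build(mhdr, iov, 0, dsz, mtu, &buf);
            if (rc < 0)
                    return rc;                   /* could not build at all */
    xmit:
            rc = tipc_link_xmit(buf, dnode, tsk->port.ref);
            if (rc >= 0)
                    return dsz;                  /* chain accepted by the link */
            if (rc == -EMSGSIZE)
                    goto new_mtu;                /* link MTU changed: rebuild */
            if (rc != -ELINKCONG)
                    return rc;                   /* hard error */
            rc = tipc_wait_for_sndmsg(sock, &timeo);
            if (rc) {
                    kfree_skb_list(buf);         /* interrupted while congested */
                    return rc;
            }
            goto xmit;                           /* woken: resend the same chain */
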
 
 static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
-       struct tipc_port *port = &tsk->port;
        DEFINE_WAIT(wait);
        int done;
 
@@ -709,37 +872,49 @@ static int tipc_wait_for_sndpkt(struct socket *sock, long *timeo_p)
 
                prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
                done = sk_wait_event(sk, timeo_p,
-                                    (!port->congested || !port->connected));
+                                    (!tsk->link_cong &&
+                                     !tipc_sk_conn_cong(tsk)) ||
+                                    !tsk->port.connected);
                finish_wait(sk_sleep(sk), &wait);
        } while (!done);
        return 0;
 }
 
 /**
- * tipc_send_packet - send a connection-oriented message
- * @iocb: if NULL, indicates that socket lock is already held
+ * tipc_send_stream - send stream-oriented data
+ * @iocb: (unused)
  * @sock: socket structure
- * @m: message to send
- * @total_len: length of message
+ * @m: data to send
+ * @dsz: total length of data to be transmitted
  *
- * Used for SOCK_SEQPACKET messages and SOCK_STREAM data.
+ * Used for SOCK_STREAM data.
  *
- * Returns the number of bytes sent on success, or errno otherwise
+ * Returns the number of bytes sent on success (or partial success),
+ * or errno if no data sent
  */
-static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t total_len)
+static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t dsz)
 {
        struct sock *sk = sock->sk;
        struct tipc_sock *tsk = tipc_sk(sk);
+       struct tipc_port *port = &tsk->port;
+       struct tipc_msg *mhdr = &port->phdr;
+       struct sk_buff *buf;
        DECLARE_SOCKADDR(struct sockaddr_tipc *, dest, m->msg_name);
-       int res = -EINVAL;
+       u32 ref = port->ref;
+       int rc = -EINVAL;
        long timeo;
+       u32 dnode;
+       uint mtu, send, sent = 0;
 
        /* Handle implied connection establishment */
-       if (unlikely(dest))
-               return tipc_sendmsg(iocb, sock, m, total_len);
-
-       if (total_len > TIPC_MAX_USER_MSG_SIZE)
+       if (unlikely(dest)) {
+               rc = tipc_sendmsg(iocb, sock, m, dsz);
+               if (dsz && (dsz == rc))
+                       tsk->sent_unacked = 1;
+               return rc;
+       }
+       if (dsz > (uint)INT_MAX)
                return -EMSGSIZE;
 
        if (iocb)
@@ -747,123 +922,66 @@ static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
 
        if (unlikely(sock->state != SS_CONNECTED)) {
                if (sock->state == SS_DISCONNECTING)
-                       res = -EPIPE;
+                       rc = -EPIPE;
                else
-                       res = -ENOTCONN;
+                       rc = -ENOTCONN;
                goto exit;
        }
 
        timeo = sock_sndtimeo(sk, m->msg_flags & MSG_DONTWAIT);
+       dnode = tipc_port_peernode(port);
+
+next:
+       mtu = port->max_pkt;
+       send = min_t(uint, dsz - sent, TIPC_MAX_USER_MSG_SIZE);
+       rc = tipc_msg_build(mhdr, m->msg_iov, sent, send, mtu, &buf);
+       if (unlikely(rc < 0))
+               goto exit;
        do {
-               res = tipc_send(&tsk->port, m->msg_iov, total_len);
-               if (likely(res != -ELINKCONG))
-                       break;
-               res = tipc_wait_for_sndpkt(sock, &timeo);
-               if (res)
-                       break;
-       } while (1);
+               if (likely(!tipc_sk_conn_cong(tsk))) {
+                       rc = tipc_link_xmit(buf, dnode, ref);
+                       if (likely(!rc)) {
+                               tsk->sent_unacked++;
+                               sent += send;
+                               if (sent == dsz)
+                                       break;
+                               goto next;
+                       }
+                       if (rc == -EMSGSIZE) {
+                               port->max_pkt = tipc_node_get_mtu(dnode, ref);
+                               goto next;
+                       }
+                       if (rc != -ELINKCONG)
+                               break;
+               }
+               rc = tipc_wait_for_sndpkt(sock, &timeo);
+               if (rc)
+                       kfree_skb_list(buf);
+       } while (!rc);
 exit:
        if (iocb)
                release_sock(sk);
-       return res;
+       return sent ? sent : rc;
 }
 
 /**
- * tipc_send_stream - send stream-oriented data
- * @iocb: (unused)
+ * tipc_send_packet - send a connection-oriented message
+ * @iocb: if NULL, indicates that socket lock is already held
  * @sock: socket structure
- * @m: data to send
- * @total_len: total length of data to be sent
+ * @m: message to send
+ * @dsz: length of data to be transmitted
  *
- * Used for SOCK_STREAM data.
+ * Used for SOCK_SEQPACKET messages.
  *
- * Returns the number of bytes sent on success (or partial success),
- * or errno if no data sent
+ * Returns the number of bytes sent on success, or errno otherwise
  */
-static int tipc_send_stream(struct kiocb *iocb, struct socket *sock,
-                           struct msghdr *m, size_t total_len)
+static int tipc_send_packet(struct kiocb *iocb, struct socket *sock,
+                           struct msghdr *m, size_t dsz)
 {
-       struct sock *sk = sock->sk;
-       struct tipc_sock *tsk = tipc_sk(sk);
-       struct msghdr my_msg;
-       struct iovec my_iov;
-       struct iovec *curr_iov;
-       int curr_iovlen;
-       char __user *curr_start;
-       u32 hdr_size;
-       int curr_left;
-       int bytes_to_send;
-       int bytes_sent;
-       int res;
-
-       lock_sock(sk);
-
-       /* Handle special cases where there is no connection */
-       if (unlikely(sock->state != SS_CONNECTED)) {
-               if (sock->state == SS_UNCONNECTED)
-                       res = tipc_send_packet(NULL, sock, m, total_len);
-               else
-                       res = sock->state == SS_DISCONNECTING ? -EPIPE : -ENOTCONN;
-               goto exit;
-       }
-
-       if (unlikely(m->msg_name)) {
-               res = -EISCONN;
-               goto exit;
-       }
-
-       if (total_len > (unsigned int)INT_MAX) {
-               res = -EMSGSIZE;
-               goto exit;
-       }
-
-       /*
-        * Send each iovec entry using one or more messages
-        *
-        * Note: This algorithm is good for the most likely case
-        * (i.e. one large iovec entry), but could be improved to pass sets
-        * of small iovec entries into send_packet().
-        */
-       curr_iov = m->msg_iov;
-       curr_iovlen = m->msg_iovlen;
-       my_msg.msg_iov = &my_iov;
-       my_msg.msg_iovlen = 1;
-       my_msg.msg_flags = m->msg_flags;
-       my_msg.msg_name = NULL;
-       bytes_sent = 0;
-
-       hdr_size = msg_hdr_sz(&tsk->port.phdr);
-
-       while (curr_iovlen--) {
-               curr_start = curr_iov->iov_base;
-               curr_left = curr_iov->iov_len;
-
-               while (curr_left) {
-                       bytes_to_send = tsk->port.max_pkt - hdr_size;
-                       if (bytes_to_send > TIPC_MAX_USER_MSG_SIZE)
-                               bytes_to_send = TIPC_MAX_USER_MSG_SIZE;
-                       if (curr_left < bytes_to_send)
-                               bytes_to_send = curr_left;
-                       my_iov.iov_base = curr_start;
-                       my_iov.iov_len = bytes_to_send;
-                       res = tipc_send_packet(NULL, sock, &my_msg,
-                                              bytes_to_send);
-                       if (res < 0) {
-                               if (bytes_sent)
-                                       res = bytes_sent;
-                               goto exit;
-                       }
-                       curr_left -= bytes_to_send;
-                       curr_start += bytes_to_send;
-                       bytes_sent += bytes_to_send;
-               }
+       if (dsz > TIPC_MAX_USER_MSG_SIZE)
+               return -EMSGSIZE;
 
-               curr_iov++;
-       }
-       res = bytes_sent;
-exit:
-       release_sock(sk);
-       return res;
+       return tipc_send_stream(iocb, sock, m, dsz);
 }
 
 /**
@@ -1104,8 +1222,10 @@ restart:
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
                if ((sock->state != SS_READY) &&
-                   (++port->conn_unacked >= TIPC_CONNACK_INTV))
-                       tipc_acknowledge(port->ref, port->conn_unacked);
+                   (++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
+                       tipc_acknowledge(port->ref, tsk->rcv_unacked);
+                       tsk->rcv_unacked = 0;
+               }
                advance_rx_queue(sk);
        }
 exit:
@@ -1213,8 +1333,10 @@ restart:
 
        /* Consume received message (optional) */
        if (likely(!(flags & MSG_PEEK))) {
-               if (unlikely(++port->conn_unacked >= TIPC_CONNACK_INTV))
-                       tipc_acknowledge(port->ref, port->conn_unacked);
+               if (unlikely(++tsk->rcv_unacked >= TIPC_CONNACK_INTV)) {
+                       tipc_acknowledge(port->ref, tsk->rcv_unacked);
+                       tsk->rcv_unacked = 0;
+               }
                advance_rx_queue(sk);
        }
 
@@ -1269,17 +1391,16 @@ static void tipc_data_ready(struct sock *sk)
  * @tsk: TIPC socket
  * @msg: message
  *
- * Returns TIPC error status code and socket error status code
- * once it encounters some errors
+ * Returns 0 (TIPC_OK) if everything is OK, -TIPC_ERR_NO_PORT otherwise
  */
-static u32 filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
+static int filter_connect(struct tipc_sock *tsk, struct sk_buff **buf)
 {
        struct sock *sk = &tsk->sk;
        struct tipc_port *port = &tsk->port;
        struct socket *sock = sk->sk_socket;
        struct tipc_msg *msg = buf_msg(*buf);
 
-       u32 retval = TIPC_ERR_NO_PORT;
+       int retval = -TIPC_ERR_NO_PORT;
        int res;
 
        if (msg_mcast(msg))
@@ -1382,32 +1503,37 @@ static unsigned int rcvbuf_limit(struct sock *sk, struct sk_buff *buf)
  *
  * Called with socket lock already taken; port lock may also be taken.
  *
- * Returns TIPC error status code (TIPC_OK if message is not to be rejected)
+ * Returns 0 (TIPC_OK) if the message was consumed, a negative TIPC error
+ * code if the message is to be rejected, or 1 (TIPC_FWD_MSG) if a
+ * CONN_MANAGER message is to be forwarded
  */
-static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
+static int filter_rcv(struct sock *sk, struct sk_buff *buf)
 {
        struct socket *sock = sk->sk_socket;
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_msg *msg = buf_msg(buf);
        unsigned int limit = rcvbuf_limit(sk, buf);
-       u32 res = TIPC_OK;
+       u32 onode;
+       int rc = TIPC_OK;
+
+       if (unlikely(msg_user(msg) == CONN_MANAGER))
+               return tipc_sk_proto_rcv(tsk, &onode, buf);
 
        /* Reject message if it is wrong sort of message for socket */
        if (msg_type(msg) > TIPC_DIRECT_MSG)
-               return TIPC_ERR_NO_PORT;
+               return -TIPC_ERR_NO_PORT;
 
        if (sock->state == SS_READY) {
                if (msg_connected(msg))
-                       return TIPC_ERR_NO_PORT;
+                       return -TIPC_ERR_NO_PORT;
        } else {
-               res = filter_connect(tsk, &buf);
-               if (res != TIPC_OK || buf == NULL)
-                       return res;
+               rc = filter_connect(tsk, &buf);
+               if (rc != TIPC_OK || buf == NULL)
+                       return rc;
        }
 
        /* Reject message if there isn't room to queue it */
        if (sk_rmem_alloc_get(sk) + buf->truesize >= limit)
-               return TIPC_ERR_OVERLOAD;
+               return -TIPC_ERR_OVERLOAD;
 
        /* Enqueue message */
        TIPC_SKB_CB(buf)->handle = NULL;
@@ -1429,16 +1555,23 @@ static u32 filter_rcv(struct sock *sk, struct sk_buff *buf)
  */
 static int tipc_backlog_rcv(struct sock *sk, struct sk_buff *buf)
 {
-       u32 res;
+       int rc;
+       u32 onode;
        struct tipc_sock *tsk = tipc_sk(sk);
        uint truesize = buf->truesize;
 
-       res = filter_rcv(sk, buf);
-       if (unlikely(res))
-               tipc_reject_msg(buf, res);
+       rc = filter_rcv(sk, buf);
 
-       if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
-               atomic_add(truesize, &tsk->dupl_rcvcnt);
+       if (likely(!rc)) {
+               if (atomic_read(&tsk->dupl_rcvcnt) < TIPC_CONN_OVERLOAD_LIMIT)
+                       atomic_add(truesize, &tsk->dupl_rcvcnt);
+               return 0;
+       }
+
+       if ((rc < 0) && !tipc_msg_reverse(buf, &onode, -rc))
+               return 0;
+
+       tipc_link_xmit(buf, onode, 0);
 
        return 0;
 }
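
Both paths above fold three outcomes into one int, and the negation is deliberate: on-wire TIPC error codes are positive, so returning them negated keeps them distinct from TIPC_OK (0) and TIPC_FWD_MSG (1). A hedged sketch of the convention at a call site:

    int rc = filter_rcv(sk, buf);               /* 0, 1, or -TIPC_ERR_* */

    if (rc < 0) {
            /* reject: flip the header around; -rc is the wire error code */
            if (tipc_msg_reverse(buf, &onode, -rc))
                    tipc_link_xmit(buf, onode, 0);
    }
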
@@ -1455,19 +1588,14 @@ int tipc_sk_rcv(struct sk_buff *buf)
        struct tipc_port *port;
        struct sock *sk;
        u32 dport = msg_destport(buf_msg(buf));
-       int err = TIPC_OK;
+       int rc = TIPC_OK;
        uint limit;
+       u32 dnode;
 
-       /* Forward unresolved named message */
-       if (unlikely(!dport)) {
-               tipc_net_route_msg(buf);
-               return 0;
-       }
-
-       /* Validate destination */
+       /* Validate destination and message */
        port = tipc_port_lock(dport);
        if (unlikely(!port)) {
-               err = TIPC_ERR_NO_PORT;
+               rc = tipc_msg_eval(buf, &dnode);
                goto exit;
        }
 
@@ -1478,23 +1606,25 @@ int tipc_sk_rcv(struct sk_buff *buf)
        bh_lock_sock(sk);
 
        if (!sock_owned_by_user(sk)) {
-               err = filter_rcv(sk, buf);
+               rc = filter_rcv(sk, buf);
        } else {
                if (sk->sk_backlog.len == 0)
                        atomic_set(&tsk->dupl_rcvcnt, 0);
                limit = rcvbuf_limit(sk, buf) + atomic_read(&tsk->dupl_rcvcnt);
                if (sk_add_backlog(sk, buf, limit))
-                       err = TIPC_ERR_OVERLOAD;
+                       rc = -TIPC_ERR_OVERLOAD;
        }
-
        bh_unlock_sock(sk);
        tipc_port_unlock(port);
 
-       if (likely(!err))
+       if (likely(!rc))
                return 0;
 exit:
-       tipc_reject_msg(buf, err);
-       return -EHOSTUNREACH;
+       if ((rc < 0) && !tipc_msg_reverse(buf, &dnode, -rc))
+               return -EHOSTUNREACH;
+
+       tipc_link_xmit(buf, dnode, 0);
+       return (rc < 0) ? -EHOSTUNREACH : 0;
 }
 
 static int tipc_wait_for_connect(struct socket *sock, long *timeo_p)
@@ -1758,6 +1888,7 @@ static int tipc_shutdown(struct socket *sock, int how)
        struct tipc_sock *tsk = tipc_sk(sk);
        struct tipc_port *port = &tsk->port;
        struct sk_buff *buf;
+       u32 peer;
        int res;
 
        if (how != SHUT_RDWR)
@@ -1778,7 +1909,8 @@ restart:
                                goto restart;
                        }
                        tipc_port_disconnect(port->ref);
-                       tipc_reject_msg(buf, TIPC_CONN_SHUTDOWN);
+                       if (tipc_msg_reverse(buf, &peer, TIPC_CONN_SHUTDOWN))
+                               tipc_link_xmit(buf, peer, 0);
                } else {
                        tipc_port_shutdown(port->ref);
                }
@@ -1936,7 +2068,7 @@ static int tipc_getsockopt(struct socket *sock, int lvl, int opt,
        return put_user(sizeof(value), ol);
 }
 
-int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
+static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
 {
        struct tipc_sioc_ln_req lnr;
        void __user *argp = (void __user *)arg;
@@ -1952,7 +2084,6 @@ int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
                        return 0;
                }
                return -EADDRNOTAVAIL;
-               break;
        default:
                return -ENOIOCTLCMD;
        }
index 3afcd2a..43b75b3 100644 (file)
@@ -38,6 +38,9 @@
 #include "port.h"
 #include <net/sock.h>
 
+#define TIPC_CONN_OK      0
+#define TIPC_CONN_PROBING 1
+
 /**
  * struct tipc_sock - TIPC socket structure
  * @sk: socket - interacts with 'port' and with user via the socket API
@@ -45,6 +48,9 @@
  * @peer_name: the peer of the connection, if any
  * @conn_timeout: the time we can wait for an unresponded setup request
  * @dupl_rcvcnt: number of bytes counted twice, in both backlog and rcv queue
+ * @link_cong: non-zero if owner must sleep because of link congestion
+ * @sent_unacked: # messages sent by socket, and not yet acked by peer
+ * @rcv_unacked: # messages read by user, but not yet acked back to peer
  */
 
 struct tipc_sock {
@@ -52,6 +58,9 @@ struct tipc_sock {
        struct tipc_port port;
        unsigned int conn_timeout;
        atomic_t dupl_rcvcnt;
+       int link_cong;
+       uint sent_unacked;
+       uint rcv_unacked;
 };
 
 static inline struct tipc_sock *tipc_sk(const struct sock *sk)
@@ -69,6 +78,13 @@ static inline void tipc_sock_wakeup(struct tipc_sock *tsk)
        tsk->sk.sk_write_space(&tsk->sk);
 }
 
+static inline int tipc_sk_conn_cong(struct tipc_sock *tsk)
+{
+       return tsk->sent_unacked >= TIPC_FLOWCTRL_WIN;
+}
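
The two counters above implement a simple message-count window: the sender stalls once sent_unacked reaches TIPC_FLOWCTRL_WIN (the tipc_sk_conn_cong() test), and the receiver acknowledges a batch every TIPC_CONNACK_INTV reads, which tipc_sk_proto_rcv() then subtracts on the sending side. A hedged sketch of the receiver's half (hypothetical helper name):

    /* Hypothetical helper: the per-read ack step, as done in the recv
     * paths of this patch; a CONN_ACK carrying msg_msgcnt() == rcv_unacked
     * reopens the peer's send window.
     */
    static void example_ack_step(struct tipc_sock *tsk)
    {
            if (++tsk->rcv_unacked >= TIPC_CONNACK_INTV) {
                    tipc_acknowledge(tsk->port.ref, tsk->rcv_unacked);
                    tsk->rcv_unacked = 0;
            }
    }
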
+
 int tipc_sk_rcv(struct sk_buff *buf);
 
+void tipc_sk_mcast_rcv(struct sk_buff *buf);
+
 #endif
index a8ef510..0525d78 100644 (file)
@@ -2097,6 +2097,8 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
                                goto no_transform;
                        }
 
+                       dst_hold(&xdst->u.dst);
+                       xdst->u.dst.flags |= DST_NOCACHE;
                        route = xdst->route;
                }
        }
index 412d9dc..d4db6eb 100644 (file)
@@ -177,9 +177,7 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                    attrs[XFRMA_ALG_AEAD]       ||
                    attrs[XFRMA_ALG_CRYPT]      ||
                    attrs[XFRMA_ALG_COMP]       ||
-                   attrs[XFRMA_TFCPAD]         ||
-                   (ntohl(p->id.spi) >= 0x10000))
-
+                   attrs[XFRMA_TFCPAD])
                        goto out;
                break;
 
@@ -207,7 +205,8 @@ static int verify_newsa_info(struct xfrm_usersa_info *p,
                    attrs[XFRMA_ALG_AUTH]       ||
                    attrs[XFRMA_ALG_AUTH_TRUNC] ||
                    attrs[XFRMA_ALG_CRYPT]      ||
-                   attrs[XFRMA_TFCPAD])
+                   attrs[XFRMA_TFCPAD]         ||
+                   (ntohl(p->id.spi) >= 0x10000))
                        goto out;
                break;
 
index 6af3732..4b0113f 100644 (file)
@@ -56,7 +56,8 @@
  * struct:  This defines the way the data will be stored in the ring buffer.
  *    There are currently two types of elements. __field and __array.
  *    a __field is broken up into (type, name). Where type can be any
- *    type but an array.
+ *    primitive type (integer, long or pointer). __field_struct() can
+ *    be any static complex data value (struct, union, but not an array).
 *    For an array, there are three fields (type, name, size). The
  *    type of elements in the array, the name of the field and the size
  *    of the array.
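
The distinction is easiest to see inside a TP_STRUCT__entry() block; a hedged sketch with hypothetical field names (TP_STRUCT__entry, __field, __field_struct and __array are the real ftrace macros, the surrounding event definition is omitted):

    TP_STRUCT__entry(
            __field(int, cpu)                       /* primitive type only */
            __field_struct(struct timespec, ts)     /* static struct/union */
            __array(char, comm, TASK_COMM_LEN)      /* fixed-size array */
    ),
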
index 010b18e..182be0f 100755 (executable)
@@ -3476,12 +3476,17 @@ sub process {
                        }
                }
 
-# unnecessary return in a void function? (a single leading tab, then return;)
-               if ($sline =~ /^\+\treturn\s*;\s*$/ &&
-                   $prevline =~ /^\+/) {
+# unnecessary return in a void function
+# at end-of-function, with the previous line a single leading tab, then return;
+# and the line before that not a goto label target like "out:"
+               if ($sline =~ /^[ \+]}\s*$/ &&
+                   $prevline =~ /^\+\treturn\s*;\s*$/ &&
+                   $linenr >= 3 &&
+                   $lines[$linenr - 3] =~ /^[ +]/ &&
+                   $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
                        WARN("RETURN_VOID",
-                            "void function return statements are not generally useful\n" . $herecurr);
-               }
+                            "void function return statements are not generally useful\n" . $hereprev);
+               }
 
 # if statements using unnecessary parentheses - ie: if ((foo == bar))
                if ($^V && $^V ge 5.10.0 &&
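
An illustration (hypothetical functions) of what the tightened check now matches: the first function still draws the RETURN_VOID warning, while the second no longer does, because the line before the return; is a goto label, where a statement is syntactically required:

    void still_warns(void)
    {
            do_something();
            return;         /* useless: end of a void function */
    }

    void no_longer_warns(void)
    {
            if (!setup())
                    goto out;
            do_something();
    out:
            return;         /* a label must be followed by a statement */
    }
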
index da058da..16a07cf 100755 (executable)
@@ -2073,6 +2073,7 @@ sub check_return_section {
 sub dump_function($$) {
     my $prototype = shift;
     my $file = shift;
+    my $noret = 0;
 
     $prototype =~ s/^static +//;
     $prototype =~ s/^extern +//;
@@ -2086,7 +2087,7 @@ sub dump_function($$) {
     $prototype =~ s/__init_or_module +//;
     $prototype =~ s/__must_check +//;
     $prototype =~ s/__weak +//;
-    $prototype =~ s/^#\s*define\s+//; #ak added
+    my $define = $prototype =~ s/^#\s*define\s+//; #ak added
     $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
 
     # Yes, this truly is vile.  We are looking for:
@@ -2105,7 +2106,15 @@ sub dump_function($$) {
     # - atomic_set (macro)
     # - pci_match_device, __copy_to_user (long return type)
 
-    if ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
+    if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) {
+        # This is an object-like macro; it has no return type and no parameter
+        # list.
+        # Function-like macros are not allowed to have spaces between
+        # declaration_name and opening parenthesis (notice the \s+).
+        $return_type = $1;
+        $declaration_name = $2;
+        $noret = 1;
+    } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
        $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
        $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
        $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
@@ -2140,7 +2149,7 @@ sub dump_function($$) {
         # of warnings goes sufficiently down, the check is only performed in
         # verbose mode.
         # TODO: always perform the check.
-        if ($verbose) {
+        if ($verbose && !$noret) {
                 check_return_section($file, $declaration_name, $return_type);
         }
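
For reference, the two macro shapes the parser now distinguishes (names are illustrative):

	#define MAX_WIDGETS	32		/* object-like: whitespace after the
						 * name, no parameter list        */
	#define widget_ok(w)	((w) != NULL)	/* function-like: '(' immediately
						 * follows the name               */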
 
index b5f08f7..35d5a58 100644 (file)
@@ -289,14 +289,16 @@ EOF
 
 fi
 
-# Build header package
-(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
-(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
-(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
+# Build kernel header package
+(cd $srctree; find . -name Makefile\* -o -name Kconfig\* -o -name \*.pl) > "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find arch/$SRCARCH/include include scripts -type f) >> "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find arch/$SRCARCH -name module.lds -o -name Kbuild.platforms -o -name Platform) >> "$objtree/debian/hdrsrcfiles"
+(cd $srctree; find $(find arch/$SRCARCH -name include -o -name scripts -type d) -type f) >> "$objtree/debian/hdrsrcfiles"
+(cd $objtree; find arch/$SRCARCH/include Module.symvers include scripts -type f) >> "$objtree/debian/hdrobjfiles"
 destdir=$kernel_headers_dir/usr/src/linux-headers-$version
 mkdir -p "$destdir"
-(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
-(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -)
+(cd $srctree; tar -c -f - -T -) < "$objtree/debian/hdrsrcfiles" | (cd $destdir; tar -xf -)
+(cd $objtree; tar -c -f - -T -) < "$objtree/debian/hdrobjfiles" | (cd $destdir; tar -xf -)
 (cd $objtree; cp $KCONFIG_CONFIG $destdir/.config) # copy .config manually to be where it's expected to be
 ln -sf "/usr/src/linux-headers-$version" "$kernel_headers_dir/lib/modules/$version/build"
 rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles"
index 995c1ea..e046bff 100644 (file)
@@ -125,12 +125,11 @@ esac
 # Create the tarball
 #
 (
-       cd "${tmpdir}"
        opts=
        if tar --owner=root --group=root --help >/dev/null 2>&1; then
                opts="--owner=root --group=root"
        fi
-       tar cf - boot/* lib/* $opts | ${compress} > "${tarball}${file_ext}"
+       tar cf - -C "$tmpdir" boot/ lib/ $opts | ${compress} > "${tarball}${file_ext}"
 )
 
 echo "Tarball successfully created in ${tarball}${file_ext}"
index 9d1421e..49b582a 100644 (file)
@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
 
 static int MIPS_is_fake_mcount(Elf_Rel const *rp)
 {
-       static Elf_Addr old_r_offset;
+       static Elf_Addr old_r_offset = ~(Elf_Addr)0;
        Elf_Addr current_r_offset = _w(rp->r_offset);
        int is_fake;
 
-       is_fake = old_r_offset &&
+       is_fake = (old_r_offset != ~(Elf_Addr)0) &&
                (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
        old_r_offset = current_r_offset;
 
index f038f5a..f0b0e14 100644 (file)
@@ -288,6 +288,10 @@ static bool snd_ctl_remove_numid_conflict(struct snd_card *card,
 {
        struct snd_kcontrol *kctl;
 
+       /* Make sure that the ids assigned to the control do not wrap around */
+       if (card->last_numid >= UINT_MAX - count)
+               card->last_numid = 0;
+
        list_for_each_entry(kctl, &card->controls, list) {
                if (kctl->id.numid < card->last_numid + 1 + count &&
                    kctl->id.numid + kctl->count > card->last_numid + 1) {
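
The new guard closes an unsigned wraparound: allocating count consecutive numids starting past UINT_MAX - count would wrap to small values that may already be in use. A worked sketch with illustrative numbers:

	unsigned int last_numid = UINT_MAX - 2;	/* near the top of the range  */
	unsigned int count      = 4;
	/* last_numid + count would wrap to 1; the guard restarts instead: */
	if (last_numid >= UINT_MAX - count)
		last_numid = 0;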
@@ -330,6 +334,7 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
 {
        struct snd_ctl_elem_id id;
        unsigned int idx;
+       unsigned int count;
        int err = -EINVAL;
 
        if (! kcontrol)
@@ -337,6 +342,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        if (snd_BUG_ON(!card || !kcontrol->info))
                goto error;
        id = kcontrol->id;
+       if (id.index > UINT_MAX - kcontrol->count)
+               goto error;
+
        down_write(&card->controls_rwsem);
        if (snd_ctl_find_id(card, &id)) {
                up_write(&card->controls_rwsem);
@@ -358,8 +366,9 @@ int snd_ctl_add(struct snd_card *card, struct snd_kcontrol *kcontrol)
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -388,6 +397,7 @@ int snd_ctl_replace(struct snd_card *card, struct snd_kcontrol *kcontrol,
                    bool add_on_replace)
 {
        struct snd_ctl_elem_id id;
+       unsigned int count;
        unsigned int idx;
        struct snd_kcontrol *old;
        int ret;
@@ -423,8 +433,9 @@ add:
        card->controls_count += kcontrol->count;
        kcontrol->id.numid = card->last_numid + 1;
        card->last_numid += kcontrol->count;
+       count = kcontrol->count;
        up_write(&card->controls_rwsem);
-       for (idx = 0; idx < kcontrol->count; idx++, id.index++, id.numid++)
+       for (idx = 0; idx < count; idx++, id.index++, id.numid++)
                snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_ADD, &id);
        return 0;
 
@@ -897,9 +908,9 @@ static int snd_ctl_elem_write(struct snd_card *card, struct snd_ctl_file *file,
                        result = kctl->put(kctl, control);
                }
                if (result > 0) {
+                       struct snd_ctl_elem_id id = control->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE,
-                                      &control->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);
                        return 0;
                }
        }
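
The stack copy of the id matters because another thread may remove and free the kcontrol the moment controls_rwsem is dropped; the notification then dereferences a local snapshot instead of possibly-freed memory:

	struct snd_ctl_elem_id id = control->id; /* snapshot while locked  */
	up_read(&card->controls_rwsem);          /* 'control' may be freed */
	snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_VALUE, &id);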
@@ -991,6 +1002,7 @@ static int snd_ctl_elem_unlock(struct snd_ctl_file *file,
 
 struct user_element {
        struct snd_ctl_elem_info info;
+       struct snd_card *card;
        void *elem_data;                /* element data */
        unsigned long elem_data_size;   /* size of element data in bytes */
        void *tlv_data;                 /* TLV data */
@@ -1034,7 +1046,9 @@ static int snd_ctl_elem_user_get(struct snd_kcontrol *kcontrol,
 {
        struct user_element *ue = kcontrol->private_data;
 
+       mutex_lock(&ue->card->user_ctl_lock);
        memcpy(&ucontrol->value, ue->elem_data, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return 0;
 }
 
@@ -1043,10 +1057,12 @@ static int snd_ctl_elem_user_put(struct snd_kcontrol *kcontrol,
 {
        int change;
        struct user_element *ue = kcontrol->private_data;
-       
+
+       mutex_lock(&ue->card->user_ctl_lock);
        change = memcmp(&ucontrol->value, ue->elem_data, ue->elem_data_size) != 0;
        if (change)
                memcpy(ue->elem_data, &ucontrol->value, ue->elem_data_size);
+       mutex_unlock(&ue->card->user_ctl_lock);
        return change;
 }
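
Both accessors now serialize on user_ctl_lock because elem_data is plain shared memory: a get() racing a put() could otherwise copy out a torn, half-updated value. The skeleton of the pattern:

	mutex_lock(&ue->card->user_ctl_lock);
	/* read or write ue->elem_data; the lock makes the access atomic
	 * with respect to other handles of the same user control       */
	mutex_unlock(&ue->card->user_ctl_lock);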
 
@@ -1066,19 +1082,32 @@ static int snd_ctl_elem_user_tlv(struct snd_kcontrol *kcontrol,
                new_data = memdup_user(tlv, size);
                if (IS_ERR(new_data))
                        return PTR_ERR(new_data);
+               mutex_lock(&ue->card->user_ctl_lock);
                change = ue->tlv_data_size != size;
                if (!change)
                        change = memcmp(ue->tlv_data, new_data, size);
                kfree(ue->tlv_data);
                ue->tlv_data = new_data;
                ue->tlv_data_size = size;
+               mutex_unlock(&ue->card->user_ctl_lock);
        } else {
-               if (! ue->tlv_data_size || ! ue->tlv_data)
-                       return -ENXIO;
-               if (size < ue->tlv_data_size)
-                       return -ENOSPC;
+               int ret = 0;
+
+               mutex_lock(&ue->card->user_ctl_lock);
+               if (!ue->tlv_data_size || !ue->tlv_data) {
+                       ret = -ENXIO;
+                       goto err_unlock;
+               }
+               if (size < ue->tlv_data_size) {
+                       ret = -ENOSPC;
+                       goto err_unlock;
+               }
                if (copy_to_user(tlv, ue->tlv_data, ue->tlv_data_size))
-                       return -EFAULT;
+                       ret = -EFAULT;
+err_unlock:
+               mutex_unlock(&ue->card->user_ctl_lock);
+               if (ret)
+                       return ret;
        }
        return change;
 }
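
The read-back branch now funnels every failure through a single unlock site, the usual kernel single-exit idiom; its skeleton (variable names hypothetical):

	int ret = 0;

	mutex_lock(&lock);
	if (!have_data) {
		ret = -ENXIO;
		goto err_unlock;
	}
	if (copy_to_user(dst, src, n))
		ret = -EFAULT;
err_unlock:
	mutex_unlock(&lock);
	return ret;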
@@ -1136,8 +1165,6 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        struct user_element *ue;
        int idx, err;
 
-       if (!replace && card->user_ctl_count >= MAX_USER_CONTROLS)
-               return -ENOMEM;
        if (info->count < 1)
                return -EINVAL;
        access = info->access == 0 ? SNDRV_CTL_ELEM_ACCESS_READWRITE :
@@ -1146,21 +1173,16 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
                                 SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE));
        info->id.numid = 0;
        memset(&kctl, 0, sizeof(kctl));
-       down_write(&card->controls_rwsem);
-       _kctl = snd_ctl_find_id(card, &info->id);
-       err = 0;
-       if (_kctl) {
-               if (replace)
-                       err = snd_ctl_remove(card, _kctl);
-               else
-                       err = -EBUSY;
-       } else {
-               if (replace)
-                       err = -ENOENT;
+
+       if (replace) {
+               err = snd_ctl_remove_user_ctl(file, &info->id);
+               if (err)
+                       return err;
        }
-       up_write(&card->controls_rwsem);
-       if (err < 0)
-               return err;
+
+       if (card->user_ctl_count >= MAX_USER_CONTROLS)
+               return -ENOMEM;
+
        memcpy(&kctl.id, &info->id, sizeof(info->id));
        kctl.count = info->owner ? info->owner : 1;
        access |= SNDRV_CTL_ELEM_ACCESS_USER;
@@ -1210,6 +1232,7 @@ static int snd_ctl_elem_add(struct snd_ctl_file *file,
        ue = kzalloc(sizeof(struct user_element) + private_size, GFP_KERNEL);
        if (ue == NULL)
                return -ENOMEM;
+       ue->card = card;
        ue->info = *info;
        ue->info.access = 0;
        ue->elem_data = (char *)ue + sizeof(*ue);
@@ -1321,8 +1344,9 @@ static int snd_ctl_tlv_ioctl(struct snd_ctl_file *file,
                }
                err = kctl->tlv.c(kctl, op_flag, tlv.length, _tlv->tlv);
                if (err > 0) {
+                       struct snd_ctl_elem_id id = kctl->id;
                        up_read(&card->controls_rwsem);
-                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &kctl->id);
+                       snd_ctl_notify(card, SNDRV_CTL_EVENT_MASK_TLV, &id);
                        return 0;
                }
        } else {
index 5ee8384..7bdfd19 100644 (file)
@@ -232,6 +232,7 @@ int snd_card_new(struct device *parent, int idx, const char *xid,
        INIT_LIST_HEAD(&card->devices);
        init_rwsem(&card->controls_rwsem);
        rwlock_init(&card->ctl_files_rwlock);
+       mutex_init(&card->user_ctl_lock);
        INIT_LIST_HEAD(&card->controls);
        INIT_LIST_HEAD(&card->ctl_files);
        spin_lock_init(&card->files_lock);
index 6af50eb..70faa3a 100644 (file)
@@ -379,11 +379,11 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
        struct special_params *params = bebob->maudio_special_quirk;
        int err, id;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
        if (id >= ARRAY_SIZE(special_clk_labels))
-               return 0;
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob, id,
                                         params->dig_in_fmt,
@@ -391,7 +391,10 @@ static int special_clk_ctl_put(struct snd_kcontrol *kctl,
                                         params->clk_lock);
        mutex_unlock(&bebob->mutex);
 
-       return err >= 0;
+       if (err >= 0)
+               err = 1;
+
+       return err;
 }
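
The err >= 0 → 1 rewrite follows the ALSA kcontrol contract: a .put handler returns 1 when the value changed (so listeners get notified), 0 when unchanged, and a negative errno on failure. A sketch (the helper name is hypothetical):

	static int example_put(struct snd_kcontrol *kctl,
			       struct snd_ctl_elem_value *uval)
	{
		int err = apply_hw_setting(uval);	/* hypothetical */

		if (err < 0)
			return err;	/* failure                         */
		return 1;		/* changed: notification is sent   */
	}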
 static struct snd_kcontrol_new special_clk_ctl = {
        .name   = "Clock Source",
@@ -434,8 +437,8 @@ static struct snd_kcontrol_new special_sync_ctl = {
        .get    = special_sync_ctl_get,
 };
 
-/* Digital interface control for special firmware */
-static char *const special_dig_iface_labels[] = {
+/* Digital input interface control for special firmware */
+static char *const special_dig_in_iface_labels[] = {
        "S/PDIF Optical", "S/PDIF Coaxial", "ADAT Optical"
 };
 static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
@@ -443,13 +446,13 @@ static int special_dig_in_iface_ctl_info(struct snd_kcontrol *kctl,
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels);
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_in_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item]);
+              special_dig_in_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -491,26 +494,36 @@ static int special_dig_in_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id, dig_in_fmt, dig_in_iface;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_in_iface_labels))
+               return -EINVAL;
 
        /* decode user value */
        dig_in_fmt = (id >> 1) & 0x01;
        dig_in_iface = id & 0x01;
 
+       mutex_lock(&bebob->mutex);
+
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         dig_in_fmt,
                                         params->dig_out_fmt,
                                         params->clk_lock);
-       if ((err < 0) || (params->dig_in_fmt > 0)) /* ADAT */
+       if (err < 0)
+               goto end;
+
+       /* For ADAT, only the optical interface is available. */
+       if (params->dig_in_fmt > 0) {
+               err = 1;
                goto end;
+       }
 
+       /* For S/PDIF, optical/coaxial interfaces are selectable. */
        err = avc_audio_set_selector(bebob->unit, 0x00, 0x04, dig_in_iface);
        if (err < 0)
                dev_err(&bebob->unit->device,
                        "fail to set digital input interface: %d\n", err);
+       err = 1;
 end:
        special_stream_formation_set(bebob);
        mutex_unlock(&bebob->mutex);
@@ -525,18 +538,22 @@ static struct snd_kcontrol_new special_dig_in_iface_ctl = {
        .put    = special_dig_in_iface_ctl_set
 };
 
+/* Digital output interface control for special firmware */
+static char *const special_dig_out_iface_labels[] = {
+       "S/PDIF Optical and Coaxial", "ADAT Optical"
+};
 static int special_dig_out_iface_ctl_info(struct snd_kcontrol *kctl,
                                          struct snd_ctl_elem_info *einf)
 {
        einf->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
        einf->count = 1;
-       einf->value.enumerated.items = ARRAY_SIZE(special_dig_iface_labels) - 1;
+       einf->value.enumerated.items = ARRAY_SIZE(special_dig_out_iface_labels);
 
        if (einf->value.enumerated.item >= einf->value.enumerated.items)
                einf->value.enumerated.item = einf->value.enumerated.items - 1;
 
        strcpy(einf->value.enumerated.name,
-              special_dig_iface_labels[einf->value.enumerated.item + 1]);
+              special_dig_out_iface_labels[einf->value.enumerated.item]);
 
        return 0;
 }
@@ -558,16 +575,20 @@ static int special_dig_out_iface_ctl_set(struct snd_kcontrol *kctl,
        unsigned int id;
        int err;
 
-       mutex_lock(&bebob->mutex);
-
        id = uval->value.enumerated.item[0];
+       if (id >= ARRAY_SIZE(special_dig_out_iface_labels))
+               return -EINVAL;
+
+       mutex_lock(&bebob->mutex);
 
        err = avc_maudio_set_special_clk(bebob,
                                         params->clk_src,
                                         params->dig_in_fmt,
                                         id, params->clk_lock);
-       if (err >= 0)
+       if (err >= 0) {
                special_stream_formation_set(bebob);
+               err = 1;
+       }
 
        mutex_unlock(&bebob->mutex);
        return err;
index b684c6e..dabe419 100644 (file)
@@ -898,6 +898,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
                        if (!strcmp(codec->modelname, models->name)) {
                                codec->fixup_id = models->id;
                                codec->fixup_name = models->name;
+                               codec->fixup_list = fixlist;
                                codec->fixup_forced = 1;
                                return;
                        }
index 480bbdd..6df04d9 100644 (file)
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
                                dsp_unlock(azx_dev);
                                return azx_dev;
                        }
-                       if (!res)
+                       if (!res ||
+                           (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
                                res = azx_dev;
                }
                dsp_unlock(azx_dev);
index 9d07e4e..8b4940b 100644 (file)
 #include <linux/module.h>
 #include <sound/core.h>
 #include <drm/i915_powerwell.h>
+#include "hda_priv.h"
 #include "hda_i915.h"
 
-static void (*get_power)(void);
-static void (*put_power)(void);
+/* Intel HSW/BDW display HDA controller Extended Mode registers.
+ * EM4 (M value) and EM5 (N value) are used to convert CDCLK (Core Display
+ * Clock) to a 24 MHz BCLK: BCLK = CDCLK * M / N.
+ * The values will be lost when the display power well is disabled.
+ */
+#define ICH6_REG_EM4                   0x100c
+#define ICH6_REG_EM5                   0x1010
+
+static int (*get_power)(void);
+static int (*put_power)(void);
+static int (*get_cdclk)(void);
 
-void hda_display_power(bool enable)
+int hda_display_power(bool enable)
 {
        if (!get_power || !put_power)
-               return;
+               return -ENODEV;
 
        pr_debug("HDA display power %s \n",
                        enable ? "Enable" : "Disable");
        if (enable)
-               get_power();
+               return get_power();
        else
-               put_power();
+               return put_power();
+}
+
+void haswell_set_bclk(struct azx *chip)
+{
+       int cdclk_freq;
+       unsigned int bclk_m, bclk_n;
+
+       if (!get_cdclk)
+               return;
+
+       cdclk_freq = get_cdclk();
+       switch (cdclk_freq) {
+       case 337500:
+               bclk_m = 16;
+               bclk_n = 225;
+               break;
+
+       case 450000:
+       default: /* default CDCLK 450MHz */
+               bclk_m = 4;
+               bclk_n = 75;
+               break;
+
+       case 540000:
+               bclk_m = 4;
+               bclk_n = 90;
+               break;
+
+       case 675000:
+               bclk_m = 8;
+               bclk_n = 225;
+               break;
+       }
+
+       azx_writew(chip, EM4, bclk_m);
+       azx_writew(chip, EM5, bclk_n);
 }
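
Every (M, N) pair in the switch satisfies BCLK = CDCLK * M / N = 24 MHz; a quick integer check, working in kHz (illustrative self-test, not driver code):

	#include <assert.h>

	static void check_bclk_table(void)
	{
		assert(337500 * 16 / 225 == 24000);
		assert(450000 *  4 /  75 == 24000);
		assert(540000 *  4 /  90 == 24000);
		assert(675000 *  8 / 225 == 24000);
	}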
 
+
 int hda_i915_init(void)
 {
        int err = 0;
@@ -55,6 +102,10 @@ int hda_i915_init(void)
                return -ENODEV;
        }
 
+       get_cdclk = symbol_request(i915_get_cdclk_freq);
+       if (!get_cdclk) /* BCLK and the audio playback rate may then be abnormal */
+               pr_warn("hda-i915: failed to get the i915 CDCLK symbol\n");
+
        pr_debug("HDA driver get symbol successfully from i915 module\n");
 
        return err;
@@ -70,6 +121,10 @@ int hda_i915_exit(void)
                symbol_put(i915_release_power_well);
                put_power = NULL;
        }
+       if (get_cdclk) {
+               symbol_put(i915_get_cdclk_freq);
+               get_cdclk = NULL;
+       }
 
        return 0;
 }
index 5a63da2..e6072c6 100644 (file)
 #define __SOUND_HDA_I915_H
 
 #ifdef CONFIG_SND_HDA_I915
-void hda_display_power(bool enable);
+int hda_display_power(bool enable);
+void haswell_set_bclk(struct azx *chip);
 int hda_i915_init(void);
 int hda_i915_exit(void);
 #else
-static inline void hda_display_power(bool enable) {}
+static inline int hda_display_power(bool enable) { return 0; }
+static inline void haswell_set_bclk(struct azx *chip) { return; }
 static inline int hda_i915_init(void)
 {
        return -ENODEV;
index bb65a12..83cd190 100644 (file)
@@ -62,9 +62,9 @@
 #include <linux/vga_switcheroo.h>
 #include <linux/firmware.h>
 #include "hda_codec.h"
-#include "hda_i915.h"
 #include "hda_controller.h"
 #include "hda_priv.h"
+#include "hda_i915.h"
 
 
 static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -227,7 +227,7 @@ enum {
 /* quirks for Intel PCH */
 #define AZX_DCAPS_INTEL_PCH_NOPM \
        (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
-        AZX_DCAPS_COUNT_LPIB_DELAY)
+        AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
 
 #define AZX_DCAPS_INTEL_PCH \
        (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -288,6 +288,11 @@ static char *driver_short_names[] = {
        [AZX_DRIVER_GENERIC] = "HD-Audio Generic",
 };
 
+struct hda_intel {
+       struct azx chip;
+};
+
+
 #ifdef CONFIG_X86
 static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
 {
@@ -591,7 +596,7 @@ static int azx_suspend(struct device *dev)
        struct azx *chip = card->private_data;
        struct azx_pcm *p;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -606,6 +611,7 @@ static int azx_suspend(struct device *dev)
                free_irq(chip->irq, chip);
                chip->irq = -1;
        }
+
        if (chip->msi)
                pci_disable_msi(chip->pci);
        pci_disable_device(pci);
@@ -622,11 +628,13 @@ static int azx_resume(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
                hda_display_power(true);
+               haswell_set_bclk(chip);
+       }
        pci_set_power_state(pci, PCI_D0);
        pci_restore_state(pci);
        if (pci_enable_device(pci) < 0) {
@@ -657,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -672,6 +680,7 @@ static int azx_runtime_suspend(struct device *dev)
        azx_clear_irq_pending(chip);
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
                hda_display_power(false);
+
        return 0;
 }
 
@@ -683,14 +692,16 @@ static int azx_runtime_resume(struct device *dev)
        struct hda_codec *codec;
        int status;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
                return 0;
 
-       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
                hda_display_power(true);
+               haswell_set_bclk(chip);
+       }
 
        /* Read STATESTS before controller reset */
        status = azx_readw(chip, STATESTS);
@@ -718,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
        struct snd_card *card = dev_get_drvdata(dev);
        struct azx *chip = card->private_data;
 
-       if (chip->disabled)
+       if (chip->disabled || chip->init_failed)
                return 0;
 
        if (!power_save_controller ||
@@ -883,6 +894,8 @@ static int register_vga_switcheroo(struct azx *chip)
 static int azx_free(struct azx *chip)
 {
        struct pci_dev *pci = chip->pci;
+       struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
+
        int i;
 
        if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
@@ -930,7 +943,7 @@ static int azx_free(struct azx *chip)
                hda_display_power(false);
                hda_i915_exit();
        }
-       kfree(chip);
+       kfree(hda);
 
        return 0;
 }
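
azx_free() now frees the enclosing wrapper rather than chip itself: chip is an embedded member, and even though it currently sits at offset 0, container_of() recovers the wrapper without relying on that layout. The pattern, condensed:

	struct hda_intel {
		struct azx chip;		/* embedded, not a pointer */
	};

	/* allocate the wrapper, hand out the embedded member ...    */
	struct hda_intel *hda = kzalloc(sizeof(*hda), GFP_KERNEL);
	struct azx *chip = &hda->chip;

	/* ... and recover the wrapper when it is time to free:      */
	kfree(container_of(chip, struct hda_intel, chip));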
@@ -1174,6 +1187,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        static struct snd_device_ops ops = {
                .dev_free = azx_dev_free,
        };
+       struct hda_intel *hda;
        struct azx *chip;
        int err;
 
@@ -1183,13 +1197,14 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
        if (err < 0)
                return err;
 
-       chip = kzalloc(sizeof(*chip), GFP_KERNEL);
-       if (!chip) {
-               dev_err(card->dev, "Cannot allocate chip\n");
+       hda = kzalloc(sizeof(*hda), GFP_KERNEL);
+       if (!hda) {
+               dev_err(card->dev, "Cannot allocate hda\n");
                pci_disable_device(pci);
                return -ENOMEM;
        }
 
+       chip = &hda->chip;
        spin_lock_init(&chip->reg_lock);
        mutex_init(&chip->open_mutex);
        chip->card = card;
@@ -1375,6 +1390,10 @@ static int azx_first_init(struct azx *chip)
 
        /* initialize chip */
        azx_init_pci(chip);
+
+       if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
+               haswell_set_bclk(chip);
+
        azx_init_chip(chip, (probe_only[dev] & 2) == 0);
 
        /* codec detection */
@@ -1656,8 +1675,13 @@ static int azx_probe_continue(struct azx *chip)
                                "Error request power-well from i915\n");
                        goto out_free;
                }
+               err = hda_display_power(true);
+               if (err < 0) {
+                       dev_err(chip->card->dev,
+                               "Cannot turn on display power on i915\n");
+                       goto out_free;
+               }
 #endif
-               hda_display_power(true);
        }
 
        err = azx_first_init(chip);
index ebd1fa6..4e2d486 100644 (file)
@@ -417,6 +417,27 @@ struct snd_hda_pin_quirk {
        int value;                      /* quirk value */
 };
 
+#ifdef CONFIG_SND_DEBUG_VERBOSE
+
+#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
+       { .codec = _codec,\
+         .subvendor = _subvendor,\
+         .name = _name,\
+         .value = _value,\
+         .pins = (const struct hda_pintbl[]) { _pins } \
+       }
+#else
+
+#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
+       { .codec = _codec,\
+         .subvendor = _subvendor,\
+         .value = _value,\
+         .pins = (const struct hda_pintbl[]) { _pins } \
+       }
+
+#endif
+
+
 /* fixup types */
 enum {
        HDA_FIXUP_INVALID,
index 4a7cb01..e9d1a57 100644 (file)
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
 #define AZX_DCAPS_BUFSIZE      (1 << 21)       /* no buffer size alignment */
 #define AZX_DCAPS_ALIGN_BUFSIZE        (1 << 22)       /* buffer size alignment */
 #define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23)   /* BDLE in 4k boundary */
+#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24)     /* Assign devices in reverse order */
 #define AZX_DCAPS_COUNT_LPIB_DELAY  (1 << 25)  /* Take LPIB as delay */
 #define AZX_DCAPS_PM_RUNTIME   (1 << 26)       /* runtime PM support */
 #define AZX_DCAPS_I915_POWERWELL (1 << 27)     /* HSW i915 powerwell support */
index a366ba9..358414d 100644 (file)
@@ -236,6 +236,7 @@ disable_hda:
        return rc;
 }
 
+#ifdef CONFIG_PM_SLEEP
 static void hda_tegra_disable_clocks(struct hda_tegra *data)
 {
        clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
        clk_disable_unprepare(data->hda_clk);
 }
 
-#ifdef CONFIG_PM_SLEEP
 /*
  * power management
  */
index 3e4417b..ba4ca52 100644 (file)
@@ -2204,7 +2204,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
        struct hdmi_spec *spec = codec->spec;
        int pin_idx;
 
-       generic_hdmi_init(codec);
+       codec->patch_ops.init(codec);
        snd_hda_codec_resume_amp(codec);
        snd_hda_codec_resume_cache(codec);
 
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
 { .id = 0x10de0051, .name = "GPU 51 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0060, .name = "GPU 60 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0067, .name = "MCP67 HDMI",      .patch = patch_nvhdmi_2ch },
+{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de0071, .name = "GPU 71 HDMI/DP",  .patch = patch_nvhdmi },
 { .id = 0x10de8001, .name = "MCP73 HDMI",      .patch = patch_nvhdmi_2ch },
 { .id = 0x11069f80, .name = "VX900 HDMI/DP",   .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
 MODULE_ALIAS("snd-hda-codec-id:10de0051");
 MODULE_ALIAS("snd-hda-codec-id:10de0060");
 MODULE_ALIAS("snd-hda-codec-id:10de0067");
+MODULE_ALIAS("snd-hda-codec-id:10de0070");
 MODULE_ALIAS("snd-hda-codec-id:10de0071");
 MODULE_ALIAS("snd-hda-codec-id:10de8001");
 MODULE_ALIAS("snd-hda-codec-id:11069f80");
index af76995..b60824e 100644 (file)
@@ -4880,6 +4880,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4962,228 +4963,141 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
 };
 
 static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60140},
-                       {0x14, 0x90170110},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211020},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170120},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211030},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170120},
-                       {0x17, 0x90170140},
-                       {0x18, 0x40000000},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41163b05},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x0321102f},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170130},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211040},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170140},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211050},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60170},
-                       {0x14, 0x90170120},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211030},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0255,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60170},
-                       {0x14, 0x90170130},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211040},
-               },
-               .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0283,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60130},
-                       {0x14, 0x90170110},
-                       {0x17, 0x40020008},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40e00001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x0321101f},
-               },
-               .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0283,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60160},
-                       {0x14, 0x90170120},
-                       {0x17, 0x40000000},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-                       {0x21, 0x02211030},
-               },
-               .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0292,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x90a60140},
-                       {0x13, 0x411111f0},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0221401f},
-                       {0x16, 0x411111f0},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-               },
-               .value = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
-       },
-       {
-               .codec = 0x10ec0293,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x40000000},
-                       {0x13, 0x90a60140},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0221401f},
-                       {0x16, 0x21014020},
-                       {0x18, 0x411111f0},
-                       {0x19, 0x21a19030},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x40700001},
-                       {0x1e, 0x411111f0},
-               },
-               .value = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
-       },
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211020}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x90170140},
+               {0x18, 0x40000000},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41163b05},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0321102f}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170130},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170140},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211050}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60170},
+               {0x14, 0x90170130},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x17, 0x40020008},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40e00001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x0321101f}),
+       SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0},
+               {0x21, 0x02211030}),
+       SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
+               {0x12, 0x90a60140},
+               {0x13, 0x411111f0},
+               {0x14, 0x90170110},
+               {0x15, 0x0221401f},
+               {0x16, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x15, 0x0221401f},
+               {0x16, 0x21014020},
+               {0x18, 0x411111f0},
+               {0x19, 0x21a19030},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
+               {0x12, 0x40000000},
+               {0x13, 0x90a60140},
+               {0x14, 0x90170110},
+               {0x15, 0x0221401f},
+               {0x16, 0x411111f0},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40700001},
+               {0x1e, 0x411111f0}),
        {}
 };
 
@@ -6039,90 +5953,66 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
 };
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x99a30130},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x99a30140},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x99a30150},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
-       {
-               .codec = 0x10ec0668,
-               .subvendor = 0x1028,
-#ifdef CONFIG_SND_DEBUG_VERBOSE
-               .name = "Dell",
-#endif
-               .pins = (const struct hda_pintbl[]) {
-                       {0x12, 0x411111f0},
-                       {0x14, 0x90170110},
-                       {0x15, 0x0321101f},
-                       {0x16, 0x03011020},
-                       {0x18, 0x40000008},
-                       {0x19, 0x411111f0},
-                       {0x1a, 0x411111f0},
-                       {0x1b, 0x411111f0},
-                       {0x1d, 0x41000001},
-                       {0x1e, 0x411111f0},
-                       {0x1f, 0x411111f0},
-               },
-               .value = ALC668_FIXUP_AUTO_MUTE,
-       },
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x99a30130},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x99a30140},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x99a30150},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x411111f0},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x03011020},
+               {0x18, 0x40000008},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x41000001},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
+       SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
+               {0x12, 0x90a60130},
+               {0x14, 0x90170110},
+               {0x15, 0x0321101f},
+               {0x16, 0x40000000},
+               {0x18, 0x411111f0},
+               {0x19, 0x411111f0},
+               {0x1a, 0x411111f0},
+               {0x1b, 0x411111f0},
+               {0x1d, 0x40d6832d},
+               {0x1e, 0x411111f0},
+               {0x1f, 0x411111f0}),
        {}
 };
 
index 7f40a15..3744ea4 100644 (file)
@@ -121,6 +121,12 @@ enum {
        STAC_92HD71BXX_MODELS
 };
 
+enum {
+       STAC_92HD95_HP_LED,
+       STAC_92HD95_HP_BASS,
+       STAC_92HD95_MODELS
+};
+
 enum {
        STAC_925x_REF,
        STAC_M1,
@@ -4128,6 +4134,48 @@ static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
        {} /* terminator */
 };
 
+static void stac92hd95_fixup_hp_led(struct hda_codec *codec,
+                                   const struct hda_fixup *fix, int action)
+{
+       struct sigmatel_spec *spec = codec->spec;
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       if (find_mute_led_cfg(codec, spec->default_polarity))
+               codec_dbg(codec, "mute LED gpio %d polarity %d\n",
+                               spec->gpio_led,
+                               spec->gpio_led_polarity);
+}
+
+static const struct hda_fixup stac92hd95_fixups[] = {
+       [STAC_92HD95_HP_LED] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = stac92hd95_fixup_hp_led,
+       },
+       [STAC_92HD95_HP_BASS] = {
+               .type = HDA_FIXUP_VERBS,
+               .v.verbs = (const struct hda_verb[]) {
+                       {0x1a, 0x795, 0x00}, /* HPF to 100Hz */
+                       {}
+               },
+               .chained = true,
+               .chain_id = STAC_92HD95_HP_LED,
+       },
+};
+
+static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
+       {} /* terminator */
+};
+
+static const struct hda_model_fixup stac92hd95_models[] = {
+       { .id = STAC_92HD95_HP_LED, .name = "hp-led" },
+       { .id = STAC_92HD95_HP_BASS, .name = "hp-bass" },
+       {}
+};
+
+
 static int stac_parse_auto_config(struct hda_codec *codec)
 {
        struct sigmatel_spec *spec = codec->spec;
@@ -4580,10 +4628,16 @@ static int patch_stac92hd95(struct hda_codec *codec)
        spec->gen.beep_nid = 0x19; /* digital beep */
        spec->pwr_nids = stac92hd95_pwr_nids;
        spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids);
-       spec->default_polarity = -1; /* no default cfg */
+       spec->default_polarity = 0;
 
        codec->patch_ops = stac_patch_ops;
 
+       snd_hda_pick_fixup(codec, stac92hd95_models, stac92hd95_fixup_tbl,
+                          stac92hd95_fixups);
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
+
+       stac_setup_gpio(codec);
+
        err = stac_parse_auto_config(codec);
        if (err < 0) {
                stac_free(codec);
@@ -4592,6 +4646,8 @@ static int patch_stac92hd95(struct hda_codec *codec)
 
        codec->proc_widget_hook = stac92hd_proc_hook;
 
+       snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
+
        return 0;
 }
 
index cbfa1e1..0b9571c 100644 (file)
@@ -225,11 +225,11 @@ config SND_SOC_ADAU1373
 config SND_SOC_ADAU1701
        tristate "Analog Devices ADAU1701 CODEC"
        depends on I2C
-       select SND_SOC_SIGMADSP
+       select SND_SOC_SIGMADSP_I2C
 
 config SND_SOC_ADAU17X1
        tristate
-       select SND_SOC_SIGMADSP
+       select SND_SOC_SIGMADSP_REGMAP
 
 config SND_SOC_ADAU1761
        tristate
@@ -476,6 +476,14 @@ config SND_SOC_SIGMADSP
        tristate
        select CRC32
 
+config SND_SOC_SIGMADSP_I2C
+       tristate
+       select SND_SOC_SIGMADSP
+
+config SND_SOC_SIGMADSP_REGMAP
+       tristate
+       select SND_SOC_SIGMADSP
+
 config SND_SOC_SIRF_AUDIO_CODEC
        tristate "SiRF SoC internal audio codec"
        select REGMAP_MMIO
index be3377b..1bd6e1c 100644 (file)
@@ -77,6 +77,8 @@ snd-soc-sgtl5000-objs := sgtl5000.o
 snd-soc-alc5623-objs := alc5623.o
 snd-soc-alc5632-objs := alc5632.o
 snd-soc-sigmadsp-objs := sigmadsp.o
+snd-soc-sigmadsp-i2c-objs := sigmadsp-i2c.o
+snd-soc-sigmadsp-regmap-objs := sigmadsp-regmap.o
 snd-soc-si476x-objs := si476x.o
 snd-soc-sirf-audio-codec-objs := sirf-audio-codec.o
 snd-soc-sn95031-objs := sn95031.o
@@ -240,6 +242,8 @@ obj-$(CONFIG_SND_SOC_RT5651)        += snd-soc-rt5651.o
 obj-$(CONFIG_SND_SOC_RT5677)   += snd-soc-rt5677.o
 obj-$(CONFIG_SND_SOC_SGTL5000)  += snd-soc-sgtl5000.o
 obj-$(CONFIG_SND_SOC_SIGMADSP) += snd-soc-sigmadsp.o
+obj-$(CONFIG_SND_SOC_SIGMADSP_I2C)     += snd-soc-sigmadsp-i2c.o
+obj-$(CONFIG_SND_SOC_SIGMADSP_REGMAP)  += snd-soc-sigmadsp-regmap.o
 obj-$(CONFIG_SND_SOC_SI476X)   += snd-soc-si476x.o
 obj-$(CONFIG_SND_SOC_SN95031)  +=snd-soc-sn95031.o
 obj-$(CONFIG_SND_SOC_SPDIF)    += snd-soc-spdif-rx.o snd-soc-spdif-tx.o
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
new file mode 100644 (file)
index 0000000..246081a
--- /dev/null
@@ -0,0 +1,35 @@
+/*
+ * Load Analog Devices SigmaStudio firmware files
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/i2c.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include "sigmadsp.h"
+
+static int sigma_action_write_i2c(void *control_data,
+       const struct sigma_action *sa, size_t len)
+{
+       return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
+               len);
+}
+
+int process_sigma_firmware(struct i2c_client *client, const char *name)
+{
+       struct sigma_firmware ssfw;
+
+       ssfw.control_data = client;
+       ssfw.write = sigma_action_write_i2c;
+
+       return _process_sigma_firmware(&client->dev, &ssfw, name);
+}
+EXPORT_SYMBOL(process_sigma_firmware);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("SigmaDSP I2C firmware loader");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/sigmadsp-regmap.c b/sound/soc/codecs/sigmadsp-regmap.c
new file mode 100644 (file)
index 0000000..f78ed8d
--- /dev/null
@@ -0,0 +1,36 @@
+/*
+ * Load Analog Devices SigmaStudio firmware files
+ *
+ * Copyright 2009-2011 Analog Devices Inc.
+ *
+ * Licensed under the GPL-2 or later.
+ */
+
+#include <linux/regmap.h>
+#include <linux/export.h>
+#include <linux/module.h>
+
+#include "sigmadsp.h"
+
+static int sigma_action_write_regmap(void *control_data,
+       const struct sigma_action *sa, size_t len)
+{
+       return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
+               sa->payload, len - 2);
+}
+
+int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
+       const char *name)
+{
+       struct sigma_firmware ssfw;
+
+       ssfw.control_data = regmap;
+       ssfw.write = sigma_action_write_regmap;
+
+       return _process_sigma_firmware(dev, &ssfw, name);
+}
+EXPORT_SYMBOL(process_sigma_firmware_regmap);
+
+MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
+MODULE_DESCRIPTION("SigmaDSP regmap firmware loader");
+MODULE_LICENSE("GPL");
index 4068f24..f2de7e0 100644 (file)
@@ -34,23 +34,6 @@ enum {
        SIGMA_ACTION_END,
 };
 
-struct sigma_action {
-       u8 instr;
-       u8 len_hi;
-       __le16 len;
-       __be16 addr;
-       unsigned char payload[];
-} __packed;
-
-struct sigma_firmware {
-       const struct firmware *fw;
-       size_t pos;
-
-       void *control_data;
-       int (*write)(void *control_data, const struct sigma_action *sa,
-                       size_t len);
-};
-
 static inline u32 sigma_action_len(struct sigma_action *sa)
 {
        return (sa->len_hi << 16) | le16_to_cpu(sa->len);
@@ -138,7 +121,7 @@ process_sigma_actions(struct sigma_firmware *ssfw)
        return 0;
 }
 
-static int _process_sigma_firmware(struct device *dev,
+int _process_sigma_firmware(struct device *dev,
        struct sigma_firmware *ssfw, const char *name)
 {
        int ret;
@@ -197,50 +180,6 @@ static int _process_sigma_firmware(struct device *dev,
 
        return ret;
 }
-
-#if IS_ENABLED(CONFIG_I2C)
-
-static int sigma_action_write_i2c(void *control_data,
-       const struct sigma_action *sa, size_t len)
-{
-       return i2c_master_send(control_data, (const unsigned char *)&sa->addr,
-               len);
-}
-
-int process_sigma_firmware(struct i2c_client *client, const char *name)
-{
-       struct sigma_firmware ssfw;
-
-       ssfw.control_data = client;
-       ssfw.write = sigma_action_write_i2c;
-
-       return _process_sigma_firmware(&client->dev, &ssfw, name);
-}
-EXPORT_SYMBOL(process_sigma_firmware);
-
-#endif
-
-#if IS_ENABLED(CONFIG_REGMAP)
-
-static int sigma_action_write_regmap(void *control_data,
-       const struct sigma_action *sa, size_t len)
-{
-       return regmap_raw_write(control_data, be16_to_cpu(sa->addr),
-               sa->payload, len - 2);
-}
-
-int process_sigma_firmware_regmap(struct device *dev, struct regmap *regmap,
-       const char *name)
-{
-       struct sigma_firmware ssfw;
-
-       ssfw.control_data = regmap;
-       ssfw.write = sigma_action_write_regmap;
-
-       return _process_sigma_firmware(dev, &ssfw, name);
-}
-EXPORT_SYMBOL(process_sigma_firmware_regmap);
-
-#endif
+EXPORT_SYMBOL_GPL(_process_sigma_firmware);
 
 MODULE_LICENSE("GPL");
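
With _process_sigma_firmware() exported, a bus-specific front end reduces to a
control_data pointer plus a write callback. A hedged sketch of what a third
transport could look like; this SPI variant is purely hypothetical and not part
of the patch:

  static int sigma_action_write_spi(void *control_data,
          const struct sigma_action *sa, size_t len)
  {
          /* Hypothetical: push the addr+payload bytes over SPI. */
          return spi_write(control_data, &sa->addr, len);
  }

  int process_sigma_firmware_spi(struct spi_device *spi, const char *name)
  {
          struct sigma_firmware ssfw;

          ssfw.control_data = spi;
          ssfw.write = sigma_action_write_spi;

          return _process_sigma_firmware(&spi->dev, &ssfw, name);
  }
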
index e439cbd..c47cd23 100644 (file)
 #include <linux/device.h>
 #include <linux/regmap.h>
 
+struct sigma_action {
+       u8 instr;
+       u8 len_hi;
+       __le16 len;
+       __be16 addr;
+       unsigned char payload[];
+} __packed;
+
+struct sigma_firmware {
+       const struct firmware *fw;
+       size_t pos;
+
+       void *control_data;
+       int (*write)(void *control_data, const struct sigma_action *sa,
+                       size_t len);
+};
+
+int _process_sigma_firmware(struct device *dev,
+       struct sigma_firmware *ssfw, const char *name);
+
 struct i2c_client;
 
 extern int process_sigma_firmware(struct i2c_client *client, const char *name);
index 6bb0ea5..a609aaf 100644 (file)
@@ -923,8 +923,8 @@ static int fsl_soc_dma_probe(struct platform_device *pdev)
        dma->dai.pcm_free = fsl_dma_free_dma_buffers;
 
        /* Store the SSI-specific information that we need */
-       dma->ssi_stx_phys = res.start + offsetof(struct ccsr_ssi, stx0);
-       dma->ssi_srx_phys = res.start + offsetof(struct ccsr_ssi, srx0);
+       dma->ssi_stx_phys = res.start + CCSR_SSI_STX0;
+       dma->ssi_srx_phys = res.start + CCSR_SSI_SRX0;
 
        iprop = of_get_property(ssi_np, "fsl,fifo-depth", NULL);
        if (iprop)
index b912d45..d7a6061 100644 (file)
@@ -762,7 +762,7 @@ static int fsl_spdif_vbit_get(struct snd_kcontrol *kcontrol,
        struct regmap *regmap = spdif_priv->regmap;
        u32 val;
 
-       val = regmap_read(regmap, REG_SPDIF_SIS, &val);
+       regmap_read(regmap, REG_SPDIF_SIS, &val);
        ucontrol->value.integer.value[0] = (val & INT_VAL_NOGOOD) != 0;
        regmap_write(regmap, REG_SPDIF_SIC, INT_VAL_NOGOOD);
 
@@ -1076,7 +1076,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
                                goto out;
                        } else if (arate / rate[index] == 1) {
                                /* A little bigger than expect */
-                               sub = (arate - rate[index]) * 100000;
+                               sub = (u64)(arate - rate[index]) * 100000;
                                do_div(sub, rate[index]);
                                if (sub >= savesub)
                                        continue;
@@ -1086,7 +1086,7 @@ static u32 fsl_spdif_txclk_caldiv(struct fsl_spdif_priv *spdif_priv,
                                spdif_priv->txrate[index] = arate;
                        } else if (rate[index] / arate == 1) {
                                /* A little smaller than expect */
-                               sub = (rate[index] - arate) * 100000;
+                               sub = (u64)(rate[index] - arate) * 100000;
                                do_div(sub, rate[index]);
                                if (sub >= savesub)
                                        continue;
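
The (u64) casts above are the whole fix: arate and rate[] are 32-bit, so the
difference times 100000 was evaluated in 32 bits and could wrap before do_div()
ever saw it. A standalone illustration with made-up rates (user-space types for
brevity):

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint32_t arate = 192000, rate = 96000;
          /* 96000 * 100000 = 9.6e9 > 2^32: the 32-bit product wraps */
          uint32_t wrong = (arate - rate) * 100000;
          uint64_t right = (uint64_t)(arate - rate) * 100000;

          printf("wrong=%u right=%llu\n",
                 (unsigned)wrong, (unsigned long long)right);
          return 0;
  }
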
index 0849b7b..0db94f4 100644 (file)
@@ -59,7 +59,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
 {
        return devm_snd_dmaengine_pcm_register(&pdev->dev,
                &imx_dmaengine_pcm_config,
-               SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
                SND_DMAENGINE_PCM_FLAG_COMPAT);
 }
 EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
index 6acb225..2434b6d 100644 (file)
@@ -11,6 +11,7 @@ config SND_PXA2XX_SOC
 config SND_MMP_SOC
        bool "Soc Audio for Marvell MMP chips"
        depends on ARCH_MMP
+       select MMP_SRAM
        select SND_SOC_GENERIC_DMAENGINE_PCM
        select SND_ARM
        help
@@ -40,7 +41,7 @@ config SND_MMP_SOC_SSPA
 
 config SND_PXA2XX_SOC_CORGI
        tristate "SoC Audio support for Sharp Zaurus SL-C7x0"
-       depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx
+       depends on SND_PXA2XX_SOC && PXA_SHARP_C7xx && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8731
        help
@@ -49,7 +50,7 @@ config SND_PXA2XX_SOC_CORGI
 
 config SND_PXA2XX_SOC_SPITZ
        tristate "SoC Audio support for Sharp Zaurus SL-Cxx00"
-       depends on SND_PXA2XX_SOC && PXA_SHARP_Cxx00
+       depends on SND_PXA2XX_SOC && PXA_SHARP_Cxx00 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8750
        help
@@ -58,7 +59,7 @@ config SND_PXA2XX_SOC_SPITZ
 
 config SND_PXA2XX_SOC_Z2
        tristate "SoC Audio support for Zipit Z2"
-       depends on SND_PXA2XX_SOC && MACH_ZIPIT2
+       depends on SND_PXA2XX_SOC && MACH_ZIPIT2 && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8750
        help
@@ -66,7 +67,7 @@ config SND_PXA2XX_SOC_Z2
 
 config SND_PXA2XX_SOC_POODLE
        tristate "SoC Audio support for Poodle"
-       depends on SND_PXA2XX_SOC && MACH_POODLE
+       depends on SND_PXA2XX_SOC && MACH_POODLE && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_SOC_WM8731
        help
@@ -181,7 +182,7 @@ config SND_PXA2XX_SOC_HX4700
 
 config SND_PXA2XX_SOC_MAGICIAN
        tristate "SoC Audio support for HTC Magician"
-       depends on SND_PXA2XX_SOC && MACH_MAGICIAN
+       depends on SND_PXA2XX_SOC && MACH_MAGICIAN && I2C
        select SND_PXA2XX_SOC_I2S
        select SND_PXA_SOC_SSP
        select SND_SOC_UDA1380
index 9188015..4e86265 100644 (file)
@@ -315,7 +315,7 @@ static void rsnd_dma_of_name(struct rsnd_dma *dma,
                dst_mod = mod[index];
        } else {
                src_mod = mod[index];
-               dst_mod = mod[index + 1];
+               dst_mod = mod[index - 1];
        }
 
        index = 0;
index a74b9bf..cdc837e 100644 (file)
@@ -2755,7 +2755,7 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        unsigned int mask = (1 << fls(max)) - 1;
        unsigned int invert = mc->invert;
        unsigned int val;
-       int connect, change;
+       int connect, change, reg_change = 0;
        struct snd_soc_dapm_update update;
        int ret = 0;
 
@@ -2773,20 +2773,23 @@ int snd_soc_dapm_put_volsw(struct snd_kcontrol *kcontrol,
        mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
 
        change = dapm_kcontrol_set_value(kcontrol, val);
-       if (change) {
-               if (reg != SND_SOC_NOPM) {
-                       mask = mask << shift;
-                       val = val << shift;
-
-                       if (snd_soc_test_bits(codec, reg, mask, val)) {
-                               update.kcontrol = kcontrol;
-                               update.reg = reg;
-                               update.mask = mask;
-                               update.val = val;
-                               card->update = &update;
-                       }
 
+       if (reg != SND_SOC_NOPM) {
+               mask = mask << shift;
+               val = val << shift;
+
+               reg_change = snd_soc_test_bits(codec, reg, mask, val);
+       }
+
+       if (change || reg_change) {
+               if (reg_change) {
+                       update.kcontrol = kcontrol;
+                       update.reg = reg;
+                       update.mask = mask;
+                       update.val = val;
+                       card->update = &update;
                }
+               change |= reg_change;
 
                ret = soc_dapm_mixer_update_power(card, kcontrol, connect);
 
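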
index c3b5b7d..a09e5f3 100644 (file)
@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
 
 static int snd_usb_audio_free(struct snd_usb_audio *chip)
 {
+       struct list_head *p, *n;
+
+       list_for_each_safe(p, n, &chip->ep_list)
+               snd_usb_endpoint_free(p);
+
        mutex_destroy(&chip->mutex);
        kfree(chip);
        return 0;
@@ -585,7 +590,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
                                     struct snd_usb_audio *chip)
 {
        struct snd_card *card;
-       struct list_head *p, *n;
+       struct list_head *p;
 
        if (chip == (void *)-1L)
                return;
@@ -598,14 +603,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
        mutex_lock(&register_mutex);
        chip->num_interfaces--;
        if (chip->num_interfaces <= 0) {
+               struct snd_usb_endpoint *ep;
+
                snd_card_disconnect(card);
                /* release the pcm resources */
                list_for_each(p, &chip->pcm_list) {
                        snd_usb_stream_disconnect(p);
                }
                /* release the endpoint resources */
-               list_for_each_safe(p, n, &chip->ep_list) {
-                       snd_usb_endpoint_free(p);
+               list_for_each_entry(ep, &chip->ep_list, list) {
+                       snd_usb_endpoint_release(ep);
                }
                /* release the midi resources */
                list_for_each(p, &chip->midi_list) {
index 289f582..114e3e7 100644 (file)
@@ -986,20 +986,31 @@ void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
        wait_clear_urbs(ep);
 }
 
+/**
+ * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
+ *
+ * @ep: the endpoint to release
+ *
+ * This function ignores the endpoint's use count and tears down all the
+ * streaming URBs immediately.
+ */
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
+{
+       release_urbs(ep, 1);
+}
+
 /**
  * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
  *
  * @ep: the list header of the endpoint to free
  *
- * This function does not care for the endpoint's use count but will tear
- * down all the streaming URBs immediately and free all resources.
+ * This frees all resources of the given ep.
  */
 void snd_usb_endpoint_free(struct list_head *head)
 {
        struct snd_usb_endpoint *ep;
 
        ep = list_entry(head, struct snd_usb_endpoint, list);
-       release_urbs(ep, 1);
        kfree(ep);
 }
 
index 1c7e8ee..e61ee5c 100644 (file)
@@ -23,6 +23,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
 int  snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
+void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
 void snd_usb_endpoint_free(struct list_head *head);
 
 int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
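
The resulting two-phase teardown, mirroring the card.c hunks above: URBs are
killed at disconnect time while the device is still around, and memory is freed
only when the last user of the chip is gone. The wrapper functions here are
illustrative only; the two endpoint calls are from the patch.

  static void example_disconnect(struct snd_usb_audio *chip)
  {
          struct snd_usb_endpoint *ep;

          list_for_each_entry(ep, &chip->ep_list, list)
                  snd_usb_endpoint_release(ep);   /* stop URBs, struct stays */
  }

  static void example_free(struct snd_usb_audio *chip)
  {
          struct list_head *p, *n;

          list_for_each_safe(p, n, &chip->ep_list)
                  snd_usb_endpoint_free(p);       /* now safe to kfree */
  }
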
index c342f70..ee53a42 100644 (file)
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
 
 static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_lock(&lock->mutex);
 }
 
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
 
 static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
 }
 
index a680ab8..4ec03f8 100644 (file)
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
 
 static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_rdlock(&lock->rwlock);
 
 }
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
 
 static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_wrlock(&lock->rwlock);
 }
 
 static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
 static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
 {
-       lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
 }
 
index 23bd69c..6f80360 100644 (file)
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
 static void init_preload(void);
 static void try_init_preload(void)
 {
-       if (!__init_state != done)
+       if (__init_state != done)
                init_preload();
 }
 
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
 
        try_init_preload();
 
-       lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL,
+       lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
                        (unsigned long)_RET_IP_);
        /*
         * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
 
        try_init_preload();
 
-       lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_mutex_trylock(mutex);
        if (r)
                lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
         */
        r = ll_pthread_mutex_unlock(mutex);
        if (r)
-               lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+               lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
        return r;
 }
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_rdlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_tryrdlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_trywrlock(rwlock);
        if (r)
                 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
 
         init_preload();
 
-       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+       lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_wrlock(rwlock);
        if (r)
                lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
        lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
        r = ll_pthread_rwlock_unlock(rwlock);
        if (r)
-               lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_);
+               lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
 
        return r;
 }
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
        ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
 #endif
 
-       printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
-
        lockdep_init();
 
        __init_state = done;
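
Every hunk in these liblockdep files changes the same thing: the fifth ("check")
argument of lock_acquire(), apparently tracking the upstream lockdep change that
reduced check from the 0/1/2 levels to a boolean. A sketch annotating the
argument positions, using preload.c's internal __get_lock() helper:

  static void example_acquire(pthread_mutex_t *mutex)
  {
          lock_acquire(&__get_lock(mutex)->dep_map,
                       0,    /* subclass */
                       0,    /* trylock: 1 in the *_trylock wrappers */
                       0,    /* read: 2 on the rdlock paths */
                       1,    /* check: now boolean, was 0/1/2 */
                       NULL, /* nest_lock */
                       (unsigned long)_RET_IP_);
  }
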
index b83184f..93825a1 100644 (file)
@@ -765,6 +765,9 @@ static void free_arg(struct print_arg *arg)
        case PRINT_BSTRING:
                free(arg->string.string);
                break;
+       case PRINT_BITMASK:
+               free(arg->bitmask.bitmask);
+               break;
        case PRINT_DYNAMIC_ARRAY:
                free(arg->dynarray.index);
                break;
@@ -2268,6 +2271,7 @@ static int arg_num_eval(struct print_arg *arg, long long *val)
        case PRINT_FIELD ... PRINT_SYMBOL:
        case PRINT_STRING:
        case PRINT_BSTRING:
+       case PRINT_BITMASK:
        default:
                do_warning("invalid eval type %d", arg->type);
                ret = 0;
@@ -2296,6 +2300,7 @@ static char *arg_eval (struct print_arg *arg)
        case PRINT_FIELD ... PRINT_SYMBOL:
        case PRINT_STRING:
        case PRINT_BSTRING:
+       case PRINT_BITMASK:
        default:
                do_warning("invalid eval type %d", arg->type);
                break;
@@ -2683,6 +2688,35 @@ process_str(struct event_format *event __maybe_unused, struct print_arg *arg,
        return EVENT_ERROR;
 }
 
+static enum event_type
+process_bitmask(struct event_format *event __maybe_unused, struct print_arg *arg,
+           char **tok)
+{
+       enum event_type type;
+       char *token;
+
+       if (read_expect_type(EVENT_ITEM, &token) < 0)
+               goto out_free;
+
+       arg->type = PRINT_BITMASK;
+       arg->bitmask.bitmask = token;
+       arg->bitmask.offset = -1;
+
+       if (read_expected(EVENT_DELIM, ")") < 0)
+               goto out_err;
+
+       type = read_token(&token);
+       *tok = token;
+
+       return type;
+
+ out_free:
+       free_token(token);
+ out_err:
+       *tok = NULL;
+       return EVENT_ERROR;
+}
+
 static struct pevent_function_handler *
 find_func_handler(struct pevent *pevent, char *func_name)
 {
@@ -2797,6 +2831,10 @@ process_function(struct event_format *event, struct print_arg *arg,
                free_token(token);
                return process_str(event, arg, tok);
        }
+       if (strcmp(token, "__get_bitmask") == 0) {
+               free_token(token);
+               return process_bitmask(event, arg, tok);
+       }
        if (strcmp(token, "__get_dynamic_array") == 0) {
                free_token(token);
                return process_dynamic_array(event, arg, tok);
@@ -3324,6 +3362,7 @@ eval_num_arg(void *data, int size, struct event_format *event, struct print_arg
                return eval_type(val, arg, 0);
        case PRINT_STRING:
        case PRINT_BSTRING:
+       case PRINT_BITMASK:
                return 0;
        case PRINT_FUNC: {
                struct trace_seq s;
@@ -3556,6 +3595,60 @@ static void print_str_to_seq(struct trace_seq *s, const char *format,
                trace_seq_printf(s, format, str);
 }
 
+static void print_bitmask_to_seq(struct pevent *pevent,
+                                struct trace_seq *s, const char *format,
+                                int len_arg, const void *data, int size)
+{
+       int nr_bits = size * 8;
+       int str_size = (nr_bits + 3) / 4;
+       int len = 0;
+       char buf[3];
+       char *str;
+       int index;
+       int i;
+
+       /*
+        * The kernel likes to put in commas every 32 bits; we
+        * can do the same.
+        */
+       str_size += (nr_bits - 1) / 32;
+
+       str = malloc(str_size + 1);
+       if (!str) {
+               do_warning("%s: not enough memory!", __func__);
+               return;
+       }
+       str[str_size] = 0;
+
+       /* Start out with -2 for the two chars per byte */
+       for (i = str_size - 2; i >= 0; i -= 2) {
+               /*
+                * data points to a bit mask of size bytes.
+                * In the kernel, this is an array of long words, thus
+                * endianness is very important.
+                */
+               if (pevent->file_bigendian)
+                       index = size - (len + 1);
+               else
+                       index = len;
+
+               snprintf(buf, 3, "%02x", *((unsigned char *)data + index));
+               memcpy(str + i, buf, 2);
+               len++;
+               if (!(len & 3) && i > 0) {
+                       i--;
+                       str[i] = ',';
+               }
+       }
+
+       if (len_arg >= 0)
+               trace_seq_printf(s, format, len_arg, str);
+       else
+               trace_seq_printf(s, format, str);
+
+       free(str);
+}
+
 static void print_str_arg(struct trace_seq *s, void *data, int size,
                          struct event_format *event, const char *format,
                          int len_arg, struct print_arg *arg)
@@ -3691,6 +3784,23 @@ static void print_str_arg(struct trace_seq *s, void *data, int size,
        case PRINT_BSTRING:
                print_str_to_seq(s, format, len_arg, arg->string.string);
                break;
+       case PRINT_BITMASK: {
+               int bitmask_offset;
+               int bitmask_size;
+
+               if (arg->bitmask.offset == -1) {
+                       struct format_field *f;
+
+                       f = pevent_find_any_field(event, arg->bitmask.bitmask);
+                       arg->bitmask.offset = f->offset;
+               }
+               bitmask_offset = data2host4(pevent, data + arg->bitmask.offset);
+               bitmask_size = bitmask_offset >> 16;
+               bitmask_offset &= 0xffff;
+               print_bitmask_to_seq(pevent, s, format, len_arg,
+                                    data + bitmask_offset, bitmask_size);
+               break;
+       }
        case PRINT_OP:
                /*
                 * The only op for string should be ? :
@@ -4822,6 +4932,9 @@ static void print_args(struct print_arg *args)
        case PRINT_BSTRING:
                printf("__get_str(%s)", args->string.string);
                break;
+       case PRINT_BITMASK:
+               printf("__get_bitmask(%s)", args->bitmask.bitmask);
+               break;
        case PRINT_TYPE:
                printf("(%s)", args->typecast.type);
                print_args(args->typecast.item);
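
The 32-bit descriptor read in the PRINT_BITMASK case above follows the kernel's
__data_loc convention: the low 16 bits give the payload offset within the
record, the high 16 bits give the payload length. A standalone sketch of the
unpacking (helper name is illustrative):

  #include <stdint.h>

  static void unpack_data_loc(uint32_t desc, uint16_t *offset, uint16_t *len)
  {
          *offset = desc & 0xffff;   /* where the bitmask bytes start */
          *len    = desc >> 16;      /* how many bytes are stored */
  }
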
index feab942..7a3873f 100644 (file)
@@ -107,8 +107,8 @@ typedef int (*pevent_event_handler_func)(struct trace_seq *s,
 typedef int (*pevent_plugin_load_func)(struct pevent *pevent);
 typedef int (*pevent_plugin_unload_func)(struct pevent *pevent);
 
-struct plugin_option {
-       struct plugin_option            *next;
+struct pevent_plugin_option {
+       struct pevent_plugin_option     *next;
        void                            *handle;
        char                            *file;
        char                            *name;
@@ -135,7 +135,7 @@ struct plugin_option {
  * PEVENT_PLUGIN_OPTIONS:  (optional)
  *   Plugin options that can be set before loading
  *
- *   struct plugin_option PEVENT_PLUGIN_OPTIONS[] = {
+ *   struct pevent_plugin_option PEVENT_PLUGIN_OPTIONS[] = {
  *     {
  *             .name = "option-name",
 *     .plugin_alias = "override-file-name", (optional)
@@ -208,6 +208,11 @@ struct print_arg_string {
        int                     offset;
 };
 
+struct print_arg_bitmask {
+       char                    *bitmask;
+       int                     offset;
+};
+
 struct print_arg_field {
        char                    *name;
        struct format_field     *field;
@@ -274,6 +279,7 @@ enum print_arg_type {
        PRINT_DYNAMIC_ARRAY,
        PRINT_OP,
        PRINT_FUNC,
+       PRINT_BITMASK,
 };
 
 struct print_arg {
@@ -288,6 +294,7 @@ struct print_arg {
                struct print_arg_hex            hex;
                struct print_arg_func           func;
                struct print_arg_string         string;
+               struct print_arg_bitmask        bitmask;
                struct print_arg_op             op;
                struct print_arg_dynarray       dynarray;
        };
@@ -354,6 +361,8 @@ enum pevent_func_arg_type {
 
 enum pevent_flag {
        PEVENT_NSEC_OUTPUT              = 1,    /* output in NSECS */
+       PEVENT_DISABLE_SYS_PLUGINS      = 1 << 1,
+       PEVENT_DISABLE_PLUGINS          = 1 << 2,
 };
 
 #define PEVENT_ERRORS                                                        \
@@ -410,9 +419,19 @@ enum pevent_errno {
 
 struct plugin_list;
 
+#define INVALID_PLUGIN_LIST_OPTION     ((char **)((unsigned long)-1))
+
 struct plugin_list *traceevent_load_plugins(struct pevent *pevent);
 void traceevent_unload_plugins(struct plugin_list *plugin_list,
                               struct pevent *pevent);
+char **traceevent_plugin_list_options(void);
+void traceevent_plugin_free_options_list(char **list);
+int traceevent_plugin_add_options(const char *name,
+                                 struct pevent_plugin_option *options);
+void traceevent_plugin_remove_options(struct pevent_plugin_option *options);
+void traceevent_print_plugins(struct trace_seq *s,
+                             const char *prefix, const char *suffix,
+                             const struct plugin_list *list);
 
 struct cmdline;
 struct cmdline_list;
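
A hedged sketch of how a tool might use the new flags before loading plugins;
the wrapper function is hypothetical, and the pevent is assumed to come from the
library's existing allocator:

  #include "event-parse.h"

  static struct plugin_list *load_user_plugins_only(struct pevent *pevent)
  {
          /* Skip the compiled-in PLUGIN_DIR; the environment and
           * ~/.traceevent/plugins directories are still scanned. */
          pevent->flags |= PEVENT_DISABLE_SYS_PLUGINS;
          return traceevent_load_plugins(pevent);
  }
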
index 0c8bf67..136162c 100644 (file)
@@ -18,6 +18,7 @@
  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  */
 
+#include <stdio.h>
 #include <string.h>
 #include <dlfcn.h>
 #include <stdlib.h>
 
 #define LOCAL_PLUGIN_DIR ".traceevent/plugins"
 
+static struct registered_plugin_options {
+       struct registered_plugin_options        *next;
+       struct pevent_plugin_option             *options;
+} *registered_options;
+
+static struct trace_plugin_options {
+       struct trace_plugin_options     *next;
+       char                            *plugin;
+       char                            *option;
+       char                            *value;
+} *trace_plugin_options;
+
 struct plugin_list {
        struct plugin_list      *next;
        char                    *name;
        void                    *handle;
 };
 
+/**
+ * traceevent_plugin_list_options - get list of plugin options
+ *
+ * Returns an array of char strings that list the currently registered
+ * plugin options in the format of <plugin>:<option>. This list can be
+ * used for toggling the options.
+ *
+ * Returns NULL if no options are registered. On error it returns
+ * INVALID_PLUGIN_LIST_OPTION.
+ *
+ * Must be freed with traceevent_plugin_free_options_list().
+ */
+char **traceevent_plugin_list_options(void)
+{
+       struct registered_plugin_options *reg;
+       struct pevent_plugin_option *op;
+       char **list = NULL;
+       char *name;
+       int count = 0;
+
+       for (reg = registered_options; reg; reg = reg->next) {
+               for (op = reg->options; op->name; op++) {
+                       char *alias = op->plugin_alias ? op->plugin_alias : op->file;
+                       char **temp = list;
+
+                       name = malloc(strlen(op->name) + strlen(alias) + 2);
+                       if (!name)
+                               goto err;
+
+                       sprintf(name, "%s:%s", alias, op->name);
+                       list = realloc(list, sizeof(*list) * (count + 2));
+                       if (!list) {
+                               list = temp;
+                               free(name);
+                               goto err;
+                       }
+                       list[count++] = name;
+                       list[count] = NULL;
+               }
+       }
+       return list;
+
+ err:
+       while (--count >= 0)
+               free(list[count]);
+       free(list);
+
+       return INVALID_PLUGIN_LIST_OPTION;
+}
+
+void traceevent_plugin_free_options_list(char **list)
+{
+       int i;
+
+       if (!list)
+               return;
+
+       if (list == INVALID_PLUGIN_LIST_OPTION)
+               return;
+
+       for (i = 0; list[i]; i++)
+               free(list[i]);
+
+       free(list);
+}
+
+static int
+update_option(const char *file, struct pevent_plugin_option *option)
+{
+       struct trace_plugin_options *op;
+       char *plugin;
+
+       if (option->plugin_alias) {
+               plugin = strdup(option->plugin_alias);
+               if (!plugin)
+                       return -1;
+       } else {
+               char *p;
+               plugin = strdup(file);
+               if (!plugin)
+                       return -1;
+               p = strstr(plugin, ".");
+               if (p)
+                       *p = '\0';
+       }
+
+       /* first look for named options */
+       for (op = trace_plugin_options; op; op = op->next) {
+               if (!op->plugin)
+                       continue;
+               if (strcmp(op->plugin, plugin) != 0)
+                       continue;
+               if (strcmp(op->option, option->name) != 0)
+                       continue;
+
+               option->value = op->value;
+               option->set ^= 1;
+               goto out;
+       }
+
+       /* then look for unnamed options */
+       for (op = trace_plugin_options; op; op = op->next) {
+               if (op->plugin)
+                       continue;
+               if (strcmp(op->option, option->name) != 0)
+                       continue;
+
+               option->value = op->value;
+               option->set ^= 1;
+               break;
+       }
+
+ out:
+       free(plugin);
+       return 0;
+}
+
+/**
+ * traceevent_plugin_add_options - Add a set of options by a plugin
+ * @name: The name of the plugin adding the options
+ * @options: The set of options being loaded
+ *
+ * Sets the options to the values that have been added by the user.
+ */
+int traceevent_plugin_add_options(const char *name,
+                                 struct pevent_plugin_option *options)
+{
+       struct registered_plugin_options *reg;
+
+       reg = malloc(sizeof(*reg));
+       if (!reg)
+               return -1;
+       reg->next = registered_options;
+       reg->options = options;
+       registered_options = reg;
+
+       while (options->name) {
+               update_option(name, options);
+               options++;
+       }
+       return 0;
+}
+
+/**
+ * traceevent_plugin_remove_options - remove plugin options that were registered
+ * @options: Options to be removed that were registered with traceevent_plugin_add_options()
+ */
+void traceevent_plugin_remove_options(struct pevent_plugin_option *options)
+{
+       struct registered_plugin_options **last;
+       struct registered_plugin_options *reg;
+
+       for (last = &registered_options; *last; last = &(*last)->next) {
+               if ((*last)->options == options) {
+                       reg = *last;
+                       *last = reg->next;
+                       free(reg);
+                       return;
+               }
+       }
+}
+
+/**
+ * traceevent_print_plugins - print out the list of plugins loaded
+ * @s: the trace_seq descriptor to write to
+ * @prefix: The prefix string to add before listing the option name
+ * @suffix: The suffix string to append after the option name
+ * @list: The list of plugins (usually returned by traceevent_load_plugins())
+ *
+ * Writes to the trace_seq @s the list of plugins (files) that is
+ * returned by traceevent_load_plugins(). Use @prefix and @suffix for formatting:
+ * @prefix = "  ", @suffix = "\n".
+ */
+void traceevent_print_plugins(struct trace_seq *s,
+                             const char *prefix, const char *suffix,
+                             const struct plugin_list *list)
+{
+       while (list) {
+               trace_seq_printf(s, "%s%s%s", prefix, list->name, suffix);
+               list = list->next;
+       }
+}
+
 static void
 load_plugin(struct pevent *pevent, const char *path,
            const char *file, void *data)
@@ -148,12 +344,17 @@ load_plugins(struct pevent *pevent, const char *suffix,
        char *path;
        char *envdir;
 
+       if (pevent->flags & PEVENT_DISABLE_PLUGINS)
+               return;
+
        /*
         * If a system plugin directory was defined,
         * check that first.
         */
 #ifdef PLUGIN_DIR
-       load_plugins_dir(pevent, suffix, PLUGIN_DIR, load_plugin, data);
+       if (!(pevent->flags & PEVENT_DISABLE_SYS_PLUGINS))
+               load_plugins_dir(pevent, suffix, PLUGIN_DIR,
+                                load_plugin, data);
 #endif
 
        /*
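
A hedged usage sketch for the listing API added above, including the
INVALID_PLUGIN_LIST_OPTION error sentinel; the function name here is
illustrative:

  #include <stdio.h>
  #include "event-parse.h"

  static void dump_plugin_options(void)
  {
          char **list = traceevent_plugin_list_options();
          int i;

          if (list == INVALID_PLUGIN_LIST_OPTION)
                  return;                         /* allocation failure */

          for (i = 0; list && list[i]; i++)
                  printf("%s\n", list[i]);        /* "<plugin>:<option>" */

          traceevent_plugin_free_options_list(list);
  }
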
index 80ba4ff..a00ec19 100644 (file)
@@ -33,6 +33,29 @@ static int cpus = -1;
 
 #define STK_BLK 10
 
+struct pevent_plugin_option plugin_options[] =
+{
+       {
+               .name = "parent",
+               .plugin_alias = "ftrace",
+               .description =
+               "Print parent of functions for function events",
+       },
+       {
+               .name = "indent",
+               .plugin_alias = "ftrace",
+               .description =
+               "Try to show function call indents, based on parents",
+               .set = 1,
+       },
+       {
+               .name = NULL,
+       }
+};
+
+static struct pevent_plugin_option *ftrace_parent = &plugin_options[0];
+static struct pevent_plugin_option *ftrace_indent = &plugin_options[1];
+
 static void add_child(struct func_stack *stack, const char *child, int pos)
 {
        int i;
@@ -119,7 +142,8 @@ static int function_handler(struct trace_seq *s, struct pevent_record *record,
 
        parent = pevent_find_function(pevent, pfunction);
 
-       index = add_and_get_index(parent, func, record->cpu);
+       if (parent && ftrace_indent->set)
+               index = add_and_get_index(parent, func, record->cpu);
 
        trace_seq_printf(s, "%*s", index*3, "");
 
@@ -128,11 +152,13 @@ static int function_handler(struct trace_seq *s, struct pevent_record *record,
        else
                trace_seq_printf(s, "0x%llx", function);
 
-       trace_seq_printf(s, " <-- ");
-       if (parent)
-               trace_seq_printf(s, "%s", parent);
-       else
-               trace_seq_printf(s, "0x%llx", pfunction);
+       if (ftrace_parent->set) {
+               trace_seq_printf(s, " <-- ");
+               if (parent)
+                       trace_seq_printf(s, "%s", parent);
+               else
+                       trace_seq_printf(s, "0x%llx", pfunction);
+       }
 
        return 0;
 }
@@ -141,6 +167,9 @@ int PEVENT_PLUGIN_LOADER(struct pevent *pevent)
 {
        pevent_register_event_handler(pevent, -1, "ftrace", "function",
                                      function_handler, NULL);
+
+       traceevent_plugin_add_options("ftrace", plugin_options);
+
        return 0;
 }
 
@@ -157,6 +186,8 @@ void PEVENT_PLUGIN_UNLOADER(struct pevent *pevent)
                free(fstack[i].stack);
        }
 
+       traceevent_plugin_remove_options(plugin_options);
+
        free(fstack);
        fstack = NULL;
        cpus = -1;
index cefdf43..d2b59af 100644 (file)
@@ -117,6 +117,22 @@ OPTIONS
        By default, every sort keys not specified in -F will be appended
        automatically.
 
+       If the --mem-mode option is used, the following sort keys are also
+       available (incompatible with --branch-stack):
+       symbol_daddr, dso_daddr, locked, tlb, mem, snoop, dcacheline.
+
+       - symbol_daddr: name of the data symbol being accessed at the time of the sample
+       - dso_daddr: name of the library or module containing the data being
+       accessed at the time of the sample
+       - locked: whether the bus was locked at the time of the sample
+       - tlb: type of tlb access for the data at the time of the sample
+       - mem: type of memory access for the data at the time of the sample
+       - snoop: type of snoop (if any) for the data at the time of the sample
+       - dcacheline: the cacheline the data address is on at the time of the sample
+
+       The default sort keys are then changed to local_weight, mem, sym, dso,
+       symbol_daddr, dso_daddr, snoop, tlb and locked; see '--mem-mode'.
+
 -p::
 --parent=<regex>::
         A regex filter to identify parent. The parent is a caller of this
@@ -260,6 +276,13 @@ OPTIONS
        Demangle symbol names to human readable form. It's enabled by default,
        disable with --no-demangle.
 
+--mem-mode::
+       Use the data addresses of samples in addition to instruction addresses
+       to build the histograms.  To generate meaningful output, the perf.data
+       file must have been obtained using perf record -d -W and using a
+       special event -e cpu/mem-loads/ or -e cpu/mem-stores/. See
+       'perf mem' for simpler access.
+
 --percent-limit::
        Do not show entries which have an overhead under that percent.
        (Default: 0).
index bc5990c..5e0f986 100644 (file)
@@ -43,27 +43,6 @@ TIMECHART OPTIONS
 
 --symfs=<directory>::
         Look for files with symbols relative to this directory.
-
-EXAMPLES
---------
-
-$ perf timechart record git pull
-
-  [ perf record: Woken up 13 times to write data ]
-  [ perf record: Captured and wrote 4.253 MB perf.data (~185801 samples) ]
-
-$ perf timechart
-
-  Written 10.2 seconds of trace to output.svg.
-
-Record system-wide timechart:
-
-  $ perf timechart record
-
-  then generate timechart and highlight 'gcc' tasks:
-
-  $ perf timechart --highlight gcc
-
 -n::
 --proc-num::
         Print task info for at least given number of tasks.
@@ -88,6 +67,26 @@ RECORD OPTIONS
 --callchain::
         Do call-graph (stack chain/backtrace) recording
 
+EXAMPLES
+--------
+
+$ perf timechart record git pull
+
+  [ perf record: Woken up 13 times to write data ]
+  [ perf record: Captured and wrote 4.253 MB perf.data (~185801 samples) ]
+
+$ perf timechart
+
+  Written 10.2 seconds of trace to output.svg.
+
+Record system-wide timechart:
+
+  $ perf timechart record
+
+  then generate timechart and highlight 'gcc' tasks:
+
+  $ perf timechart --highlight gcc
+
 SEE ALSO
 --------
 linkperf:perf-record[1]
index ae20edf..9670a16 100644 (file)
@@ -819,15 +819,15 @@ TAG_FOLDERS= . ../lib/traceevent ../lib/api ../lib/symbol
 TAG_FILES= ../../include/uapi/linux/perf_event.h
 
 TAGS:
-       $(RM) TAGS
+       $(QUIET_GEN)$(RM) TAGS; \
        $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs etags -a $(TAG_FILES)
 
 tags:
-       $(RM) tags
+       $(QUIET_GEN)$(RM) tags; \
        $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs ctags -a $(TAG_FILES)
 
 cscope:
-       $(RM) cscope*
+       $(QUIET_GEN)$(RM) cscope*; \
        $(FIND) $(TAG_FOLDERS) -name '*.[hcS]' -print | xargs cscope -b $(TAG_FILES)
 
 ### Detect prefix changes
index 6a3af00..16c7c11 100644 (file)
@@ -72,7 +72,7 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
        if (ret)
                return ret;
 
-       if (&inject->output.is_pipe)
+       if (!inject->output.is_pipe)
                return 0;
 
        return perf_event__repipe_synth(tool, event);
index cdcd4eb..c63fa29 100644 (file)
@@ -288,6 +288,13 @@ static void cleanup_params(void)
        memset(&params, 0, sizeof(params));
 }
 
+static void pr_err_with_code(const char *msg, int err)
+{
+       pr_err("%s", msg);
+       pr_debug(" Reason: %s (Code: %d)", strerror(-err), err);
+       pr_err("\n");
+}
+
 static int
 __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 {
@@ -379,7 +386,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                }
                ret = parse_probe_event_argv(argc, argv);
                if (ret < 0) {
-                       pr_err("  Error: Parse Error.  (%d)\n", ret);
+                       pr_err_with_code("  Error: Command Parse Error.", ret);
                        return ret;
                }
        }
@@ -419,8 +426,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                }
                ret = show_perf_probe_events();
                if (ret < 0)
-                       pr_err("  Error: Failed to show event list. (%d)\n",
-                              ret);
+                       pr_err_with_code("  Error: Failed to show event list.", ret);
                return ret;
        }
        if (params.show_funcs) {
@@ -445,8 +451,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                strfilter__delete(params.filter);
                params.filter = NULL;
                if (ret < 0)
-                       pr_err("  Error: Failed to show functions."
-                              " (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to show functions.", ret);
                return ret;
        }
 
@@ -464,7 +469,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
 
                ret = show_line_range(&params.line_range, params.target);
                if (ret < 0)
-                       pr_err("  Error: Failed to show lines. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to show lines.", ret);
                return ret;
        }
        if (params.show_vars) {
@@ -485,7 +490,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                strfilter__delete(params.filter);
                params.filter = NULL;
                if (ret < 0)
-                       pr_err("  Error: Failed to show vars. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to show vars.", ret);
                return ret;
        }
 #endif
@@ -493,7 +498,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        if (params.dellist) {
                ret = del_perf_probe_events(params.dellist);
                if (ret < 0) {
-                       pr_err("  Error: Failed to delete events. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to delete events.", ret);
                        return ret;
                }
        }
@@ -504,7 +509,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                                            params.target,
                                            params.force_add);
                if (ret < 0) {
-                       pr_err("  Error: Failed to add events. (%d)\n", ret);
+                       pr_err_with_code("  Error: Failed to add events.", ret);
                        return ret;
                }
        }
index 4f100b5..f30ac5e 100644 (file)
@@ -299,7 +299,11 @@ else
       NO_LIBUNWIND := 1
       NO_LIBDW_DWARF_UNWIND := 1
     else
-      msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
+      ifneq ($(filter s% -static%,$(LDFLAGS)),)
+        msg := $(error No static glibc found, please install glibc-static);
+      else
+        msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]);
+      endif
     endif
   else
     ifndef NO_LIBDW_DWARF_UNWIND
index 78f7b92..95c58fc 100644 (file)
@@ -458,6 +458,7 @@ int main(int argc, const char **argv)
 
        /* The page_size is placed in util object. */
        page_size = sysconf(_SC_PAGE_SIZE);
+       cacheline_size = sysconf(_SC_LEVEL1_DCACHE_LINESIZE);
 
        cmd = perf_extract_argv0_path(argv[0]);
        if (!cmd)
index 802e3cd..6f8b01b 100644 (file)
@@ -3,6 +3,8 @@
  *
  * Builtin regression testing command: ever growing number of sanity tests
  */
+#include <unistd.h>
+#include <string.h>
 #include "builtin.h"
 #include "intlist.h"
 #include "tests.h"
@@ -50,9 +52,17 @@ static struct test {
                .func = test__pmu,
        },
        {
-               .desc = "Test dso data interface",
+               .desc = "Test dso data read",
                .func = test__dso_data,
        },
+       {
+               .desc = "Test dso data cache",
+               .func = test__dso_data_cache,
+       },
+       {
+               .desc = "Test dso data reopen",
+               .func = test__dso_data_reopen,
+       },
        {
                .desc = "roundtrip evsel->name check",
                .func = test__perf_evsel__roundtrip_name_test,
@@ -172,6 +182,34 @@ static bool perf_test__matches(int curr, int argc, const char *argv[])
        return false;
 }
 
+static int run_test(struct test *test)
+{
+       int status, err = -1, child = fork();
+
+       if (child < 0) {
+               pr_err("failed to fork test: %s\n", strerror(errno));
+               return -1;
+       }
+
+       if (!child) {
+               pr_debug("test child forked, pid %d\n", getpid());
+               err = test->func();
+               exit(err);
+       }
+
+       wait(&status);
+
+       if (WIFEXITED(status)) {
+               err = WEXITSTATUS(status);
+               pr_debug("test child finished with %d\n", err);
+       } else if (WIFSIGNALED(status)) {
+               err = -1;
+               pr_debug("test child interrupted\n");
+       }
+
+       return err;
+}
+
 static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
 {
        int i = 0;
@@ -200,7 +238,7 @@ static int __cmd_test(int argc, const char *argv[], struct intlist *skiplist)
                }
 
                pr_debug("\n--- start ---\n");
-               err = tests[curr].func();
+               err = run_test(&tests[curr]);
                pr_debug("---- end ----\n%s:", tests[curr].desc);
 
                switch (err) {
index 3e6cb17..630808c 100644 (file)
@@ -1,22 +1,27 @@
-#include "util.h"
-
 #include <stdlib.h>
 #include <linux/types.h>
 #include <sys/stat.h>
 #include <fcntl.h>
 #include <string.h>
-
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <api/fs/fs.h>
+#include "util.h"
 #include "machine.h"
 #include "symbol.h"
 #include "tests.h"
 
 static char *test_file(int size)
 {
-       static char buf_templ[] = "/tmp/test-XXXXXX";
+#define TEMPL "/tmp/perf-test-XXXXXX"
+       static char buf_templ[sizeof(TEMPL)];
        char *templ = buf_templ;
        int fd, i;
        unsigned char *buf;
 
+       strcpy(buf_templ, TEMPL);
+#undef TEMPL
+
        fd = mkstemp(templ);
        if (fd < 0) {
                perror("mkstemp failed");
@@ -150,3 +155,204 @@ int test__dso_data(void)
        unlink(file);
        return 0;
 }
+
+static long open_files_cnt(void)
+{
+       char path[PATH_MAX];
+       struct dirent *dent;
+       DIR *dir;
+       long nr = 0;
+
+       scnprintf(path, PATH_MAX, "%s/self/fd", procfs__mountpoint());
+       pr_debug("fd path: %s\n", path);
+
+       dir = opendir(path);
+       TEST_ASSERT_VAL("failed to open fd directory", dir);
+
+       while ((dent = readdir(dir)) != NULL) {
+               if (!strcmp(dent->d_name, ".") ||
+                   !strcmp(dent->d_name, ".."))
+                       continue;
+
+               nr++;
+       }
+
+       closedir(dir);
+       return nr - 1;
+}
+
+static struct dso **dsos;
+
+static int dsos__create(int cnt, int size)
+{
+       int i;
+
+       dsos = malloc(sizeof(*dsos) * cnt);
+       TEST_ASSERT_VAL("failed to alloc dsos array", dsos);
+
+       for (i = 0; i < cnt; i++) {
+               char *file;
+
+               file = test_file(size);
+               TEST_ASSERT_VAL("failed to get dso file", file);
+
+               dsos[i] = dso__new(file);
+               TEST_ASSERT_VAL("failed to get dso", dsos[i]);
+       }
+
+       return 0;
+}
+
+static void dsos__delete(int cnt)
+{
+       int i;
+
+       for (i = 0; i < cnt; i++) {
+               struct dso *dso = dsos[i];
+
+               unlink(dso->name);
+               dso__delete(dso);
+       }
+
+       free(dsos);
+}
+
+static int set_fd_limit(int n)
+{
+       struct rlimit rlim;
+
+       if (getrlimit(RLIMIT_NOFILE, &rlim))
+               return -1;
+
+       pr_debug("file limit %ld, new %d\n", (long) rlim.rlim_cur, n);
+
+       rlim.rlim_cur = n;
+       return setrlimit(RLIMIT_NOFILE, &rlim);
+}
+
+int test__dso_data_cache(void)
+{
+       struct machine machine;
+       long nr_end, nr = open_files_cnt();
+       int dso_cnt, limit, i, fd;
+
+       memset(&machine, 0, sizeof(machine));
+
+       /* raise the process fd limit well above current usage */
+       limit = nr * 4;
+       TEST_ASSERT_VAL("failed to set file limit", !set_fd_limit(limit));
+
+       /* and this is now our dso open FDs limit + 1 extra */
+       dso_cnt = limit / 2 + 1;
+       TEST_ASSERT_VAL("failed to create dsos\n",
+               !dsos__create(dso_cnt, TEST_FILE_SIZE));
+
+       for (i = 0; i < (dso_cnt - 1); i++) {
+               struct dso *dso = dsos[i];
+
+               /*
+                * Open dsos via dso__data_fd or dso__data_read_offset.
+                * Both open the data file and keep it open.
+                */
+               if (i % 2) {
+                       fd = dso__data_fd(dso, &machine);
+                       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+               } else {
+                       #define BUFSIZE 10
+                       u8 buf[BUFSIZE];
+                       ssize_t n;
+
+                       n = dso__data_read_offset(dso, &machine, 0, buf, BUFSIZE);
+                       TEST_ASSERT_VAL("failed to read dso", n == BUFSIZE);
+               }
+       }
+
+       /* open +1 dso over the allowed limit */
+       fd = dso__data_fd(dsos[i], &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /* should force the first one to be closed */
+       TEST_ASSERT_VAL("failed to close dsos[0]", dsos[0]->data.fd == -1);
+
+       /* cleanup everything */
+       dsos__delete(dso_cnt);
+
+       /* Make sure we did not leak any file descriptor. */
+       nr_end = open_files_cnt();
+       pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
+       TEST_ASSERT_VAL("leaked files", nr == nr_end);
+       return 0;
+}
+
+int test__dso_data_reopen(void)
+{
+       struct machine machine;
+       long nr_end, nr = open_files_cnt();
+       int fd, fd_extra;
+
+#define dso_0 (dsos[0])
+#define dso_1 (dsos[1])
+#define dso_2 (dsos[2])
+
+       memset(&machine, 0, sizeof(machine));
+
+       /*
+        * Test scenario:
+        * - create 3 dso objects
+        * - set process file descriptor limit to current
+        *   files count + 3
+        * - test that the first dso gets closed when we
+        *   reach the files count limit
+        */
+
+       /* Make sure we are able to open 3 fds anyway */
+       TEST_ASSERT_VAL("failed to set file limit",
+                       !set_fd_limit((nr + 3)));
+
+       TEST_ASSERT_VAL("failed to create dsos\n", !dsos__create(3, TEST_FILE_SIZE));
+
+       /* open dso_0 */
+       fd = dso__data_fd(dso_0, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /* open dso_1 */
+       fd = dso__data_fd(dso_1, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /*
+        * Open an extra file descriptor so that we have just
+        * reached the file count limit.
+        */
+       fd_extra = open("/dev/null", O_RDONLY);
+       TEST_ASSERT_VAL("failed to open extra fd", fd_extra > 0);
+
+       /* open dso_2 */
+       fd = dso__data_fd(dso_2, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /*
+        * dso_0 should get closed, because we reached
+        * the file descriptor limit
+        */
+       TEST_ASSERT_VAL("failed to close dso_0", dso_0->data.fd == -1);
+
+       /* open dso_0 */
+       fd = dso__data_fd(dso_0, &machine);
+       TEST_ASSERT_VAL("failed to get fd", fd > 0);
+
+       /*
+        * dso_1 should get closed, because we reached
+        * the file descriptor limit
+        */
+       TEST_ASSERT_VAL("failed to close dso_1", dso_1->data.fd == -1);
+
+       /* cleanup everything */
+       close(fd_extra);
+       dsos__delete(3);
+
+       /* Make sure we did not leak any file descriptor. */
+       nr_end = open_files_cnt();
+       pr_debug("nr start %ld, nr stop %ld\n", nr, nr_end);
+       TEST_ASSERT_VAL("leaked files", nr == nr_end);
+       return 0;
+}
index 108f0cd..96adb73 100644 (file)
@@ -15,7 +15,7 @@ static int mmap_handler(struct perf_tool *tool __maybe_unused,
                        struct perf_sample *sample __maybe_unused,
                        struct machine *machine)
 {
-       return machine__process_mmap_event(machine, event, NULL);
+       return machine__process_mmap2_event(machine, event, NULL);
 }
 
 static int init_live_machine(struct machine *machine)
index 2f92d6e..69a71ff 100644 (file)
@@ -205,8 +205,7 @@ $(run):
        ( eval $$cmd ) >> $@ 2>&1; \
        echo "  test: $(call test,$@)" >> $@ 2>&1; \
        $(call test,$@) && \
-       rm -f $@ \
-       rm -rf $$TMP_DEST
+       rm -rf $@ $$TMP_DEST || (cat $@ ; false)
 
 $(run_O):
        $(call clean)
@@ -217,9 +216,7 @@ $(run_O):
        ( eval $$cmd ) >> $@ 2>&1 && \
        echo "  test: $(call test_O,$@)" >> $@ 2>&1; \
        $(call test_O,$@) && \
-       rm -f $@ && \
-       rm -rf $$TMP_O \
-       rm -rf $$TMP_DEST
+       rm -rf $@ $$TMP_O $$TMP_DEST || (cat $@ ; false)
 
 tarpkg:
        @cmd="$(PERF)/tests/perf-targz-src-pkg $(PERF)"; \
index 022bb68..ed64790 100644 (file)
@@ -28,6 +28,8 @@ int test__syscall_open_tp_fields(void);
 int test__pmu(void);
 int test__attr(void);
 int test__dso_data(void);
+int test__dso_data_cache(void);
+int test__dso_data_reopen(void);
 int test__parse_events(void);
 int test__hists_link(void);
 int test__python_use(void);
index 52c03fb..04a229a 100644 (file)
@@ -17,6 +17,7 @@
 #include "../util.h"
 #include "../ui.h"
 #include "map.h"
+#include "annotate.h"
 
 struct hist_browser {
        struct ui_browser   b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                         bi->to.sym->name) > 0)
                                annotate_t = nr_options++;
                } else {
-
                        if (browser->selection != NULL &&
                            browser->selection->sym != NULL &&
-                           !browser->selection->map->dso->annotate_warned &&
-                               asprintf(&options[nr_options], "Annotate %s",
-                                        browser->selection->sym->name) > 0)
-                               annotate = nr_options++;
+                           !browser->selection->map->dso->annotate_warned) {
+                               struct annotation *notes;
+
+                               notes = symbol__annotation(browser->selection->sym);
+
+                               if (notes->src &&
+                                   asprintf(&options[nr_options], "Annotate %s",
+                                                browser->selection->sym->name) > 0)
+                                       annotate = nr_options++;
+                       }
                }
 
                if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
 
                if (choice == annotate || choice == annotate_t || choice == annotate_f) {
                        struct hist_entry *he;
+                       struct annotation *notes;
                        int err;
 do_annotate:
                        if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
                                he->ms.map = he->branch_info->to.map;
                        }
 
+                       notes = symbol__annotation(he->ms.sym);
+                       if (!notes->src)
+                               continue;
+
                        /*
                         * Don't let this be freed, say, by hists__decay_entry.
                         */
index 64453d6..819f104 100644 (file)
@@ -1,3 +1,6 @@
+#include <asm/bug.h>
+#include <sys/time.h>
+#include <sys/resource.h>
 #include "symbol.h"
 #include "dso.h"
 #include "machine.h"
@@ -136,7 +139,48 @@ int dso__read_binary_type_filename(const struct dso *dso,
        return ret;
 }
 
-static int open_dso(struct dso *dso, struct machine *machine)
+/*
+ * Global list of open DSOs and the counter.
+ */
+static LIST_HEAD(dso__data_open);
+static long dso__data_open_cnt;
+
+static void dso__list_add(struct dso *dso)
+{
+       list_add_tail(&dso->data.open_entry, &dso__data_open);
+       dso__data_open_cnt++;
+}
+
+static void dso__list_del(struct dso *dso)
+{
+       list_del(&dso->data.open_entry);
+       WARN_ONCE(dso__data_open_cnt <= 0,
+                 "DSO data fd counter out of bounds.");
+       dso__data_open_cnt--;
+}
+
+static void close_first_dso(void);
+
+static int do_open(char *name)
+{
+       int fd;
+
+       do {
+               fd = open(name, O_RDONLY);
+               if (fd >= 0)
+                       return fd;
+
+               pr_debug("dso open failed, mmap: %s\n", strerror(errno));
+               if (!dso__data_open_cnt || errno != EMFILE)
+                       break;
+
+               close_first_dso();
+       } while (1);
+
+       return -1;
+}
+
+static int __open_dso(struct dso *dso, struct machine *machine)
 {
        int fd;
        char *root_dir = (char *)"";
@@ -154,11 +198,130 @@ static int open_dso(struct dso *dso, struct machine *machine)
                return -EINVAL;
        }
 
-       fd = open(name, O_RDONLY);
+       fd = do_open(name);
        free(name);
        return fd;
 }
 
+static void check_data_close(void);
+
+/**
+ * open_dso - Open DSO data file
+ * @dso: dso object
+ *
+ * Open @dso's data file descriptor and update the
+ * list/count of open DSO objects.
+ */
+static int open_dso(struct dso *dso, struct machine *machine)
+{
+       int fd = __open_dso(dso, machine);
+
+       if (fd > 0) {
+               dso__list_add(dso);
+               /*
+                * Check if we crossed the allowed number
+                * of opened DSOs and close one if needed.
+                */
+               check_data_close();
+       }
+
+       return fd;
+}
+
+static void close_data_fd(struct dso *dso)
+{
+       if (dso->data.fd >= 0) {
+               close(dso->data.fd);
+               dso->data.fd = -1;
+               dso->data.file_size = 0;
+               dso__list_del(dso);
+       }
+}
+
+/**
+ * close_dso - Close DSO data file
+ * @dso: dso object
+ *
+ * Close @dso's data file descriptor and update the
+ * list/count of open DSO objects.
+ */
+static void close_dso(struct dso *dso)
+{
+       close_data_fd(dso);
+}
+
+static void close_first_dso(void)
+{
+       struct dso *dso;
+
+       dso = list_first_entry(&dso__data_open, struct dso, data.open_entry);
+       close_dso(dso);
+}
+
+static rlim_t get_fd_limit(void)
+{
+       struct rlimit l;
+       rlim_t limit = 0;
+
+       /* Allow half of the current open fd limit. */
+       if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
+               if (l.rlim_cur == RLIM_INFINITY)
+                       limit = l.rlim_cur;
+               else
+                       limit = l.rlim_cur / 2;
+       } else {
+               pr_err("failed to get fd limit\n");
+               limit = 1;
+       }
+
+       return limit;
+}
+
+static bool may_cache_fd(void)
+{
+       static rlim_t limit;
+
+       if (!limit)
+               limit = get_fd_limit();
+
+       if (limit == RLIM_INFINITY)
+               return true;
+
+       return limit > (rlim_t) dso__data_open_cnt;
+}
+
+/*
+ * Check and close the least recently used dso if we crossed
+ * the allowed limit of opened dso file descriptors. The limit
+ * is half of the RLIMIT_NOFILE value.
+ */
+static void check_data_close(void)
+{
+       bool cache_fd = may_cache_fd();
+
+       if (!cache_fd)
+               close_first_dso();
+}
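get_fd_limit() above caps the number of cached descriptors at half of the soft RLIMIT_NOFILE value, and does not cap at all when the limit is RLIM_INFINITY. A standalone sketch using only getrlimit(2) shows what that cap works out to on a given system:

#include <stdio.h>
#include <sys/time.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit l;
        rlim_t limit;

        if (getrlimit(RLIMIT_NOFILE, &l)) {
                perror("getrlimit");
                return 1;
        }

        /* Mirror get_fd_limit(): allow half of the soft fd limit,
         * or everything when the limit is RLIM_INFINITY. */
        limit = (l.rlim_cur == RLIM_INFINITY) ? l.rlim_cur : l.rlim_cur / 2;

        printf("soft RLIMIT_NOFILE: %llu, dso fd cache cap: %llu\n",
               (unsigned long long)l.rlim_cur, (unsigned long long)limit);
        return 0;
}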
+
+/**
+ * dso__data_close - Close DSO data file
+ * @dso: dso object
+ *
+ * External interface to close @dso's data file descriptor.
+ */
+void dso__data_close(struct dso *dso)
+{
+       close_dso(dso);
+}
+
+/**
+ * dso__data_fd - Get dso's data file descriptor
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * External interface to find dso's file, open it and
+ * return its file descriptor.
+ */
 int dso__data_fd(struct dso *dso, struct machine *machine)
 {
        enum dso_binary_type binary_type_data[] = {
@@ -168,8 +331,13 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
        };
        int i = 0;
 
-       if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND)
-               return open_dso(dso, machine);
+       if (dso->data.fd >= 0)
+               return dso->data.fd;
+
+       if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
+               dso->data.fd = open_dso(dso, machine);
+               return dso->data.fd;
+       }
 
        do {
                int fd;
@@ -178,7 +346,7 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
 
                fd = open_dso(dso, machine);
                if (fd >= 0)
-                       return fd;
+                       return dso->data.fd = fd;
 
        } while (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND);
 
@@ -260,16 +428,10 @@ dso_cache__memcpy(struct dso_cache *cache, u64 offset,
 }
 
 static ssize_t
-dso_cache__read(struct dso *dso, struct machine *machine,
-                u64 offset, u8 *data, ssize_t size)
+dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 {
        struct dso_cache *cache;
        ssize_t ret;
-       int fd;
-
-       fd = dso__data_fd(dso, machine);
-       if (fd < 0)
-               return -1;
 
        do {
                u64 cache_offset;
@@ -283,16 +445,16 @@ dso_cache__read(struct dso *dso, struct machine *machine,
                cache_offset = offset & DSO__DATA_CACHE_MASK;
                ret = -EINVAL;
 
-               if (-1 == lseek(fd, cache_offset, SEEK_SET))
+               if (-1 == lseek(dso->data.fd, cache_offset, SEEK_SET))
                        break;
 
-               ret = read(fd, cache->data, DSO__DATA_CACHE_SIZE);
+               ret = read(dso->data.fd, cache->data, DSO__DATA_CACHE_SIZE);
                if (ret <= 0)
                        break;
 
                cache->offset = cache_offset;
                cache->size   = ret;
-               dso_cache__insert(&dso->cache, cache);
+               dso_cache__insert(&dso->data.cache, cache);
 
                ret = dso_cache__memcpy(cache, offset, data, size);
 
@@ -301,24 +463,27 @@ dso_cache__read(struct dso *dso, struct machine *machine,
        if (ret <= 0)
                free(cache);
 
-       close(fd);
        return ret;
 }
 
-static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
-                             u64 offset, u8 *data, ssize_t size)
+static ssize_t dso_cache_read(struct dso *dso, u64 offset,
+                             u8 *data, ssize_t size)
 {
        struct dso_cache *cache;
 
-       cache = dso_cache__find(&dso->cache, offset);
+       cache = dso_cache__find(&dso->data.cache, offset);
        if (cache)
                return dso_cache__memcpy(cache, offset, data, size);
        else
-               return dso_cache__read(dso, machine, offset, data, size);
+               return dso_cache__read(dso, offset, data, size);
 }
 
-ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
-                             u64 offset, u8 *data, ssize_t size)
+/*
+ * Reads and caches dso data in DSO__DATA_CACHE_SIZE-sized chunks
+ * kept in the rb_tree. Any read of already cached data is served
+ * from the cache.
+ */
+static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 {
        ssize_t r = 0;
        u8 *p = data;
@@ -326,7 +491,7 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
        do {
                ssize_t ret;
 
-               ret = dso_cache_read(dso, machine, offset, p, size);
+               ret = dso_cache_read(dso, offset, p, size);
                if (ret < 0)
                        return ret;
 
@@ -346,6 +511,67 @@ ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
        return r;
 }
 
+static int data_file_size(struct dso *dso)
+{
+       struct stat st;
+
+       if (!dso->data.file_size) {
+               if (fstat(dso->data.fd, &st)) {
+                       pr_err("dso mmap failed, fstat: %s\n", strerror(errno));
+                       return -1;
+               }
+               dso->data.file_size = st.st_size;
+       }
+
+       return 0;
+}
+
+static ssize_t data_read_offset(struct dso *dso, u64 offset,
+                               u8 *data, ssize_t size)
+{
+       if (data_file_size(dso))
+               return -1;
+
+       /* Check the offset sanity. */
+       if (offset > dso->data.file_size)
+               return -1;
+
+       if (offset + size < offset)
+               return -1;
+
+       return cached_read(dso, offset, data, size);
+}
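The "offset + size < offset" test in data_read_offset() above is the usual unsigned wraparound check: with u64 arithmetic the sum overflows exactly when it comes out smaller than either operand. A small demonstration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t offset = UINT64_MAX - 4, size = 16;

        /* offset + size wraps around to 11, which is < offset, so the
         * bogus request is rejected before any read is attempted. */
        if (offset + size < offset)
                printf("rejected: %#llx + %llu wraps around\n",
                       (unsigned long long)offset, (unsigned long long)size);
        return 0;
}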
+
+/**
+ * dso__data_read_offset - Read data from dso file offset
+ * @dso: dso object
+ * @machine: machine object
+ * @offset: file offset
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from a dso file offset. Opens
+ * the dso data file and uses cached_read() to get the data.
+ */
+ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
+                             u64 offset, u8 *data, ssize_t size)
+{
+       if (dso__data_fd(dso, machine) < 0)
+               return -1;
+
+       return data_read_offset(dso, offset, data, size);
+}
+
+/**
+ * dso__data_read_addr - Read data from dso address
+ * @dso: dso object
+ * @machine: machine object
+ * @addr: virtual memory address
+ * @data: buffer to store data
+ * @size: size of the @data buffer
+ *
+ * External interface to read data from dso address.
+ */
 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
                            struct machine *machine, u64 addr,
                            u8 *data, ssize_t size)
@@ -473,7 +699,8 @@ struct dso *dso__new(const char *name)
                dso__set_short_name(dso, dso->name, false);
                for (i = 0; i < MAP__NR_TYPES; ++i)
                        dso->symbols[i] = dso->symbol_names[i] = RB_ROOT;
-               dso->cache = RB_ROOT;
+               dso->data.cache = RB_ROOT;
+               dso->data.fd = -1;
                dso->symtab_type = DSO_BINARY_TYPE__NOT_FOUND;
                dso->binary_type = DSO_BINARY_TYPE__NOT_FOUND;
                dso->loaded = 0;
@@ -485,6 +712,7 @@ struct dso *dso__new(const char *name)
                dso->kernel = DSO_TYPE_USER;
                dso->needs_swap = DSO_SWAP__UNSET;
                INIT_LIST_HEAD(&dso->node);
+               INIT_LIST_HEAD(&dso->data.open_entry);
        }
 
        return dso;
@@ -506,7 +734,8 @@ void dso__delete(struct dso *dso)
                dso->long_name_allocated = false;
        }
 
-       dso_cache__free(&dso->cache);
+       dso__data_close(dso);
+       dso_cache__free(&dso->data.cache);
        dso__free_a2l(dso);
        zfree(&dso->symsrc_filename);
        free(dso);
index 38efe95..ad553ba 100644 (file)
@@ -76,7 +76,6 @@ struct dso {
        struct list_head node;
        struct rb_root   symbols[MAP__NR_TYPES];
        struct rb_root   symbol_names[MAP__NR_TYPES];
-       struct rb_root   cache;
        void             *a2l;
        char             *symsrc_filename;
        unsigned int     a2l_fails;
@@ -99,6 +98,15 @@ struct dso {
        const char       *long_name;
        u16              long_name_len;
        u16              short_name_len;
+
+       /* dso data file */
+       struct {
+               struct rb_root   cache;
+               int              fd;
+               size_t           file_size;
+               struct list_head open_entry;
+       } data;
+
        char             name[0];
 };
 
@@ -141,7 +149,47 @@ char dso__symtab_origin(const struct dso *dso);
 int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
                                   char *root_dir, char *filename, size_t size);
 
+/*
+ * The dso__data_* external interface provides the following functions:
+ *   dso__data_fd
+ *   dso__data_close
+ *   dso__data_read_offset
+ *   dso__data_read_addr
+ *
+ * Please refer to the dso.c object code for the documentation of
+ * each function and its arguments. The following text explains the
+ * dso file descriptor caching.
+ *
+ * The dso__data_* interface caches opened file descriptors to speed
+ * up dso data accesses. The idea is to leave the file descriptor
+ * opened, ideally for the whole life of the dso object.
+ *
+ * The current usage of the dso__data_* interface is as follows:
+ *
+ * Get DSO's fd:
+ *   int fd = dso__data_fd(dso, machine);
+ *   USE 'fd' SOMEHOW
+ *
+ * Read DSO's data:
+ *   n = dso__data_read_offset(dso, machine, offset, buf, BUFSIZE);
+ *   n = dso__data_read_addr(dso, map, machine, addr, buf, BUFSIZE);
+ *
+ * Eventually close DSO's fd:
+ *   dso__data_close(dso);
+ *
+ * It is not necessary to close the DSO object's data file. Each time
+ * a new DSO data file is opened, the limit (RLIMIT_NOFILE/2) is
+ * checked. Once it is crossed, the oldest opened DSO object is closed.
+ *
+ * The dso__delete function calls the close_dso function to ensure the
+ * data file descriptor gets closed/unmapped before the dso object
+ * is freed.
+ *
+ * TODO
+ */
 int dso__data_fd(struct dso *dso, struct machine *machine);
+void dso__data_close(struct dso *dso);
+
 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
                              u64 offset, u8 *data, ssize_t size);
 ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
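The comment block above fixes the caching contract: every successful open puts the dso on a global list, and once the RLIMIT_NOFILE/2 cap is crossed the oldest entry is evicted. A toy, self-contained version of that list discipline (plain C with illustrative names, not perf's types):

#include <stdio.h>
#include <stdlib.h>

struct cached_fd {
        int fd;
        struct cached_fd *next; /* singly linked, oldest first */
};

static struct cached_fd *head, **tail = &head;
static long open_cnt, limit = 2;        /* stand-in for RLIMIT_NOFILE/2 */

static void cache_add(int fd)
{
        struct cached_fd *c = malloc(sizeof(*c));

        if (!c)
                return;
        c->fd = fd;
        c->next = NULL;
        *tail = c;
        tail = &c->next;
        open_cnt++;

        /* check_data_close() analogue: evict the oldest entry
         * once the cap is crossed. */
        while (open_cnt > limit) {
                struct cached_fd *old = head;

                head = old->next;
                if (!head)
                        tail = &head;
                printf("evicting fd %d\n", old->fd);
                free(old);
                open_cnt--;
        }
}

int main(void)
{
        cache_add(3);
        cache_add(4);
        cache_add(5);   /* evicts fd 3, the oldest */

        while (head) {  /* cleanup */
                struct cached_fd *c = head;

                head = c->next;
                free(c);
        }
        return 0;
}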
index 65795b8..d0281bd 100644 (file)
@@ -1,4 +1,5 @@
 #include <linux/types.h>
+#include <sys/mman.h>
 #include "event.h"
 #include "debug.h"
 #include "hist.h"
@@ -178,13 +179,14 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                return -1;
        }
 
-       event->header.type = PERF_RECORD_MMAP;
+       event->header.type = PERF_RECORD_MMAP2;
 
        while (1) {
                char bf[BUFSIZ];
                char prot[5];
                char execname[PATH_MAX];
                char anonstr[] = "//anon";
+               unsigned int ino;
                size_t size;
                ssize_t n;
 
@@ -195,15 +197,20 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                strcpy(execname, "");
 
                /* 00400000-0040c000 r-xp 00000000 fd:01 41038  /bin/cat */
-               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %*x:%*x %*u %s\n",
-                      &event->mmap.start, &event->mmap.len, prot,
-                      &event->mmap.pgoff,
-                      execname);
+               n = sscanf(bf, "%"PRIx64"-%"PRIx64" %s %"PRIx64" %x:%x %u %s\n",
+                      &event->mmap2.start, &event->mmap2.len, prot,
+                      &event->mmap2.pgoff, &event->mmap2.maj,
+                      &event->mmap2.min,
+                      &ino, execname);
+
                /*
                 * Anon maps don't have the execname.
                 */
-               if (n < 4)
+               if (n < 7)
                        continue;
+
+               event->mmap2.ino = (u64)ino;
+
                /*
                 * Just like the kernel, see __perf_event_mmap in kernel/perf_event.c
                 */
@@ -212,6 +219,21 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                else
                        event->header.misc = PERF_RECORD_MISC_GUEST_USER;
 
+               /* map protection and flags bits */
+               event->mmap2.prot = 0;
+               event->mmap2.flags = 0;
+               if (prot[0] == 'r')
+                       event->mmap2.prot |= PROT_READ;
+               if (prot[1] == 'w')
+                       event->mmap2.prot |= PROT_WRITE;
+               if (prot[2] == 'x')
+                       event->mmap2.prot |= PROT_EXEC;
+
+               if (prot[3] == 's')
+                       event->mmap2.flags |= MAP_SHARED;
+               else
+                       event->mmap2.flags |= MAP_PRIVATE;
+
                if (prot[2] != 'x') {
                        if (!mmap_data || prot[0] != 'r')
                                continue;
@@ -223,15 +245,15 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                        strcpy(execname, anonstr);
 
                size = strlen(execname) + 1;
-               memcpy(event->mmap.filename, execname, size);
+               memcpy(event->mmap2.filename, execname, size);
                size = PERF_ALIGN(size, sizeof(u64));
-               event->mmap.len -= event->mmap.start;
-               event->mmap.header.size = (sizeof(event->mmap) -
-                                       (sizeof(event->mmap.filename) - size));
-               memset(event->mmap.filename + size, 0, machine->id_hdr_size);
-               event->mmap.header.size += machine->id_hdr_size;
-               event->mmap.pid = tgid;
-               event->mmap.tid = pid;
+               event->mmap2.len -= event->mmap2.start;
+               event->mmap2.header.size = (sizeof(event->mmap2) -
+                                       (sizeof(event->mmap2.filename) - size));
+               memset(event->mmap2.filename + size, 0, machine->id_hdr_size);
+               event->mmap2.header.size += machine->id_hdr_size;
+               event->mmap2.pid = tgid;
+               event->mmap2.tid = pid;
 
                if (process(tool, event, &synth_sample, machine) != 0) {
                        rc = -1;
@@ -612,12 +634,15 @@ size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp)
 {
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64
-                          " %02x:%02x %"PRIu64" %"PRIu64"]: %c %s\n",
+                          " %02x:%02x %"PRIu64" %"PRIu64"]: %c%c%c%c %s\n",
                       event->mmap2.pid, event->mmap2.tid, event->mmap2.start,
                       event->mmap2.len, event->mmap2.pgoff, event->mmap2.maj,
                       event->mmap2.min, event->mmap2.ino,
                       event->mmap2.ino_generation,
-                      (event->header.misc & PERF_RECORD_MISC_MMAP_DATA) ? 'r' : 'x',
+                      (event->mmap2.prot & PROT_READ) ? 'r' : '-',
+                      (event->mmap2.prot & PROT_WRITE) ? 'w' : '-',
+                      (event->mmap2.prot & PROT_EXEC) ? 'x' : '-',
+                      (event->mmap2.flags & MAP_SHARED) ? 's' : 'p',
                       event->mmap2.filename);
 }
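The synthesis code above decodes the four-character perms field of /proc/PID/maps (for example "r-xp") into PROT_* and MAP_* bits. The same decode in a standalone sketch:

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
        const char perms[5] = "r-xp";   /* as read from /proc/PID/maps */
        unsigned int prot = 0, flags = 0;

        if (perms[0] == 'r')
                prot |= PROT_READ;
        if (perms[1] == 'w')
                prot |= PROT_WRITE;
        if (perms[2] == 'x')
                prot |= PROT_EXEC;
        flags |= (perms[3] == 's') ? MAP_SHARED : MAP_PRIVATE;

        printf("perms %s -> prot %#x flags %#x\n", perms, prot, flags);
        return 0;
}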
 
index d970232..e5dd40a 100644 (file)
@@ -7,6 +7,7 @@
 #include "../perf.h"
 #include "map.h"
 #include "build-id.h"
+#include "perf_regs.h"
 
 struct mmap_event {
        struct perf_event_header header;
@@ -27,6 +28,8 @@ struct mmap2_event {
        u32 min;
        u64 ino;
        u64 ino_generation;
+       u32 prot;
+       u32 flags;
        char filename[PATH_MAX];
 };
 
@@ -87,6 +90,10 @@ struct regs_dump {
        u64 abi;
        u64 mask;
        u64 *regs;
+
+       /* Cached values/mask filled by first register access. */
+       u64 cache_regs[PERF_REGS_MAX];
+       u64 cache_mask;
 };
 
 struct stack_dump {
index 5c28d82..8606175 100644 (file)
@@ -589,10 +589,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
        }
 
        /*
-        * We default some events to a 1 default interval. But keep
+        * We default some events to have a default interval. But keep
         * it a weak assumption overridable by the user.
         */
-       if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
+       if (!attr->sample_period || (opts->user_freq != UINT_MAX ||
                                     opts->user_interval != ULLONG_MAX)) {
                if (opts->freq) {
                        perf_evsel__set_sample_bit(evsel, PERIOD);
@@ -659,6 +659,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                perf_evsel__set_sample_bit(evsel, WEIGHT);
 
        attr->mmap  = track;
+       attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
 
        if (opts->sample_transaction)
index 5a0a4b2..30df618 100644 (file)
@@ -128,6 +128,8 @@ void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
+                       hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
+                                          symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
@@ -439,9 +441,10 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                        .map    = al->map,
                        .sym    = al->sym,
                },
-               .cpu    = al->cpu,
-               .ip     = al->addr,
-               .level  = al->level,
+               .cpu     = al->cpu,
+               .cpumode = al->cpumode,
+               .ip      = al->addr,
+               .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = period,
index d2bf035..742f49a 100644 (file)
@@ -72,6 +72,7 @@ enum hist_column {
        HISTC_MEM_TLB,
        HISTC_MEM_LVL,
        HISTC_MEM_SNOOP,
+       HISTC_MEM_DCACHELINE,
        HISTC_TRANSACTION,
        HISTC_NR_COLS, /* Last entry */
 };
index 7409ac8..c73e1fc 100644 (file)
@@ -496,18 +496,6 @@ struct process_args {
        u64 start;
 };
 
-static int symbol__in_kernel(void *arg, const char *name,
-                            char type __maybe_unused, u64 start)
-{
-       struct process_args *args = arg;
-
-       if (strchr(name, '['))
-               return 0;
-
-       args->start = start;
-       return 1;
-}
-
 static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                                           size_t bufsz)
 {
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
                scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
 }
 
-/* Figure out the start address of kernel map from /proc/kallsyms */
-static u64 machine__get_kernel_start_addr(struct machine *machine)
+const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
+
+/* Figure out the start address of kernel map from /proc/kallsyms.
+ * Returns the name of the start symbol in *symbol_name. Pass in NULL as
+ * symbol_name if it's not that important.
+ */
+static u64 machine__get_kernel_start_addr(struct machine *machine,
+                                         const char **symbol_name)
 {
        char filename[PATH_MAX];
-       struct process_args args;
+       int i;
+       const char *name;
+       u64 addr = 0;
 
        machine__get_kallsyms_filename(machine, filename, PATH_MAX);
 
        if (symbol__restricted_filename(filename, "/proc/kallsyms"))
                return 0;
 
-       if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0)
-               return 0;
+       for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
+               addr = kallsyms__get_function_start(filename, name);
+               if (addr)
+                       break;
+       }
+
+       if (symbol_name)
+               *symbol_name = name;
 
-       return args.start;
+       return addr;
 }
 
 int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
 {
        enum map_type type;
-       u64 start = machine__get_kernel_start_addr(machine);
+       u64 start = machine__get_kernel_start_addr(machine, NULL);
 
        for (type = 0; type < MAP__NR_TYPES; ++type) {
                struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
        return 0;
 }
 
-const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
-
 int machine__create_kernel_maps(struct machine *machine)
 {
        struct dso *kernel = machine__get_kernel(machine);
-       char filename[PATH_MAX];
        const char *name;
-       u64 addr = 0;
-       int i;
-
-       machine__get_kallsyms_filename(machine, filename, PATH_MAX);
-
-       for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
-               addr = kallsyms__get_function_start(filename, name);
-               if (addr)
-                       break;
-       }
+       u64 addr = machine__get_kernel_start_addr(machine, &name);
        if (!addr)
                return -1;
 
@@ -1060,6 +1050,8 @@ int machine__process_mmap2_event(struct machine *machine,
                        event->mmap2.pid, event->mmap2.maj,
                        event->mmap2.min, event->mmap2.ino,
                        event->mmap2.ino_generation,
+                       event->mmap2.prot,
+                       event->mmap2.flags,
                        event->mmap2.filename, type);
 
        if (map == NULL)
@@ -1105,7 +1097,7 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
 
        map = map__new(&machine->user_dsos, event->mmap.start,
                        event->mmap.len, event->mmap.pgoff,
-                       event->mmap.pid, 0, 0, 0, 0,
+                       event->mmap.pid, 0, 0, 0, 0, 0, 0,
                        event->mmap.filename,
                        type);
 
index 8ccbb32..25c571f 100644 (file)
@@ -138,7 +138,7 @@ void map__init(struct map *map, enum map_type type,
 
 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
-                    u64 ino_gen, char *filename,
+                    u64 ino_gen, u32 prot, u32 flags, char *filename,
                     enum map_type type)
 {
        struct map *map = malloc(sizeof(*map));
@@ -157,6 +157,8 @@ struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                map->min = d_min;
                map->ino = ino;
                map->ino_generation = ino_gen;
+               map->prot = prot;
+               map->flags = flags;
 
                if ((anon || no_dso) && type == MAP__FUNCTION) {
                        snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid);
index ae2d451..7758c72 100644 (file)
@@ -35,6 +35,8 @@ struct map {
        bool                    referenced;
        bool                    erange_warned;
        u32                     priv;
+       u32                     prot;
+       u32                     flags;
        u64                     pgoff;
        u64                     reloc;
        u32                     maj, min; /* only valid for MMAP2 record */
@@ -118,7 +120,7 @@ void map__init(struct map *map, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso);
 struct map *map__new(struct list_head *dsos__list, u64 start, u64 len,
                     u64 pgoff, u32 pid, u32 d_maj, u32 d_min, u64 ino,
-                    u64 ino_gen,
+                    u64 ino_gen, u32 prot, u32 flags,
                     char *filename, enum map_type type);
 struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
 void map__delete(struct map *map);
index a3539ef..43168fb 100644 (file)
@@ -1,11 +1,15 @@
 #include <errno.h>
 #include "perf_regs.h"
+#include "event.h"
 
 int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
 {
        int i, idx = 0;
        u64 mask = regs->mask;
 
+       if (regs->cache_mask & (1 << id))
+               goto out;
+
        if (!(mask & (1 << id)))
                return -EINVAL;
 
@@ -14,6 +18,10 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id)
                        idx++;
        }
 
-       *valp = regs->regs[idx];
+       regs->cache_mask |= (1 << id);
+       regs->cache_regs[id] = regs->regs[idx];
+
+out:
+       *valp = regs->cache_regs[id];
        return 0;
 }
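perf_reg_value() works on a packed array: a sample carries only the registers whose bits are set in regs->mask, in ascending bit order, so register 'id' sits at an index equal to the number of set mask bits below it. The patch additionally memoizes the looked-up value in cache_regs/cache_mask; the sketch below shows just the indexing step, with illustrative names:

#include <stdint.h>
#include <stdio.h>

/* Register 'id' lives at index popcount(mask & ((1 << id) - 1))
 * within the packed value array. */
static int reg_value(uint64_t *valp, const uint64_t *packed,
                     uint64_t mask, int id)
{
        int i, idx = 0;

        if (!(mask & (1ULL << id)))
                return -1;

        for (i = 0; i < id; i++)
                if (mask & (1ULL << i))
                        idx++;

        *valp = packed[idx];
        return 0;
}

int main(void)
{
        uint64_t packed[] = { 100, 200, 300 }; /* values for regs 0, 3, 5 */
        uint64_t mask = (1 << 0) | (1 << 3) | (1 << 5);
        uint64_t val;

        if (!reg_value(&val, packed, mask, 5))
                printf("reg 5 = %llu\n", (unsigned long long)val); /* 300 */
        return 0;
}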
index 79c78f7..980dbf7 100644 (file)
@@ -2,7 +2,8 @@
 #define __PERF_REGS_H
 
 #include <linux/types.h>
-#include "event.h"
+
+struct regs_dump;
 
 #ifdef HAVE_PERF_REGS_SUPPORT
 #include <perf_regs.h>
@@ -11,6 +12,7 @@ int perf_reg_value(u64 *valp, struct regs_dump *regs, int id);
 
 #else
 #define PERF_REGS_MASK 0
+#define PERF_REGS_MAX  0
 
 static inline const char *perf_reg_name(int id __maybe_unused)
 {
index 0d1542f..9a0a183 100644 (file)
@@ -628,11 +628,11 @@ static int __show_line_range(struct line_range *lr, const char *module)
 
        ret = debuginfo__find_line_range(dinfo, lr);
        debuginfo__delete(dinfo);
-       if (ret == 0) {
+       if (ret == 0 || ret == -ENOENT) {
                pr_warning("Specified source line is not found.\n");
                return -ENOENT;
        } else if (ret < 0) {
-               pr_warning("Debuginfo analysis failed. (%d)\n", ret);
+               pr_warning("Debuginfo analysis failed.\n");
                return ret;
        }
 
@@ -641,7 +641,7 @@ static int __show_line_range(struct line_range *lr, const char *module)
        ret = get_real_path(tmp, lr->comp_dir, &lr->path);
        free(tmp);      /* Free old path */
        if (ret < 0) {
-               pr_warning("Failed to find source file. (%d)\n", ret);
+               pr_warning("Failed to find source file path.\n");
                return ret;
        }
 
@@ -721,9 +721,14 @@ static int show_available_vars_at(struct debuginfo *dinfo,
        ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
                                                max_vls, externs);
        if (ret <= 0) {
-               pr_err("Failed to find variables at %s (%d)\n", buf, ret);
+               if (ret == 0 || ret == -ENOENT) {
+                       pr_err("Failed to find the address of %s\n", buf);
+                       ret = -ENOENT;
+               } else {
+                       pr_warning("Debuginfo analysis failed.\n");
+               }
                goto end;
        }
+
        /* Some variables are found */
        fprintf(stdout, "Available variables at %s\n", buf);
        for (i = 0; i < ret; i++) {
index 9d8eb26..98e3047 100644 (file)
@@ -573,14 +573,13 @@ static int find_variable(Dwarf_Die *sc_die, struct probe_finder *pf)
        if (!die_find_variable_at(sc_die, pf->pvar->var, pf->addr, &vr_die)) {
                /* Search again in global variables */
-               if (!die_find_variable_at(&pf->cu_die, pf->pvar->var, 0, &vr_die))
+               if (!die_find_variable_at(&pf->cu_die, pf->pvar->var,
+                                         0, &vr_die)) {
+                       pr_warning("Failed to find '%s' in this function.\n",
+                                  pf->pvar->var);
                        ret = -ENOENT;
+               }
        }
        if (ret >= 0)
                ret = convert_variable(&vr_die, pf);
 
-       if (ret < 0)
-               pr_warning("Failed to find '%s' in this function.\n",
-                          pf->pvar->var);
        return ret;
 }
 
@@ -1281,7 +1280,11 @@ out:
        return ret;
 }
 
-/* Find available variables at given probe point */
+/*
+ * Find available variables at the given probe point.
+ * Returns the number of found probe points, 0 if there is no
+ * matching probe point, or <0 if an error occurs.
+ */
 int debuginfo__find_available_vars_at(struct debuginfo *dbg,
                                      struct perf_probe_event *pev,
                                      struct variable_list **vls,
index e108207..af7da56 100644 (file)
@@ -215,6 +215,7 @@ static void define_event_symbols(struct event_format *event,
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
        case PRINT_STRING:
+       case PRINT_BITMASK:
                break;
        case PRINT_TYPE:
                define_event_symbols(event, ev_name, args->typecast.item);
index cd9774d..1c41932 100644 (file)
@@ -197,6 +197,7 @@ static void define_event_symbols(struct event_format *event,
        case PRINT_BSTRING:
        case PRINT_DYNAMIC_ARRAY:
        case PRINT_FUNC:
+       case PRINT_BITMASK:
                /* we should warn... */
                return;
        }
@@ -622,6 +623,7 @@ static int python_generate_script(struct pevent *pevent, const char *outfile)
                        fprintf(ofp, "%s=", f->name);
                        if (f->flags & FIELD_IS_STRING ||
                            f->flags & FIELD_IS_FLAG ||
+                           f->flags & FIELD_IS_ARRAY ||
                            f->flags & FIELD_IS_SYMBOLIC)
                                fprintf(ofp, "%%s");
                        else if (f->flags & FIELD_IS_SIGNED)
index 45512ba..1ec57dd 100644 (file)
@@ -1,3 +1,4 @@
+#include <sys/mman.h>
 #include "sort.h"
 #include "hist.h"
 #include "comm.h"
@@ -784,6 +785,104 @@ static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
        return repsep_snprintf(bf, size, "%-*s", width, out);
 }
 
+static inline u64 cl_address(u64 address)
+{
+       /* return the cacheline of the address */
+       return (address & ~(cacheline_size - 1));
+}
+
+static int64_t
+sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
+{
+       u64 l, r;
+       struct map *l_map, *r_map;
+
+       if (!left->mem_info)  return -1;
+       if (!right->mem_info) return 1;
+
+       /* group event types together */
+       if (left->cpumode > right->cpumode) return -1;
+       if (left->cpumode < right->cpumode) return 1;
+
+       l_map = left->mem_info->daddr.map;
+       r_map = right->mem_info->daddr.map;
+
+       /* if both are NULL, jump to sort on al_addr instead */
+       if (!l_map && !r_map)
+               goto addr;
+
+       if (!l_map) return -1;
+       if (!r_map) return 1;
+
+       if (l_map->maj > r_map->maj) return -1;
+       if (l_map->maj < r_map->maj) return 1;
+
+       if (l_map->min > r_map->min) return -1;
+       if (l_map->min < r_map->min) return 1;
+
+       if (l_map->ino > r_map->ino) return -1;
+       if (l_map->ino < r_map->ino) return 1;
+
+       if (l_map->ino_generation > r_map->ino_generation) return -1;
+       if (l_map->ino_generation < r_map->ino_generation) return 1;
+
+       /*
+        * Addresses with no major/minor numbers are assumed to be
+        * anonymous in userspace.  Sort those on pid then address.
+        *
+        * The kernel and non-zero major/minor mapped areas are
+        * assumed to be unity mapped.  Sort those on address.
+        */
+
+       if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
+           (!(l_map->flags & MAP_SHARED)) &&
+           !l_map->maj && !l_map->min && !l_map->ino &&
+           !l_map->ino_generation) {
+               /* userspace anonymous */
+
+               if (left->thread->pid_ > right->thread->pid_) return -1;
+               if (left->thread->pid_ < right->thread->pid_) return 1;
+       }
+
+addr:
+       /* al_addr does all the right addr - start + offset calculations */
+       l = cl_address(left->mem_info->daddr.al_addr);
+       r = cl_address(right->mem_info->daddr.al_addr);
+
+       if (l > r) return -1;
+       if (l < r) return 1;
+
+       return 0;
+}
+
+static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
+                                         size_t size, unsigned int width)
+{
+       uint64_t addr = 0;
+       struct map *map = NULL;
+       struct symbol *sym = NULL;
+       char level = he->level;
+
+       if (he->mem_info) {
+               addr = cl_address(he->mem_info->daddr.al_addr);
+               map = he->mem_info->daddr.map;
+               sym = he->mem_info->daddr.sym;
+
+               /* print [s] for shared data mmaps */
+               if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
+                    map && (map->type == MAP__VARIABLE) &&
+                   (map->flags & MAP_SHARED) &&
+                   (map->maj || map->min || map->ino ||
+                    map->ino_generation))
+                       level = 's';
+               else if (!map)
+                       level = 'X';
+       }
+       return _hist_entry__sym_snprintf(map, sym, addr, level, bf, size,
+                                        width);
+}
+
 struct sort_entry sort_mispredict = {
        .se_header      = "Branch Mispredicted",
        .se_cmp         = sort__mispredict_cmp,
@@ -876,6 +975,13 @@ struct sort_entry sort_mem_snoop = {
        .se_width_idx   = HISTC_MEM_SNOOP,
 };
 
+struct sort_entry sort_mem_dcacheline = {
+       .se_header      = "Data Cacheline",
+       .se_cmp         = sort__dcacheline_cmp,
+       .se_snprintf    = hist_entry__dcacheline_snprintf,
+       .se_width_idx   = HISTC_MEM_DCACHELINE,
+};
+
 static int64_t
 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 {
@@ -1043,6 +1149,7 @@ static struct sort_dimension memory_sort_dimensions[] = {
        DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
        DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
        DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
+       DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
 };
 
 #undef DIM
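sort__dcacheline_cmp() above groups data addresses by cacheline via cl_address(), which simply masks off the low bits of the address. With a 64-byte cacheline, every address in [0x1200, 0x123f] collapses to 0x1200, as the following sketch demonstrates (cacheline_size is normally detected at startup; 64 is an assumption here):

#include <stdint.h>
#include <stdio.h>

static int cacheline_size = 64; /* perf reads the real value at startup */

static inline uint64_t cl_address(uint64_t address)
{
        /* return the cacheline of the address */
        return address & ~(uint64_t)(cacheline_size - 1);
}

int main(void)
{
        printf("%#llx -> %#llx\n", 0x1234ULL,
               (unsigned long long)cl_address(0x1234)); /* 0x1200 */
        printf("%#llx -> %#llx\n", 0x103fULL,
               (unsigned long long)cl_address(0x103f)); /* 0x1000 */
        return 0;
}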
index 5bf0098..041f0c9 100644 (file)
@@ -89,6 +89,7 @@ struct hist_entry {
        u64                     ip;
        u64                     transaction;
        s32                     cpu;
+       u8                      cpumode;
 
        struct hist_entry_diff  diff;
 
@@ -185,6 +186,7 @@ enum sort_type {
        SORT_MEM_TLB,
        SORT_MEM_LVL,
        SORT_MEM_SNOOP,
+       SORT_MEM_DCACHELINE,
 };
 
 /*
index bd5768d..25578b9 100644 (file)
@@ -250,7 +250,6 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
 
        /* Check the .eh_frame section for unwinding info */
        offset = elf_section_offset(fd, ".eh_frame_hdr");
-       close(fd);
 
        if (offset)
                ret = unwind_spec_ehframe(dso, machine, offset,
@@ -271,7 +270,6 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
 
        /* Check the .debug_frame section for unwinding info */
        *offset = elf_section_offset(fd, ".debug_frame");
-       close(fd);
 
        if (*offset)
                return 0;
index 7fff6be..95aefa7 100644 (file)
@@ -17,6 +17,7 @@
  * XXX We need to find a better place for these things...
  */
 unsigned int page_size;
+int cacheline_size;
 
 bool test_attr__enabled;
 
index b03da44..6686436 100644 (file)
@@ -304,6 +304,7 @@ char *rtrim(char *s);
 void dump_stack(void);
 
 extern unsigned int page_size;
+extern int cacheline_size;
 
 void get_term_dimensions(struct winsize *ws);
 
index ae5faf9..790c23a 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
 run_tests:
-       @/bin/sh ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
+       @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
 
 clean:
index aa290c0..552f081 100644 (file)
@@ -193,6 +193,11 @@ int main(int argc, char **argv)
        int msg, pid, err;
        struct msgque_data msgque;
 
+       if (getuid() != 0) {
+               printf("Please run the test as root - Exiting.\n");
+               exit(1);
+       }
+
        msgque.key = ftok(argv[0], 822155650);
        if (msgque.key == -1) {
                printf("Can't make key\n");
index 350bfed..058c76f 100644 (file)
@@ -1,6 +1,6 @@
 all:
 
 run_tests:
-       @/bin/sh ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
+       @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
 
 clean:
index 51267f4..2cede23 100644 (file)
@@ -2,7 +2,7 @@ PROGS := tm-resched-dscr
 
 all: $(PROGS)
 
-$(PROGS):
+$(PROGS): ../harness.c
 
 run_tests: all
        @-for PROG in $(PROGS); do \
index ee98e38..42d4c8c 100644 (file)
@@ -28,6 +28,8 @@
 #include <assert.h>
 #include <asm/tm.h>
 
+#include "utils.h"
+
 #define TBEGIN          ".long 0x7C00051D ;"
 #define TEND            ".long 0x7C00055D ;"
 #define TCHECK          ".long 0x7C00059C ;"
@@ -36,7 +38,8 @@
 #define SPRN_TEXASR     0x82
 #define SPRN_DSCR       0x03
 
-int main(void) {
+int test_body(void)
+{
        uint64_t rv, dscr1 = 1, dscr2, texasr;
 
        printf("Check DSCR TM context switch: ");
@@ -81,10 +84,15 @@ int main(void) {
                }
                if (dscr2 != dscr1) {
                        printf(" FAIL\n");
-                       exit(EXIT_FAILURE);
+                       return 1;
                } else {
                        printf(" OK\n");
-                       exit(EXIT_SUCCESS);
+                       return 0;
                }
        }
 }
+
+int main(void)
+{
+       return test_harness(test_body, "tm_resched_dscr");
+}
index 4473211..e775adc 100644 (file)
@@ -21,7 +21,7 @@ OBJS = tmon.o tui.o sysfs.o pid.o
 OBJS +=
 
 tmon: $(OBJS) Makefile tmon.h
-       $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw  -lpthread
+       $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS)  -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
 
 valgrind: tmon
         sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET)  1> /dev/null
index b30f531..09b7c32 100644 (file)
@@ -142,6 +142,7 @@ static void start_syslog(void)
 static void prepare_logging(void)
 {
        int i;
+       struct stat logstat;
 
        if (!logging)
                return;
@@ -152,6 +153,29 @@ static void prepare_logging(void)
                return;
        }
 
+       if (lstat(TMON_LOG_FILE, &logstat) < 0) {
+               syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE);
+               fclose(tmon_log);
+               tmon_log = NULL;
+               return;
+       }
+
+       /* The log file must be a regular file owned by us */
+       if (S_ISLNK(logstat.st_mode)) {
+               syslog(LOG_ERR, "Log file is a symlink.  Will not log\n");
+               fclose(tmon_log);
+               tmon_log = NULL;
+               return;
+       }
+
+       if (logstat.st_uid != getuid()) {
+               syslog(LOG_ERR, "We don't own the log file.  Not logging\n");
+               fclose(tmon_log);
+               tmon_log = NULL;
+               return;
+       }
+
        fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
        for (i = 0; i < ptdata.nr_tz_sensor; i++) {
                char binding_str[33]; /* size of long + 1 */
@@ -331,7 +355,7 @@ static void start_daemon_mode()
        disable_tui();
 
        /* change the file mode mask */
-       umask(0);
+       umask(S_IWGRP | S_IWOTH);
 
        /* new SID for the daemon process */
        sid = setsid();
index fe1e66b..a87e99f 100644 (file)
@@ -116,8 +116,8 @@ static const struct {
        .header = {
                .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
                .length = cpu_to_le32(sizeof descriptors),
-               .fs_count = 3,
-               .hs_count = 3,
+               .fs_count = cpu_to_le32(3),
+               .hs_count = cpu_to_le32(3),
        },
        .fs_descs = {
                .intf = {
index 56ff9be..476d3bf 100644 (file)
@@ -1526,17 +1526,33 @@ int kvm_vgic_hyp_init(void)
                goto out_unmap;
        }
 
-       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
-                vctrl_res.start, vgic_maint_irq);
-       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
-
        if (of_address_to_resource(vgic_node, 3, &vcpu_res)) {
                kvm_err("Cannot obtain VCPU resource\n");
                ret = -ENXIO;
                goto out_unmap;
        }
+
+       if (!PAGE_ALIGNED(vcpu_res.start)) {
+               kvm_err("GICV physical address 0x%llx not page aligned\n",
+                       (unsigned long long)vcpu_res.start);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
+       if (!PAGE_ALIGNED(resource_size(&vcpu_res))) {
+               kvm_err("GICV size 0x%llx not a multiple of page size 0x%lx\n",
+                       (unsigned long long)resource_size(&vcpu_res),
+                       PAGE_SIZE);
+               ret = -ENXIO;
+               goto out_unmap;
+       }
+
        vgic_vcpu_base = vcpu_res.start;
 
+       kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
+                vctrl_res.start, vgic_maint_irq);
+       on_each_cpu(vgic_init_maintenance_interrupt, NULL, 1);
+
        goto out;
 
 out_unmap: